observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \"\"\"\n return html\n\n# Function to get the feature words from text\ndef extract_features(document):\n document_words = set(document)\n features = {}\n for word in document_words:\n features[word] = (word in document_words)\n return features\n\n# Function to create dictionary of text\ndef make_full_dict(words):\n return dict([(word, True) for word in words])\n\n# Function to score words in text and make distributions\ndef create_word_scores():\n posWords = []\n negWords = []\n with open(POS_DATA_FILE, 'r') as posSentences:\n for i in posSentences:\n posWord = re.findall(r\"[\\w']+|[.,!?;]\", i.rstrip())\n posWords.append(posWord)\n with open(NEG_DATA_FILE, 'r') as negSentences:\n for i in negSentences:\n negWord = re.findall(r\"[\\w']+|[.,!?;]\", i.rstrip())\n negWords.append(negWord)\n posWords = list(itertools.chain(*posWords))\n negWords = list(itertools.chain(*negWords))\n\n # Build frequency distibution of all words and then frequency distributions of words within positive and negative labels\n word_fd = FreqDist()\n cond_word_fd = ConditionalFreqDist()\n for word in posWords:\n word_fd.inc(word.lower())\n cond_word_fd['pos'].inc(word.lower())\n for word in negWords:\n word_fd.inc(word.lower())\n cond_word_fd['neg'].inc(word.lower())\n\n # Create counts of positive, negative, and total words\n pos_word_count = cond_word_fd['pos'].N()\n neg_word_count = cond_word_fd['neg'].N()\n total_word_count = pos_word_count + neg_word_count\n\n # Builds dictionary of word scores based on chi-squared test\n word_scores = {}\n for word, freq in word_fd.iteritems():\n pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)\n neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)\n word_scores[word] = pos_score + neg_score\n\n return word_scores\n\nif len(sys.argv) < 2:\n print \"No file specificed\\n\"\nelse:\n input_filename = sys.argv[1]\n\nprint \"Initialized \"+project_slug+\" with min of \"+str(min_comments)+\" - processing file \"+input_filename\n\nprint \"Loading training data...\"\nDATA_DIRECTORY = os.path.join('training-data', 'twitter_data')\nPOS_DATA_FILE = os.path.join(DATA_DIRECTORY, 'positive_tweets.txt')\nNEG_DATA_FILE = os.path.join(DATA_DIRECTORY, 'negative_tweets.txt')\n\n# DATA_DIRECTORY = os.path.join('training-data', 'combined')\n# POS_DATA_FILE = os.path.join(DATA_DIRECTORY, 'positive.txt')\n# NEG_DATA_FILE = os.path.join(DATA_DIRECTORY, 'negative.txt')\n\nprint \"Training NLTK Bayesian classifier...\"\n\nposFeatures = []\nnegFeatures = []\n# Process text into words with pos/neg connotation\nwith open(POS_DATA_FILE, 'r') as posSentences:\n for i in posSentences:\n posWords = re.findall(r\"[\\w']+|[.,!?;]\", i.rstrip())\n posWords = [make_full_dict(posWords), 'pos']\n posFeatures.append(posWords)\nwith open(NEG_DATA_FILE, 'r') as negSentences:\n for i in negSentences:\n negWords = re.findall(r\"[\\w']+|[.,!?;]\", i.rstrip())\n negWords = [make_full_dict(negWords), 'neg']\n negFeatures.append(negWords)\n\n\n# Selects 5/6 of the features to be used for training and 1/6 to be used for testing\nposCutoff = int(math.floor(len(posFeatures)*5/6))\nnegCutoff = int(math.floor(len(negFeatures)*5/6))\ntrainFeatures = posFeatures[:posCutoff] + 
negFeatures[:negCutoff]\ntestFeatures = posFeatures[posCutoff:] + negFeatures[negCutoff:]\n\n# Train a Naive Bayes Classifier\nclassifier = NaiveBayesClassifier.train(trainFeatures)\n\n# Create reference and test set\nreferenceSets = collections.defaultdict(set)\ntestSets = collections.defaultdict(set)\n\n# Puts correctly labeled sentences in referenceSets and the predictively labeled version in testSets\nfor i, (features, label) in enumerate(testFeatures):\n referenceSets[label].add(i)\n predicted = classifier.classify(features)\n testSets[predicted].add(i)\n\nprint \"Esimated accuracy: \", nltk.classify.util.accuracy(classifier, testFeatures)\n\n\nprint \"Talk data loaded from file\"\nprint \"Performing sentiment analysis...\"\n\ndf = pd.read_csv(input_filename, skipinitialspace=True, sep='\\t')\ng = df.groupby('focus_id')\nflist = g['body'].apply(list)\n\nfocus_list = []\nfor k,v in flist.iteritems():\n if (isinstance(v, list)):\n if (len(v)>min_comments):\n string = ' '.join([str(i) for i in v])\n # print string\n ob = (classifier.classify(extract_features(string.split())), classifier.prob_classify(extract_features(string.split())).prob('pos'), classifier.prob_classify(extract_features(string.split())).prob('neg'), k, len(v), extract_features(string.split()))\n focus_list.insert(0, ob)\n\n# Create lists\nsorted_list = sorted(focus_list, key=lambda x: (-x[1], x[4]))\nsorted_list_rev = list(sorted_list)\nsorted_list_rev.reverse()\n\n# Filter lists\npos_list = filter(lambda x: x[0] == 'pos', sorted_list_rev)\nneg_list = filter(lambda x: x[0] == 'neg', sorted_list)\nn = int(len(sorted_list)*1.00)\nprint \"%i positive and %i negative items\" % (len(pos_list), len(neg_list))\n\n# Output files as CSV and HTML\nprint \"Writing CSV...\"\nfilename = os.path.join('output', project_slug, project_slug+'_'+str(min_comments)+'.csv')\nwith open(filename, \"wb\") as f:\n writer = csv.writer(f)\n writer.writerows(sorted_list)\n\nprint \"Writing HTML files...\"\n\nhtml = focus_list_to_html_table(pos_list)\nfilename = os.path.join('output', project_slug, project_slug+'_'+str(min_comments)+'_positive.html')\nwith open(filename, \"w\") as text_file:\n text_file.write(html)\ncall([\"open\", filename])\n\nhtml = focus_list_to_html_table(neg_list)\nfilename = os.path.join('output', project_slug, project_slug+'_'+str(min_comments)+'_negative.html')\nwith open(filename, \"w\") as text_file:\n text_file.write(html)\ncall([\"open\", filename])\n\nprint \"Done!\"\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382621,"cells":{"repo_name":{"kind":"string","value":"zorroblue/scikit-learn"},"path":{"kind":"string","value":"sklearn/kernel_approximation.py"},"copies":{"kind":"string","value":"29"},"size":{"kind":"string","value":"19022"},"content":{"kind":"string","value":"\"\"\"\nThe :mod:`sklearn.kernel_approximation` module implements several\napproximate kernel feature maps base on Fourier transforms.\n\"\"\"\n\n# Author: Andreas Mueller \n#\n# License: BSD 3 clause\n\nimport warnings\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.linalg import svd\n\nfrom .base import BaseEstimator\nfrom .base import TransformerMixin\nfrom .utils import check_array, check_random_state, as_float_array\nfrom .utils.extmath import safe_sparse_dot\nfrom .utils.validation import check_is_fitted\nfrom .metrics.pairwise import pairwise_kernels, KERNEL_PARAMS\n\n\nclass RBFSampler(BaseEstimator, TransformerMixin):\n \"\"\"Approximates feature map of an RBF kernel by Monte Carlo approximation\n of its Fourier 
transform.\n\n It implements a variant of Random Kitchen Sinks.[1]\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n gamma : float\n Parameter of RBF kernel: exp(-gamma * x^2)\n\n n_components : int\n Number of Monte Carlo samples per original feature.\n Equals the dimensionality of the computed feature space.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Notes\n -----\n See \"Random Features for Large-Scale Kernel Machines\" by A. Rahimi and\n Benjamin Recht.\n\n [1] \"Weighted Sums of Random Kitchen Sinks: Replacing\n minimization with randomization in learning\" by A. Rahimi and\n Benjamin Recht.\n (http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)\n \"\"\"\n\n def __init__(self, gamma=1., n_components=100, random_state=None):\n self.gamma = gamma\n self.n_components = n_components\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X.\n\n Samples random projection according to n_features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the transformer.\n \"\"\"\n\n X = check_array(X, accept_sparse='csr')\n random_state = check_random_state(self.random_state)\n n_features = X.shape[1]\n\n self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(\n size=(n_features, self.n_components)))\n\n self.random_offset_ = random_state.uniform(0, 2 * np.pi,\n size=self.n_components)\n return self\n\n def transform(self, X):\n \"\"\"Apply the approximate feature map to X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n \"\"\"\n check_is_fitted(self, 'random_weights_')\n\n X = check_array(X, accept_sparse='csr')\n projection = safe_sparse_dot(X, self.random_weights_)\n projection += self.random_offset_\n np.cos(projection, projection)\n projection *= np.sqrt(2.) / np.sqrt(self.n_components)\n return projection\n\n\nclass SkewedChi2Sampler(BaseEstimator, TransformerMixin):\n \"\"\"Approximates feature map of the \"skewed chi-squared\" kernel by Monte\n Carlo approximation of its Fourier transform.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n skewedness : float\n \"skewedness\" parameter of the kernel. 
Needs to be cross-validated.\n\n n_components : int\n number of Monte Carlo samples per original feature.\n Equals the dimensionality of the computed feature space.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n References\n ----------\n See \"Random Fourier Approximations for Skewed Multiplicative Histogram\n Kernels\" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.\n\n See also\n --------\n AdditiveChi2Sampler : A different approach for approximating an additive\n variant of the chi squared kernel.\n\n sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.\n \"\"\"\n\n def __init__(self, skewedness=1., n_components=100, random_state=None):\n self.skewedness = skewedness\n self.n_components = n_components\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit the model with X.\n\n Samples random projection according to n_features.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self : object\n Returns the transformer.\n \"\"\"\n\n X = check_array(X)\n random_state = check_random_state(self.random_state)\n n_features = X.shape[1]\n uniform = random_state.uniform(size=(n_features, self.n_components))\n # transform by inverse CDF of sech\n self.random_weights_ = (1. / np.pi\n * np.log(np.tan(np.pi / 2. * uniform)))\n self.random_offset_ = random_state.uniform(0, 2 * np.pi,\n size=self.n_components)\n return self\n\n def transform(self, X):\n \"\"\"Apply the approximate feature map to X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n New data, where n_samples in the number of samples\n and n_features is the number of features. All values of X must be\n strictly greater than \"-skewedness\".\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n \"\"\"\n check_is_fitted(self, 'random_weights_')\n\n X = as_float_array(X, copy=True)\n X = check_array(X, copy=False)\n if (X <= -self.skewedness).any():\n raise ValueError(\"X may not contain entries smaller than\"\n \" -skewedness.\")\n\n X += self.skewedness\n np.log(X, X)\n projection = safe_sparse_dot(X, self.random_weights_)\n projection += self.random_offset_\n np.cos(projection, projection)\n projection *= np.sqrt(2.) / np.sqrt(self.n_components)\n return projection\n\n\nclass AdditiveChi2Sampler(BaseEstimator, TransformerMixin):\n \"\"\"Approximate feature map for additive chi2 kernel.\n\n Uses sampling the fourier transform of the kernel characteristic\n at regular intervals.\n\n Since the kernel that is to be approximated is additive, the components of\n the input vectors can be treated separately. Each entry in the original\n space is transformed into 2*sample_steps+1 features, where sample_steps is\n a parameter of the method. Typical values of sample_steps include 1, 2 and\n 3.\n\n Optimal choices for the sampling interval for certain data ranges can be\n computed (see the reference). 
The default values should be reasonable.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n sample_steps : int, optional\n Gives the number of (complex) sampling points.\n sample_interval : float, optional\n Sampling interval. Must be specified when sample_steps not in {1,2,3}.\n\n Notes\n -----\n This estimator approximates a slightly different version of the additive\n chi squared kernel then ``metric.additive_chi2`` computes.\n\n See also\n --------\n SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of\n the chi squared kernel.\n\n sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.\n\n sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi\n squared kernel.\n\n References\n ----------\n See `\"Efficient additive kernels via explicit feature maps\"\n `_\n A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,\n 2011\n \"\"\"\n\n def __init__(self, sample_steps=2, sample_interval=None):\n self.sample_steps = sample_steps\n self.sample_interval = sample_interval\n\n def fit(self, X, y=None):\n \"\"\"Set parameters.\"\"\"\n X = check_array(X, accept_sparse='csr')\n if self.sample_interval is None:\n # See reference, figure 2 c)\n if self.sample_steps == 1:\n self.sample_interval_ = 0.8\n elif self.sample_steps == 2:\n self.sample_interval_ = 0.5\n elif self.sample_steps == 3:\n self.sample_interval_ = 0.4\n else:\n raise ValueError(\"If sample_steps is not in [1, 2, 3],\"\n \" you need to provide sample_interval\")\n else:\n self.sample_interval_ = self.sample_interval\n return self\n\n def transform(self, X):\n \"\"\"Apply approximate feature map to X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = (n_samples, n_features)\n\n Returns\n -------\n X_new : {array, sparse matrix}, \\\n shape = (n_samples, n_features * (2*sample_steps + 1))\n Whether the return value is an array of sparse matrix depends on\n the type of the input X.\n \"\"\"\n msg = (\"%(name)s is not fitted. Call fit to set the parameters before\"\n \" calling transform\")\n check_is_fitted(self, \"sample_interval_\", msg=msg)\n\n X = check_array(X, accept_sparse='csr')\n sparse = sp.issparse(X)\n\n # check if X has negative values. 
Doesn't play well with np.log.\n if ((X.data if sparse else X) < 0).any():\n raise ValueError(\"Entries of X must be non-negative.\")\n # zeroth component\n # 1/cosh = sech\n # cosh(0) = 1.0\n\n transf = self._transform_sparse if sparse else self._transform_dense\n return transf(X)\n\n def _transform_dense(self, X):\n non_zero = (X != 0.0)\n X_nz = X[non_zero]\n\n X_step = np.zeros_like(X)\n X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)\n\n X_new = [X_step]\n\n log_step_nz = self.sample_interval_ * np.log(X_nz)\n step_nz = 2 * X_nz * self.sample_interval_\n\n for j in range(1, self.sample_steps):\n factor_nz = np.sqrt(step_nz /\n np.cosh(np.pi * j * self.sample_interval_))\n\n X_step = np.zeros_like(X)\n X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)\n X_new.append(X_step)\n\n X_step = np.zeros_like(X)\n X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)\n X_new.append(X_step)\n\n return np.hstack(X_new)\n\n def _transform_sparse(self, X):\n indices = X.indices.copy()\n indptr = X.indptr.copy()\n\n data_step = np.sqrt(X.data * self.sample_interval_)\n X_step = sp.csr_matrix((data_step, indices, indptr),\n shape=X.shape, dtype=X.dtype, copy=False)\n X_new = [X_step]\n\n log_step_nz = self.sample_interval_ * np.log(X.data)\n step_nz = 2 * X.data * self.sample_interval_\n\n for j in range(1, self.sample_steps):\n factor_nz = np.sqrt(step_nz /\n np.cosh(np.pi * j * self.sample_interval_))\n\n data_step = factor_nz * np.cos(j * log_step_nz)\n X_step = sp.csr_matrix((data_step, indices, indptr),\n shape=X.shape, dtype=X.dtype, copy=False)\n X_new.append(X_step)\n\n data_step = factor_nz * np.sin(j * log_step_nz)\n X_step = sp.csr_matrix((data_step, indices, indptr),\n shape=X.shape, dtype=X.dtype, copy=False)\n X_new.append(X_step)\n\n return sp.hstack(X_new)\n\n\nclass Nystroem(BaseEstimator, TransformerMixin):\n \"\"\"Approximate a kernel map using a subset of the training data.\n\n Constructs an approximate feature map for an arbitrary kernel\n using a subset of the data as basis.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n kernel : string or callable, default=\"rbf\"\n Kernel map to be approximated. A callable should accept two arguments\n and the keyword arguments passed to this object as kernel_params, and\n should return a floating point number.\n\n n_components : int\n Number of features to construct.\n How many data points will be used to construct the mapping.\n\n gamma : float, default=None\n Gamma parameter for the RBF, laplacian, polynomial, exponential chi2\n and sigmoid kernels. Interpretation of the default value is left to\n the kernel; see the documentation for sklearn.metrics.pairwise.\n Ignored by other kernels.\n\n degree : float, default=None\n Degree of the polynomial kernel. 
Ignored by other kernels.\n\n coef0 : float, default=None\n Zero coefficient for polynomial and sigmoid kernels.\n Ignored by other kernels.\n\n kernel_params : mapping of string to any, optional\n Additional parameters (keyword arguments) for kernel function passed\n as callable object.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Attributes\n ----------\n components_ : array, shape (n_components, n_features)\n Subset of training points used to construct the feature map.\n\n component_indices_ : array, shape (n_components)\n Indices of ``components_`` in the training set.\n\n normalization_ : array, shape (n_components, n_components)\n Normalization matrix needed for embedding.\n Square root of the kernel matrix on ``components_``.\n\n\n References\n ----------\n * Williams, C.K.I. and Seeger, M.\n \"Using the Nystroem method to speed up kernel machines\",\n Advances in neural information processing systems 2001\n\n * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou\n \"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical\n Comparison\",\n Advances in Neural Information Processing Systems 2012\n\n\n See also\n --------\n RBFSampler : An approximation to the RBF kernel using random Fourier\n features.\n\n sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.\n \"\"\"\n def __init__(self, kernel=\"rbf\", gamma=None, coef0=None, degree=None,\n kernel_params=None, n_components=100, random_state=None):\n self.kernel = kernel\n self.gamma = gamma\n self.coef0 = coef0\n self.degree = degree\n self.kernel_params = kernel_params\n self.n_components = n_components\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Fit estimator to data.\n\n Samples a subset of training points, computes kernel\n on these and computes normalization matrix.\n\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_feature)\n Training data.\n \"\"\"\n X = check_array(X, accept_sparse='csr')\n rnd = check_random_state(self.random_state)\n n_samples = X.shape[0]\n\n # get basis vectors\n if self.n_components > n_samples:\n # XXX should we just bail?\n n_components = n_samples\n warnings.warn(\"n_components > n_samples. 
This is not possible.\\n\"\n \"n_components was set to n_samples, which results\"\n \" in inefficient evaluation of the full kernel.\")\n\n else:\n n_components = self.n_components\n n_components = min(n_samples, n_components)\n inds = rnd.permutation(n_samples)\n basis_inds = inds[:n_components]\n basis = X[basis_inds]\n\n basis_kernel = pairwise_kernels(basis, metric=self.kernel,\n filter_params=True,\n **self._get_kernel_params())\n\n # sqrt of kernel matrix on basis vectors\n U, S, V = svd(basis_kernel)\n S = np.maximum(S, 1e-12)\n self.normalization_ = np.dot(U / np.sqrt(S), V)\n self.components_ = basis\n self.component_indices_ = inds\n return self\n\n def transform(self, X):\n \"\"\"Apply feature map to X.\n\n Computes an approximate feature map using the kernel\n between some training points and X.\n\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_features)\n Data to transform.\n\n Returns\n -------\n X_transformed : array, shape=(n_samples, n_components)\n Transformed data.\n \"\"\"\n check_is_fitted(self, 'components_')\n X = check_array(X, accept_sparse='csr')\n\n kernel_params = self._get_kernel_params()\n embedded = pairwise_kernels(X, self.components_,\n metric=self.kernel,\n filter_params=True,\n **kernel_params)\n return np.dot(embedded, self.normalization_.T)\n\n def _get_kernel_params(self):\n params = self.kernel_params\n if params is None:\n params = {}\n if not callable(self.kernel):\n for param in (KERNEL_PARAMS[self.kernel]):\n if getattr(self, param) is not None:\n params[param] = getattr(self, param)\n else:\n if (self.gamma is not None or\n self.coef0 is not None or\n self.degree is not None):\n warnings.warn(\n \"Passing gamma, coef0 or degree to Nystroem when using a\"\n \" callable kernel is deprecated in version 0.19 and will\"\n \" raise an error in 0.21, as they are ignored. Use \"\n \"kernel_params instead.\", DeprecationWarning)\n\n return params\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382622,"cells":{"repo_name":{"kind":"string","value":"NelisVerhoef/scikit-learn"},"path":{"kind":"string","value":"examples/linear_model/plot_bayesian_ridge.py"},"copies":{"kind":"string","value":"248"},"size":{"kind":"string","value":"2588"},"content":{"kind":"string","value":"\"\"\"\n=========================\nBayesian Ridge Regression\n=========================\n\nComputes a Bayesian Ridge Regression on a synthetic dataset.\n\nSee :ref:`bayesian_ridge_regression` for more information on the regressor.\n\nCompared to the OLS (ordinary least squares) estimator, the coefficient\nweights are slightly shifted toward zeros, which stabilises them.\n\nAs the prior on the weights is a Gaussian prior, the histogram of the\nestimated weights is Gaussian.\n\nThe estimation of the model is done by iteratively maximizing the\nmarginal log-likelihood of the observations.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nfrom sklearn.linear_model import BayesianRidge, LinearRegression\n\n###############################################################################\n# Generating simulated data with Gaussian weigthts\nnp.random.seed(0)\nn_samples, n_features = 100, 100\nX = np.random.randn(n_samples, n_features) # Create Gaussian data\n# Create weigts with a precision lambda_ of 4.\nlambda_ = 4.\nw = np.zeros(n_features)\n# Only keep 10 weights of interest\nrelevant_features = np.random.randint(0, n_features, 10)\nfor i in relevant_features:\n w[i] = stats.norm.rvs(loc=0, scale=1. 
/ np.sqrt(lambda_))\n# Create noise with a precision alpha of 50.\nalpha_ = 50.\nnoise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)\n# Create the target\ny = np.dot(X, w) + noise\n\n###############################################################################\n# Fit the Bayesian Ridge Regression and an OLS for comparison\nclf = BayesianRidge(compute_score=True)\nclf.fit(X, y)\n\nols = LinearRegression()\nols.fit(X, y)\n\n###############################################################################\n# Plot true weights, estimated weights and histogram of the weights\nplt.figure(figsize=(6, 5))\nplt.title(\"Weights of the model\")\nplt.plot(clf.coef_, 'b-', label=\"Bayesian Ridge estimate\")\nplt.plot(w, 'g-', label=\"Ground truth\")\nplt.plot(ols.coef_, 'r--', label=\"OLS estimate\")\nplt.xlabel(\"Features\")\nplt.ylabel(\"Values of the weights\")\nplt.legend(loc=\"best\", prop=dict(size=12))\n\nplt.figure(figsize=(6, 5))\nplt.title(\"Histogram of the weights\")\nplt.hist(clf.coef_, bins=n_features, log=True)\nplt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),\n 'ro', label=\"Relevant features\")\nplt.ylabel(\"Features\")\nplt.xlabel(\"Values of the weights\")\nplt.legend(loc=\"lower left\")\n\nplt.figure(figsize=(6, 5))\nplt.title(\"Marginal log-likelihood\")\nplt.plot(clf.scores_)\nplt.ylabel(\"Score\")\nplt.xlabel(\"Iterations\")\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382623,"cells":{"repo_name":{"kind":"string","value":"QuLogic/burnman"},"path":{"kind":"string","value":"misc/paper_incorrect_averaging.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8987"},"content":{"kind":"string","value":"# BurnMan - a lower mantle toolkit\n# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.\n# Released under GPL v2 or later.\n\n\"\"\"\n\npaper_incorrect_averaging\n-------------------------\n\n\nThis script reproduces :cite:`Cottaar2014`, Figure 5. 
\nAttempt to reproduce Figure 6.12 from :cite:`Murakami2013`\n\"\"\"\n\nimport os, sys, numpy as np, matplotlib.pyplot as plt\n#hack to allow scripts to be placed in subdirectories next to burnman:\nif not os.path.exists('burnman') and os.path.exists('../burnman'):\n sys.path.insert(1,os.path.abspath('..'))\n\nimport burnman\nfrom burnman import minerals\nfrom burnman.mineral_helpers import HelperSolidSolution\nimport matplotlib.image as mpimg\nimport colors\n\nif __name__ == \"__main__\":\n plt.figure(dpi=100,figsize=(12,6))\n prop={'size':12}\n plt.rc('text', usetex=True)\n plt.rcParams['text.latex.preamble'] = '\\usepackage{relsize}'\n\n dashstyle2=(7,3)\n dashstyle3=(3,2)\n\n method = 'slb2'\n\n #define the minerals from table 6.3\n mg_perovskite = burnman.Mineral()\n mg_perovskite.params = {'name': 'Mg perovskite',\n 'molar_mass' : 0.1004,\n 'V_0': 24.43e-6,\n 'K_0': 253.0e9,\n 'Kprime_0': 3.9,\n 'G_0' : 172.9e9,\n 'Gprime_0' : 1.56,\n 'n': 5.0,\n 'Debye_0': 1100.,\n 'grueneisen_0': 1.40,\n 'q_0': 1.40,\n 'eta_s_0' : 2.6}\n mg_perovskite.set_method('slb2')\n\n fe_perovskite = burnman.Mineral()\n fe_perovskite.params = {'name': 'Fe perovskite',\n 'molar_mass' : 0.1319,\n 'V_0': 25.49e-6,\n 'K_0': 281.0e9,\n 'Kprime_0': 4.1,\n 'G_0' : 138.0e9,\n 'Gprime_0' : 1.70,\n 'n': 5.0,\n 'Debye_0': 841.,\n 'grueneisen_0': 1.48,\n 'q_0': 1.40,\n 'eta_s_0' : 2.1}\n fe_perovskite.set_method(method)\n\n periclase = burnman.Mineral()\n periclase.params = {'name': 'periclase',\n 'molar_mass' : 0.0403,\n 'V_0': 11.24e-6,\n 'K_0': 161.0e9,\n 'Kprime_0': 3.9,\n 'G_0' : 130.9e9,\n 'Gprime_0' : 1.92,\n 'n': 2.0,\n 'Debye_0': 773.,\n 'grueneisen_0': 1.50,\n 'q_0': 1.50,\n 'eta_s_0' : 2.3}\n periclase.set_method(method)\n\n wustite = burnman.Mineral()\n wustite.params = {'name': 'wustite',\n 'molar_mass' : 0.07184,\n 'V_0': 12.06e-6,\n 'K_0': 152.0e9,\n 'Kprime_0': 4.9,\n 'G_0' : 47.0e9,\n 'Gprime_0' : 0.70,\n 'n': 2.0,\n 'Debye_0': 455.,\n 'grueneisen_0': 1.28,\n 'q_0': 1.50,\n 'eta_s_0' : 0.8}\n wustite.set_method(method)\n\n\n #in the text for the book chapter a linear relationship in elastic properties\n #for the solid solutions is assumed...\n class ferropericlase(HelperSolidSolution):\n def __init__(self, fe_num):\n endmembers = [periclase, wustite]\n molar_fractions = [1. - fe_num, 0.0 + fe_num]\n HelperSolidSolution.__init__(self, endmembers, molar_fractions)\n\n\n\n class perovskite(HelperSolidSolution):\n def __init__(self, fe_num):\n endmembers = [mg_perovskite, fe_perovskite]\n molar_fractions = [1. 
- fe_num, 0.0 + fe_num]\n HelperSolidSolution.__init__(self, endmembers, molar_fractions)\n\n\n #define the P-T path\n pressure = np.linspace(28.0e9, 129e9, 25.)\n temperature_bs = burnman.geotherm.brown_shankland(pressure)\n temperature_an = burnman.geotherm.anderson(pressure)\n\n #seismic model for comparison:\n seismic_model = burnman.seismic.PREM() # pick from .prem() .slow() .fast() (see burnman/seismic.py)\n depths = map(seismic_model.depth, pressure)\n seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)\n\n #pure perovskite\n perovskitite = burnman.Composite( ( (perovskite(0.06), 1.0),) )\n perovskitite.set_method(method)\n\n #pure periclase\n periclasite = burnman.Composite( ( (ferropericlase(0.21), 1.0),))\n periclasite.set_method(method)\n\n #pyrolite (80% perovskite)\n pyrolite = burnman.Composite( ( (perovskite(0.06), 0.834),\n (ferropericlase(0.21), 0.166) ) )\n pyrolite.set_method(method)\n\n #preferred mixture?\n amount_perovskite = 0.92\n preferred_mixture = burnman.Composite( ( (perovskite(0.06), amount_perovskite),\n (ferropericlase(0.21), 1.0-amount_perovskite) ) )\n preferred_mixture.set_method(method)\n\n\n mat_rho_1, mat_vp_1, mat_vs_1, mat_vphi_1, mat_K_1, mat_G_1 = burnman.velocities_from_rock(perovskitite,seis_p, temperature_bs)\n mat_rho_2, mat_vp_2, mat_vs_2, mat_vphi_2, mat_K_2, mat_G_2 = burnman.velocities_from_rock(periclasite,seis_p, temperature_bs)\n mat_rho_3, mat_vp_3, mat_vs_3, mat_vphi_3, mat_K_3, mat_G_3 = burnman.velocities_from_rock(pyrolite,seis_p, temperature_bs)\n mat_rho_4, mat_vp_4, mat_vs_4, mat_vphi_4, mat_K_4, mat_G_4 = burnman.velocities_from_rock(preferred_mixture,seis_p, temperature_bs)\n\n\n\n ### HERE IS THE STEP WITH THE INCORRECT MIXING ###\n # comment this out to have correct phase averaging, leave it in to have incorrect phase averaging\n\n mat_vs_3_wr = 0.5*((0.834*mat_vs_1 + 0.166*mat_vs_2) + np.ones_like(mat_vs_1)/(0.834/mat_vs_1 + 0.166/mat_vs_2))\n mat_vs_4_wr = 0.5*((0.92*mat_vs_1 + 0.08*mat_vs_2) + np.ones_like(mat_vs_1)/(0.92/mat_vs_1 + 0.08/mat_vs_2))\n\n plt.subplot(1,2,2)\n plt.ylim(5.2,7.4)\n plt.xlim(25,135)\n #fig1 = mpimg.imread('input_figures/murakami_book_chapter.png')\n #plt.imshow(fig1, extent=[25,135,5.0,7.6], aspect='auto')\n plt.plot(seis_p/1.e9,seis_vs/1.e3,color='k',linestyle='-',marker='None',markerfacecolor='w',markersize=4,label='PREM',linewidth=3.0,mew=1.5)\n plt.plot(seis_p/1.e9,mat_vs_1/1.e3,color=colors.color(3),marker='v',markerfacecolor=colors.color(3), \\\n markersize=4, markeredgecolor=colors.color(3),markevery=2,linewidth=1.5,label='perovskite')\n plt.plot(seis_p/1.e9,mat_vs_2/1.e3,color=colors.color(1),linestyle='-', \\\n linewidth=1.5,marker='^',markerfacecolor=colors.color(1), markersize=4, \\\n markeredgecolor=colors.color(1),markevery=2,label='periclase')\n plt.plot(seis_p/1.e9,mat_vs_4_wr/1.e3,color=colors.color(4),dashes=dashstyle3, \\\n linewidth=1.5,marker='o',markerfacecolor=colors.color(4), markersize=4, \\\n markeredgecolor=colors.color(4),markevery=2,label='92\\% pv')\n plt.plot(seis_p/1.e9,mat_vs_3_wr/1.e3,color='g',linestyle='-',dashes=dashstyle2, \\\n linewidth=1.5,marker='o',markerfacecolor='w', markersize=4, markeredgecolor='g',markevery=2,label='83\\% pv')\n plt.legend(loc='lower right',prop={'size':12})\n\n\n plt.title(\"Phase average on velocities\")\n\n plt.xlabel(\"Pressure (GPa)\")\n\n plt.subplot(1,2,1)\n plt.ylim(5.2,7.4)\n plt.xlim(25,135)\n #fig1 = mpimg.imread('input_figures/murakami_book_chapter.png')\n #plt.imshow(fig1, 
extent=[25,135,5.0,7.6], aspect='auto')\n plt.plot(seis_p/1.e9,seis_vs/1.e3,color='k',linestyle='-',marker='None',markerfacecolor='w',markersize=4,label='PREM',linewidth=3.0,mew=1.5)\n plt.plot(seis_p/1.e9,mat_vs_1/1.e3,color=colors.color(3),marker='v',markerfacecolor=colors.color(3), \\\n markersize=4, markeredgecolor=colors.color(3),markevery=2,linewidth=1.5,label='perovskite')\n plt.plot(seis_p/1.e9,mat_vs_2/1.e3,color=colors.color(1),linestyle='-', \\\n linewidth=1.5,marker='^',markerfacecolor=colors.color(1), markersize=4, \\\n markeredgecolor=colors.color(1),markevery=2,label='periclase')\n plt.plot(seis_p/1.e9,mat_vs_4/1.e3,color=colors.color(4),dashes=dashstyle3, \\\n linewidth=1.5,marker='o',markerfacecolor=colors.color(4), markersize=4, \\\n markeredgecolor=colors.color(4),markevery=2,label='92\\% pv')\n plt.plot(seis_p/1.e9,mat_vs_3/1.e3,color='g',linestyle='-',dashes=dashstyle2, \\\n linewidth=1.5,marker='o',markerfacecolor='w', markersize=4, markeredgecolor='g',markevery=2, label='83\\% pv')\n\n plt.title(\" V.-R.-H. on moduli\")\n plt.xlabel(\"Pressure (GPa)\")\n plt.ylabel(\"Shear Velocity Vs (km/s)\")\n plt.tight_layout()\n plt.savefig(\"example_incorrect_averaging.pdf\",bbox_inches='tight')\n plt.show()\n\n\n\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":382624,"cells":{"repo_name":{"kind":"string","value":"DailyActie/Surrogate-Model"},"path":{"kind":"string","value":"01-codes/scikit-learn-master/sklearn/linear_model/tests/test_ransac.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"17468"},"content":{"kind":"string","value":"import numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_array_equal\nfrom numpy.testing import assert_equal, assert_raises\nfrom scipy import sparse\nfrom sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso\nfrom sklearn.linear_model.ransac import _dynamic_max_trials\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_less\nfrom sklearn.utils.testing import assert_raises_regexp\nfrom sklearn.utils.testing import assert_warns\n\n# Generate coordinates of line\nX = np.arange(-200, 200)\ny = 0.2 * X + 20\ndata = np.column_stack([X, y])\n\n# Add some faulty data\noutliers = np.array((10, 30, 200))\ndata[outliers[0], :] = (1000, 1000)\ndata[outliers[1], :] = (-1000, -1000)\ndata[outliers[2], :] = (-100, -50)\n\nX = data[:, 0][:, np.newaxis]\ny = data[:, 1]\n\n\ndef test_ransac_inliers_outliers():\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n\n # Estimate parameters of corrupted data\n ransac_estimator.fit(X, y)\n\n # Ground truth / reference inlier mask\n ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_\n ).astype(np.bool_)\n ref_inlier_mask[outliers] = False\n\n assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)\n\n\ndef test_ransac_is_data_valid():\n def is_data_valid(X, y):\n assert_equal(X.shape[0], 2)\n assert_equal(y.shape[0], 2)\n return False\n\n X = np.random.rand(10, 2)\n y = np.random.rand(10, 1)\n\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5,\n is_data_valid=is_data_valid,\n random_state=0)\n\n assert_raises(ValueError, ransac_estimator.fit, X, y)\n\n\ndef test_ransac_is_model_valid():\n def is_model_valid(estimator, X, y):\n 
assert_equal(X.shape[0], 2)\n assert_equal(y.shape[0], 2)\n return False\n\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5,\n is_model_valid=is_model_valid,\n random_state=0)\n\n assert_raises(ValueError, ransac_estimator.fit, X, y)\n\n\ndef test_ransac_max_trials():\n base_estimator = LinearRegression()\n\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, max_trials=0,\n random_state=0)\n assert_raises(ValueError, ransac_estimator.fit, X, y)\n\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, max_trials=11,\n random_state=0)\n assert getattr(ransac_estimator, 'n_trials_', None) is None\n ransac_estimator.fit(X, y)\n assert_equal(ransac_estimator.n_trials_, 2)\n\n\ndef test_ransac_stop_n_inliers():\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, stop_n_inliers=2,\n random_state=0)\n ransac_estimator.fit(X, y)\n\n assert_equal(ransac_estimator.n_trials_, 1)\n\n\ndef test_ransac_stop_score():\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, stop_score=0,\n random_state=0)\n ransac_estimator.fit(X, y)\n\n assert_equal(ransac_estimator.n_trials_, 1)\n\n\ndef test_ransac_score():\n X = np.arange(100)[:, None]\n y = np.zeros((100,))\n y[0] = 1\n y[1] = 100\n\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=0.5, random_state=0)\n ransac_estimator.fit(X, y)\n\n assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)\n assert_less(ransac_estimator.score(X[:2], y[:2]), 1)\n\n\ndef test_ransac_predict():\n X = np.arange(100)[:, None]\n y = np.zeros((100,))\n y[0] = 1\n y[1] = 100\n\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=0.5, random_state=0)\n ransac_estimator.fit(X, y)\n\n assert_equal(ransac_estimator.predict(X), np.zeros(100))\n\n\ndef test_ransac_resid_thresh_no_inliers():\n # When residual_threshold=0.0 there are no inliers and a\n # ValueError with a message should be raised\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=0.0, random_state=0)\n\n assert_raises_regexp(ValueError,\n \"No inliers.*residual_threshold.*0\\.0\",\n ransac_estimator.fit, X, y)\n\n\ndef test_ransac_sparse_coo():\n X_sparse = sparse.coo_matrix(X)\n\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n ransac_estimator.fit(X_sparse, y)\n\n ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_\n ).astype(np.bool_)\n ref_inlier_mask[outliers] = False\n\n assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)\n\n\ndef test_ransac_sparse_csr():\n X_sparse = sparse.csr_matrix(X)\n\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n ransac_estimator.fit(X_sparse, y)\n\n ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_\n ).astype(np.bool_)\n ref_inlier_mask[outliers] = False\n\n assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)\n\n\ndef test_ransac_sparse_csc():\n X_sparse = sparse.csc_matrix(X)\n\n base_estimator = LinearRegression()\n 
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n ransac_estimator.fit(X_sparse, y)\n\n ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_\n ).astype(np.bool_)\n ref_inlier_mask[outliers] = False\n\n assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)\n\n\ndef test_ransac_none_estimator():\n base_estimator = LinearRegression()\n\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)\n\n ransac_estimator.fit(X, y)\n ransac_none_estimator.fit(X, y)\n\n assert_array_almost_equal(ransac_estimator.predict(X),\n ransac_none_estimator.predict(X))\n\n\ndef test_ransac_min_n_samples():\n base_estimator = LinearRegression()\n ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n ransac_estimator2 = RANSACRegressor(base_estimator,\n min_samples=2. / X.shape[0],\n residual_threshold=5, random_state=0)\n ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,\n residual_threshold=5, random_state=0)\n ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,\n residual_threshold=5, random_state=0)\n ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,\n residual_threshold=5, random_state=0)\n ransac_estimator6 = RANSACRegressor(base_estimator,\n residual_threshold=5, random_state=0)\n ransac_estimator7 = RANSACRegressor(base_estimator,\n min_samples=X.shape[0] + 1,\n residual_threshold=5, random_state=0)\n\n ransac_estimator1.fit(X, y)\n ransac_estimator2.fit(X, y)\n ransac_estimator5.fit(X, y)\n ransac_estimator6.fit(X, y)\n\n assert_array_almost_equal(ransac_estimator1.predict(X),\n ransac_estimator2.predict(X))\n assert_array_almost_equal(ransac_estimator1.predict(X),\n ransac_estimator5.predict(X))\n assert_array_almost_equal(ransac_estimator1.predict(X),\n ransac_estimator6.predict(X))\n\n assert_raises(ValueError, ransac_estimator3.fit, X, y)\n assert_raises(ValueError, ransac_estimator4.fit, X, y)\n assert_raises(ValueError, ransac_estimator7.fit, X, y)\n\n\ndef test_ransac_multi_dimensional_targets():\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n\n # 3-D target values\n yyy = np.column_stack([y, y, y])\n\n # Estimate parameters of corrupted data\n ransac_estimator.fit(X, yyy)\n\n # Ground truth / reference inlier mask\n ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_\n ).astype(np.bool_)\n ref_inlier_mask[outliers] = False\n\n assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)\n\n\n# XXX: Remove in 0.20\ndef test_ransac_residual_metric():\n residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)\n residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)\n\n yyy = np.column_stack([y, y, y])\n\n base_estimator = LinearRegression()\n ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0,\n residual_metric=residual_metric1)\n ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0,\n residual_metric=residual_metric2)\n\n # multi-dimensional\n ransac_estimator0.fit(X, yyy)\n assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)\n assert_warns(DeprecationWarning, 
ransac_estimator2.fit, X, yyy)\n assert_array_almost_equal(ransac_estimator0.predict(X),\n ransac_estimator1.predict(X))\n assert_array_almost_equal(ransac_estimator0.predict(X),\n ransac_estimator2.predict(X))\n\n # one-dimensional\n ransac_estimator0.fit(X, y)\n assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)\n assert_array_almost_equal(ransac_estimator0.predict(X),\n ransac_estimator2.predict(X))\n\n\ndef test_ransac_residual_loss():\n loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)\n loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)\n\n loss_mono = lambda y_true, y_pred: np.abs(y_true - y_pred)\n yyy = np.column_stack([y, y, y])\n\n base_estimator = LinearRegression()\n ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0)\n ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0,\n loss=loss_multi1)\n ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0,\n loss=loss_multi2)\n\n # multi-dimensional\n ransac_estimator0.fit(X, yyy)\n ransac_estimator1.fit(X, yyy)\n ransac_estimator2.fit(X, yyy)\n assert_array_almost_equal(ransac_estimator0.predict(X),\n ransac_estimator1.predict(X))\n assert_array_almost_equal(ransac_estimator0.predict(X),\n ransac_estimator2.predict(X))\n\n # one-dimensional\n ransac_estimator0.fit(X, y)\n ransac_estimator2.loss = loss_mono\n ransac_estimator2.fit(X, y)\n assert_array_almost_equal(ransac_estimator0.predict(X),\n ransac_estimator2.predict(X))\n ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,\n residual_threshold=5, random_state=0,\n loss=\"squared_loss\")\n ransac_estimator3.fit(X, y)\n assert_array_almost_equal(ransac_estimator0.predict(X),\n ransac_estimator2.predict(X))\n\n\ndef test_ransac_default_residual_threshold():\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n random_state=0)\n\n # Estimate parameters of corrupted data\n ransac_estimator.fit(X, y)\n\n # Ground truth / reference inlier mask\n ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_\n ).astype(np.bool_)\n ref_inlier_mask[outliers] = False\n\n assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)\n\n\ndef test_ransac_dynamic_max_trials():\n # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in\n # Hartley, R.~I. 
and Zisserman, A., 2004,\n # Multiple View Geometry in Computer Vision, Second Edition,\n # Cambridge University Press, ISBN: 0521540518\n\n # e = 0%, min_samples = X\n assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)\n\n # e = 5%, min_samples = 2\n assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)\n # e = 10%, min_samples = 2\n assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)\n # e = 30%, min_samples = 2\n assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)\n # e = 50%, min_samples = 2\n assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)\n\n # e = 5%, min_samples = 8\n assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)\n # e = 10%, min_samples = 8\n assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)\n # e = 30%, min_samples = 8\n assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)\n # e = 50%, min_samples = 8\n assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)\n\n # e = 0%, min_samples = 10\n assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)\n assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))\n\n base_estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n stop_probability=-0.1)\n assert_raises(ValueError, ransac_estimator.fit, X, y)\n ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,\n stop_probability=1.1)\n assert_raises(ValueError, ransac_estimator.fit, X, y)\n\n\ndef test_ransac_fit_sample_weight():\n ransac_estimator = RANSACRegressor(random_state=0)\n n_samples = y.shape[0]\n weights = np.ones(n_samples)\n ransac_estimator.fit(X, y, weights)\n # sanity check\n assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)\n\n ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_\n ).astype(np.bool_)\n ref_inlier_mask[outliers] = False\n # check that mask is correct\n assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)\n\n # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where\n # X = X1 repeated n1 times, X2 repeated n2 times and so forth\n random_state = check_random_state(0)\n X_ = random_state.randint(0, 200, [10, 1])\n y_ = np.ndarray.flatten(0.2 * X_ + 2)\n sample_weight = random_state.randint(0, 10, 10)\n outlier_X = random_state.randint(0, 1000, [1, 1])\n outlier_weight = random_state.randint(0, 10, 1)\n outlier_y = random_state.randint(-1000, 0, 1)\n\n X_flat = np.append(np.repeat(X_, sample_weight, axis=0),\n np.repeat(outlier_X, outlier_weight, axis=0), axis=0)\n y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),\n np.repeat(outlier_y, outlier_weight, axis=0),\n axis=0))\n ransac_estimator.fit(X_flat, y_flat)\n ref_coef_ = ransac_estimator.estimator_.coef_\n\n sample_weight = np.append(sample_weight, outlier_weight)\n X_ = np.append(X_, outlier_X, axis=0)\n y_ = np.append(y_, outlier_y)\n ransac_estimator.fit(X_, y_, sample_weight)\n\n assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)\n\n # check that if base_estimator.fit doesn't support\n # sample_weight, raises error\n base_estimator = Lasso()\n ransac_estimator = RANSACRegressor(base_estimator)\n assert_raises(ValueError, ransac_estimator.fit, X, y, weights)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382625,"cells":{"repo_name":{"kind":"string","value":"jwdebelius/break_4w"},"path":{"kind":"string","value":"break4w/data_dictionary.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"22592"},"content":{"kind":"string","value":"\"\"\"\nI need really good documentation for this 
module that I haven't begun to\nwrite yet. This will be really important?\n\"\"\"\n\nfrom collections import OrderedDict\nimport datetime\nimport pydoc\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas.api.types import CategoricalDtype\n\nfrom break4w.question import Question\nfrom break4w.categorical import Categorical\nfrom break4w.bool import Bool\nfrom break4w.continous import Continous\nimport break4w._defaults as b4wdefaults\n\ntype_lookup = {'continous': Continous,\n 'categorical': Categorical,\n 'multiple choice': Categorical,\n 'ordinal': Categorical,\n 'bool': Bool,\n 'boolean': Bool,\n 'yes/no': Bool,\n }\n\n\n\nclass DataDictionary(OrderedDict):\n \"\"\"\n Generates a data dictionary object\n\n Parameters\n ----------\n columns: list of dicts\n A list of dictionaries representing each column in the metadata. \n The dictionaries must contain a `name` key, describing the column \n name. The values in the dictionary should the variables needed \n for each type of question object in a data dictionary.\n types: list of strings\n A description of the type of question being asked. These come \n from a relatively controlled vocabulary and include types such as \n `\"continous\", \"categorical\", \"bool\"`. If the question type does \n not conform to the controlled vocabulary, the column will be \n read as a Question object with limited functionality.\n description: str\n A description of the data dictionary or study of no more than\n 80 characters.\n\n \"\"\"\n default_cols = ['name', 'description', 'type', 'dtype', 'order', \n 'units', 'ambigious', 'missing', 'notes']\n\n def __init__(self, columns, types, description=None):\n \"\"\"Initializes the dictionary object\n\n This is a very basic prototype of the data dictionary object\n \"\"\"\n\n self.log = []\n if description is None:\n self.description = ''\n elif len(description) > 80:\n raise ValueError('The dictionary description cannot be more than '\n '80 characters')\n else:\n self.description = description\n\n # Adds the question objects to the dictionary\n for col_, type_ in zip(*(columns, types)):\n self.add_question(question_data=col_,\n question_type=type_,\n record=False,\n check=False)\n self.columns = list(self.keys())\n\n def __str__(self):\n \"\"\"\n Generates printed summary\n \"\"\"\n summary = ['Data Dictionary with %i columns' % len(self)]\n if len(self.description) > 0:\n summary.append('\\t%s' % self.description)\n summary.append('-----------------------------------------------------'\n '-------------------------------')\n \n for col in self.values():\n summary.append('%s (%s)\\n\\t%s' % (col.name, col.type, col.description))\n summary.append('-----------------------------------------------------'\n '-------------------------------')\n return '\\n'.join(summary)\n\n def _update_log(self, command, column=None,\n transform_type=None, transformation=None):\n \"\"\"Used for internal tracking of the columns and data\n\n Every time a Question acts on data, a record should be made of\n the transformation. (See break4w.question.Question._update_log).\n However, this also tracks the transformation on the dictionary\n level.\n\n Parameters\n ----------\n command : str\n A short textual description of the command performed. This\n may be the function name in text format.\n column : str, optional\n The column in the metadata being explored.\n transform_type: str, optional\n A more general description of the type of action that was\n performed. 
Ideally, this comes for a preset list of possible\n actions, and the descriptions are consistent.\n transformation: str, optional\n Explains exactly how values were changed.\n\n \"\"\"\n self.log.append({\n 'timestamp': datetime.datetime.now(),\n 'column': column,\n 'command': command,\n 'transform_type': transform_type,\n 'transformation': transformation,\n })\n\n def _pull_question_log(self, column=None):\n \"\"\"Adds information from the specified column to the log.\"\"\"\n raise NotImplementedError\n\n def add_question(self, question_data, question_type='',\n check=True, record=True, var_delim=' | ', code_delim='=', \n null_value='None'):\n \"\"\"\n Adds a new question object to the data dictionary\n\n Parameters\n ----------\n question_data: Dict, Question\n Describes the data dictionary entry for the question. This can\n be a break4w question object created directly, or a dictionary\n objecting with information like the name in the metadata\n representation, data type, a description, and specific information\n for the type of question. For instance, `question_type` specified\n the qustion was `\"continous\"`, the `question_data` must also\n describe units for the question.\n question_type: str, optional\n Describes the type of question object that should be selected\n for the question. If `question_data` is a `Question` object, then\n no `question_type` is needed.\n check: bool, optional\n Checks whether a name already exists in the question name space.\n If this is true, then the function will check if the column \n already exists in the dictionary. If the column does exist and \n check is true, an error will be raised. If check is not true, the\n data dictionary entry for the column will be overwritten and any\n information in that column will be lost.\n record, bool, optional\n Indicates where the addition should be logged.\n read_numeric_codes: bool, optional\n Whether columns should be read with a numerical delimiter (i.e\n \"=\") to parse a numeric value into a categorical one. For example,\n if numeric is true, then \"0=female | 1=male\" would be parsed that\n any data encoded as 0 maps to female, any data encoded as 1 maps\n to male, and the order of hte values is `[0, 1]` (corresponding to\n `['female', 'male']`). Otherwise, the line would be read\n literally, and the order is read as `[\"0=female\", \"1=male\"]`.\n val_delim: str, optional\n The seperator between values in the \"order\" column.\n code_delim: str, optional\n The delimiter between a numericly coded categorical variable and\n the value it maps to.\n\n Raises\n ------\n ValueError\n When the function is checking for the column and the column name\n is already in the dictionary. 
If this is the case, the dictionary\n entry should be adjusted using `update_question`, not\n `add_question`, since this function will otherwise over write the\n existing column.\n\n \"\"\"\n error1 = False\n\n # Converts to a Question object\n question_object = type_lookup.get(question_type.lower(), Question)\n\n if isinstance(question_data, pd.Series):\n question_data.dropna(inplace=True)\n question_data = question_object._read_series(\n question_data, var_delim=var_delim, \n code_delim=code_delim, null_value=null_value,\n )\n elif isinstance(question_data, dict):\n question_data = question_object(**question_data)\n elif isinstance(question_data, Question):\n pass\n else:\n message = ('question_data must be a Question, dict, or'\n ' Series')\n if record:\n self._update_log('add column', \n column=None, \n transformation=message,\n transform_type='error')\n raise ValueError(message)\n\n name = question_data.name\n\n # Checks if the question is in the dictionary\n if (name in self.keys()) and check:\n error1 = True\n message = '%s already has a dictionary entry' % name\n transform_type = 'error'\n else:\n message = '%s was added to the dictionary' % name\n transform_type = None\n\n # Updates the log\n if record:\n self._update_log('add column', \n column=name, \n transformation=message,\n transform_type=transform_type)\n\n # Raises an error or updates the dictionary, as appropriate\n if error1:\n raise ValueError(message)\n else:\n self[name] = question_data\n self.columns = list(self.keys())\n\n def get_question(self, name):\n \"\"\"\n Returns the data dictionary column\n\n Parameters\n ----------\n name: str\n The name of the dictionary column to be returned\n\n Returns\n -------\n Question\n The question object for the appropriate dictionary\n object\n\n Raises\n ------\n ValueError\n When the column being asked for does not exist.\n \"\"\"\n if name not in self.keys():\n message = 'There is no entry for %s' % name\n self._update_log(column=name,\n command='get question',\n transform_type='error',\n transformation=message)\n raise ValueError(message)\n self._update_log(column=name, command='get question')\n return self[name]\n\n def drop_question(self, name):\n \"\"\"\n Removes a dictionary entry for the specified column.\n\n Parameters\n ----------\n name: str\n The name of the dictionary column to be returned\n \"\"\"\n if name in self.keys():\n del self[name]\n self.columns = list(self.keys())\n self._update_log(command='remove question', column=name)\n\n def update_question(self, update, name=None):\n \"\"\"\n Updates dictionary entry for the data\n\n Parameters\n ----------\n update: Dict, Question\n Describes the data dictionary entry for the question. This can\n be a break4w question object created directly, or a dictionary\n objecting with information like the name in the metadata\n representation, data type, a description, and specific information\n for the type of question. For instance, `question_type` specified\n the qustion was `\"continous\"`, the `question_data` must also\n describe units for the question.\n name: str, optional\n The name of the dictionary column to be returned. 
If `update` is\n a Question object, this can be infered from the question.\n \"\"\"\n \n # Gets the dictionary of the new column and column name\n if isinstance(update, Question):\n update = vars(update)\n\n if name is None:\n name = update['name']\n\n # Checks if the data is already in the dictionary\n if name not in self.keys():\n message = ('%s is not a question in the current dictionary.\\n'\n 'Have you tried adding the question?') % name\n self._update_log(command='update question',\n column=name,\n transform_type='error',\n transformation=message)\n raise ValueError(message)\n current = vars(self[name])\n diff = {k: v for k, v in update.items()\n if (((k not in current) or (v != current[k])) and\n (k not in {'log'}))\n }\n change_keys = {}\n for k, v in diff.items():\n if k in current:\n change_keys[k] = (current[k], v)\n else:\n change_keys[k] = ('add', v)\n setattr(self[name], k, v)\n if 'log' in update:\n self[name].log.extend(update['log'])\n self._update_log(\n command='update question',\n column=name,\n transform_type='update dictionary values',\n transformation=' | '.join(['%s : %s > %s' % (k, v[0], v[1])\n for k, v in change_keys.items()]))\n\n def validate(self, map_, check_order=True):\n \"\"\"\n Checks columns appear in the mapping file in the appropriate order\n and conform to the standards set in the data dictionary.\n\n Parameters\n ----------\n map_ : DataFrame\n A pandas object containing the metadata being analyzed.\n check_order: bool, optional\n Do the order of columns in the data dictionary and metadata have\n to match?\n \"\"\"\n pass_ = True\n failures = []\n fail_message = []\n self._validate_question_order(map_, check_order)\n for name, question in self.items():\n if question.type == 'Question':\n continue\n try:\n question.validate(map_)\n except:\n pass_ = False\n failures.append(\n '\\t%s - %s' % (name, question.log[-1]['transformation'])\n )\n self.log.append(question.log[-1])\n\n if pass_:\n self._update_log('validate', transform_type='pass',\n transformation='All columns passed')\n else:\n message = ('There were issues with the following columns:\\n%s'\n % '\\n'.join(failures))\n message_l = (('There were issues with the following columns:\\n%s'\n '\\nPlease See the log for more details.') \n % '\\n'.join([fail.split(' - ')[0].replace('\\t', '') \n for fail in failures]))\n self._update_log('validate', transform_type='error',\n transformation=message_l)\n raise ValueError(message)\n\n def _validate_question_order(self, map_, check_order=True, record=True,\n verbose=False):\n \"\"\"\n Checks all the required questions are present in the mapping file\n and that they are in the correct order.\n\n Parameters\n ----------\n map_ : DataFrame\n A pandas object containing the metadata being analyzed.\n check_order: bool, optional\n Do the order of columns in the data dictionary and metadata have\n to match?\n record: bool, optional\n Indicates where the addition should be logged.\n verbose: bool, optional\n Provides more detailed information about the error\n\n Raises\n ------\n ValueError\n\n \"\"\"\n pass_ = True\n message = ('The columns in the mapping file match the columns in '\n 'the data dictionary.')\n map_columns = list(map_.columns)\n dict_columns = list(self.keys())\n\n if not set(map_columns) == set(dict_columns):\n pass_ = False\n in_map = list(set(map_columns) - set(dict_columns))\n in_dict = list(set(dict_columns) - set(map_columns))\n text = ('There are %i columns in the data dictionary '\n 'not in the mapping file, and %i from the mapping'\n ' 
file not in the data dictionary.'\n % (len(in_dict), len(in_map)))\n if len(in_dict) > 0:\n not_map = ('In the dictionary but not in the map: \\n\\t%s\\n' \n % '; '.join(in_dict))\n else:\n not_map = ''\n\n if len(in_map) > 0:\n t_ = '\\nIn the map but not in the dictionary:\\n\\t%s\\n'\n not_dict = t_ % '; '.join(in_map)\n else:\n not_dict = ''\n\n if verbose:\n message = '%s%s%s' % (text, not_map, not_dict)\n # message = not_dict\n else:\n message = text\n\n elif not (map_columns == dict_columns) and check_order:\n pass_ = False\n message = ('The columns in the dictionary and map are not in'\n ' the same order.')\n\n if record and pass_:\n self._update_log(command='validate', transform_type='pass',\n transformation=message)\n elif record and not pass_:\n self._update_log(command='validate', transform_type='fail',\n transformation=message)\n raise ValueError(message)\n elif not pass_:\n raise ValueError(message)\n\n def to_dataframe(self, clean=False, val_delim=' | ', code_delim='='):\n u\"\"\"Converts data dictionary to a pandas dataframe\n\n Parameters\n ----------\n clean: bool, optional\n Returns a subset of columns for the data dictionary. When True,\n the data dicitonary will return the following columns:\n * `name` -- the name of the column\n * `description` -- the 80 character description\n * `type` -- the type of question (Continous, Question, \n Categorical, or Boolean)\n * `dtype` -- the datatype for the pandas column.\n * `order` -- the order of data for categorical objects or\n range of values for continous values\n * `units` -- units for continous values\n * `ambigious` -- values for ambigious results\n * `missing` -- values for missing values\n * `notes` -- any notes passed into the data dictionary\n object to be retained\n val_delim: str, optional\n The seperator between values in the \"order\" column.\n code_delim: str, optional\n The delimiter between a numericly coded categorical variable and\n the value it maps to.\n\n Returns\n -------\n DataFrame\n A dataframe mapping the variable name to its description, question\n type, datatype, and order. 
\n\n Example\n -------\n\n \"\"\"\n\n cols = []\n for col in self.values():\n ser_ = col._to_series()\n # if isinstance(col, Continous):\n ser_.rename({'limits': 'order'}, inplace=True)\n cols.append(ser_)\n\n df_ = pd.concat(axis=1, sort=False, objs=cols).T\n\n if ('var_labels' in df_.columns):\n df_.loc[df_['var_labels'].notna(), 'order'] = \\\n df_.loc[df_['var_labels'].notna(), 'var_labels']\n df_.drop(columns=['var_labels'], inplace=True)\n\n if clean:\n cols = [c for c in self.default_cols if c in df_]\n df_ = df_[cols]\n df_.drop(columns=df_.columns[df_.isna().all(axis=0)], \n inplace=True)\n\n return df_.set_index('name')\n\n def to_pandas_stata(self):\n \"\"\"\n Generates strings and dictionary compatible with writing to stata\n\n Returns\n -------\n str\n A stata-compatible dataset description for `pandas.write_stata`\n dictionary\n A stata-compatible description for each variable, compatible with\n `pandas.write_stata`.\n \"\"\"\n\n variable_desc = {k: v.description for k,v in self.items()}\n\n return self.description, variable_desc\n\n def to_ddi_xml(self):\n pass\n\n @classmethod\n def read_dataframe(cls, df_, description=None, var_delim=' | ', \n code_delim='=', null_value='None'):\n \"\"\"Builds the data dictionary from a dataframe\n\n Parameters\n ----------\n df_ : DataFrame\n A pandas dataframe version of the data dictionary where the data\n is indexed by `name`\n description: str\n A description of the data dictionary or study of no more than\n 80 characters.\n read_codes: bool, optional\n Whether columns should be read with a numerical delimiter (i.e\n \"=\") to parse a numeric value into a categorical one. For example,\n if numeric is true, then \"0=female | 1=male\" would be parsed that\n any data encoded as 0 maps to female, any data encoded as 1 maps\n to male, and the order of hte values is `[0, 1]` (corresponding to\n `['female', 'male']`). 
Otherwise, the line would be read\n literally, and the order is read as `[\"0=female\", \"1=male\"]`.\n val_delim: str, optional\n The seperator between values in the \"order\" column.\n code_delim: str, optional\n The delimiter between a numericly coded categorical variable and\n the value it maps to.\n\n Returns\n -------\n DataDictionary\n A data dictionary object with the newly described study.\n\n Examples\n --------\n\n \"\"\"\n types = []\n cols = []\n if 'name' not in df_.columns:\n df_.reset_index(inplace=True)\n\n for name_, var_ in df_.iterrows():\n # Describes the question type\n type_ = var_['type']\n qclass = type_lookup.get(type_.lower(), Question)\n var_.drop('type', inplace=True)\n\n # Updates the column and type objects\n types.append(type_)\n cols.append(qclass._read_series(var_.dropna(), \n var_delim=var_delim, \n code_delim=code_delim, \n null_value=null_value))\n\n return cls(columns=cols, types=types, description=description)\n\n # @classmethod\n def to_usgs_xml(self):\n \"\"\"Converts the data dictionary to a usgs xlm format\"\"\"\n pass\n\n @classmethod\n def read_stata(cls, iter_, ):\n pass\n\n\n"},"license":{"kind":"string","value":"bsd-2-clause"}}},{"rowIdx":382626,"cells":{"repo_name":{"kind":"string","value":"kewitz/mestrado"},"path":{"kind":"string","value":"Eletromagnetismo Computacional II/MOM.carganofio.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1063"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 1 15:09:54 2014\n\n@author: leo\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\n\n# Defs\nirange = lambda a: zip(range(len(a)),a)\npi = np.pi\n\n# Parâmetros\nL = np.float64(1.0) # Comprimento do fio em m.\na = np.float64(1E-3) # Raio do condutor.\nDelta = np.float64(.01) # Delta.\nV0 = np.float64(1.0)\nEps = np.float64(8.854E-12)\n\n# Funções\nl = lambda m, n: Delta/np.abs(y[m-1]-y[n-1]) if m != n else 2*np.log(Delta/a)\ng = lambda m: 4*pi*Eps*V0\n\n# Domínios e constantes\ny = np.arange(0,L,Delta, dtype=np.float64)\nns = y.size\nms = ns\nL = np.matrix(np.zeros((ms,ns)))\nG = np.matrix(np.zeros((ns)))\n\n# Processamento\nfor i in range(ms):\n m = np.float64(i+1.0)\n G[0,i] = g(m) # Monta o vetor de tensões\n for j in range(ns):\n n = np.float64(j+1.0)\n L[i,j] = l(m,n) # Monta a matriz de ?\n\nrho = np.linalg.solve(L,G.T) # Obtem o vetor 5de Permeabilidades\nQ = rho.sum() * Delta\nprint \"Carga total no condutor %.3eC\" % Q\n\n# Plots\nplt.plot(y,rho)"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382627,"cells":{"repo_name":{"kind":"string","value":"astroML/astroML"},"path":{"kind":"string","value":"astroML/plotting/regression.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"3574"},"content":{"kind":"string","value":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy import optimize\nfrom astroML.linear_model import TLS_logL, LinearRegression\n\n\n# TLS:\ndef get_m_b(beta):\n b = np.dot(beta, beta) / beta[1]\n m = -beta[0] / beta[1]\n return m, b\n\n\ndef plot_regressions(ksi, eta, x, y, sigma_x, sigma_y, add_regression_lines=False,\n alpha_in=1, beta_in=0.5, basis='linear'):\n\n figure = plt.figure(figsize=(8, 6))\n ax = figure.add_subplot(111)\n ax.scatter(x, y, alpha=0.5)\n ax.errorbar(x, y, xerr=sigma_x, yerr=sigma_y, alpha=0.3, ls='')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n\n x0 = np.linspace(np.min(x) - 0.5, np.max(x) + 0.5, 20)\n\n # True regression line\n\n if alpha_in 
is not None and beta_in is not None:\n if basis == 'linear':\n y0 = alpha_in + x0 * beta_in\n elif basis == 'poly':\n y0 = alpha_in + beta_in[0] * x0 + beta_in[1] * x0 * x0 + beta_in[2] * x0 * x0 * x0\n\n ax.plot(x0, y0, color='black', label='True regression')\n else:\n y0 = None\n\n if add_regression_lines:\n for label, data, *target in [['fit no errors', x, y, 1],\n ['fit y errors only', x, y, sigma_y],\n ['fit x errors only', y, x, sigma_x]]:\n linreg = LinearRegression()\n linreg.fit(data[:, None], *target)\n if label == 'fit x errors only' and y0 is not None:\n x_fit = linreg.predict(y0[:, None])\n ax.plot(x_fit, y0, label=label)\n else:\n y_fit = linreg.predict(x0[:, None])\n ax.plot(x0, y_fit, label=label)\n\n # TLS\n X = np.vstack((x, y)).T\n dX = np.zeros((len(x), 2, 2))\n dX[:, 0, 0] = sigma_x\n dX[:, 1, 1] = sigma_y\n\n def min_func(beta): return -TLS_logL(beta, X, dX)\n beta_fit = optimize.fmin(min_func, x0=[-1, 1])\n m_fit, b_fit = get_m_b(beta_fit)\n x_fit = np.linspace(-10, 10, 20)\n ax.plot(x_fit, m_fit * x_fit + b_fit, label='TLS')\n\n ax.set_xlim(np.min(x)-0.5, np.max(x)+0.5)\n ax.set_ylim(np.min(y)-0.5, np.max(y)+0.5)\n ax.legend()\n\n\ndef plot_regression_from_trace(fitted, observed, ax=None, chains=None, multidim_ind=None):\n\n traces = [fitted.trace, ]\n xi, yi, sigx, sigy = observed\n\n if multidim_ind is not None:\n xi = xi[multidim_ind]\n\n x = np.linspace(np.min(xi)-0.5, np.max(xi)+0.5, 50)\n\n for i, trace in enumerate(traces):\n if 'theta' in trace.varnames and 'slope' not in trace.varnames:\n trace.add_values({'slope': np.tan(trace['theta'])})\n\n if multidim_ind is not None:\n trace_slope = trace['slope'][:, multidim_ind]\n else:\n trace_slope = trace['slope'][:, 0]\n\n if chains is not None:\n for chain in range(100, len(trace) * trace.nchains, chains):\n y = trace['inter'][chain] + trace_slope[chain] * x\n ax.plot(x, y, alpha=0.03, c='red')\n\n # plot the best-fit line only\n H2D, bins1, bins2 = np.histogram2d(trace_slope,\n trace['inter'], bins=50)\n\n w = np.where(H2D == H2D.max())\n\n # choose the maximum posterior slope and intercept\n slope_best = bins1[w[0][0]]\n intercept_best = bins2[w[1][0]]\n\n print(\"beta:\", slope_best, \"alpha:\", intercept_best)\n y = intercept_best + slope_best * x\n\n # y_pre = fitted.predict(x[:, None])\n ax.plot(x, y, ':', label='fitted')\n\n ax.legend()\n break\n"},"license":{"kind":"string","value":"bsd-2-clause"}}},{"rowIdx":382628,"cells":{"repo_name":{"kind":"string","value":"yuanagain/seniorthesis"},"path":{"kind":"string","value":"venv/lib/python2.7/site-packages/matplotlib/tests/test_delaunay.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"7137"},"content":{"kind":"string","value":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom matplotlib.externals import six\nfrom matplotlib.externals.six.moves import xrange\nimport warnings\n\nimport numpy as np\nfrom matplotlib.testing.decorators import image_comparison, knownfailureif\nfrom matplotlib.cbook import MatplotlibDeprecationWarning\n\nwith warnings.catch_warnings():\n # the module is deprecated. 
The tests should be removed when the module is.\n warnings.simplefilter('ignore', MatplotlibDeprecationWarning)\n from matplotlib.delaunay.triangulate import Triangulation\nfrom matplotlib import pyplot as plt\nimport matplotlib as mpl\n\ndef constant(x, y):\n return np.ones(x.shape, x.dtype)\nconstant.title = 'Constant'\n\ndef xramp(x, y):\n return x\nxramp.title = 'X Ramp'\n\ndef yramp(x, y):\n return y\nyramp.title = 'Y Ramp'\n\ndef exponential(x, y):\n x = x*9\n y = y*9\n x1 = x+1.0\n x2 = x-2.0\n x4 = x-4.0\n x7 = x-7.0\n y1 = x+1.0\n y2 = y-2.0\n y3 = y-3.0\n y7 = y-7.0\n f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +\n 0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +\n 0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -\n 0.2 * np.exp(-x4*x4 -y7*y7))\n return f\nexponential.title = 'Exponential and Some Gaussians'\n\ndef cliff(x, y):\n f = np.tanh(9.0*(y-x) + 1.0)/9.0\n return f\ncliff.title = 'Cliff'\n\ndef saddle(x, y):\n f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)\n return f\nsaddle.title = 'Saddle'\n\ndef gentle(x, y):\n f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0\n return f\ngentle.title = 'Gentle Peak'\n\ndef steep(x, y):\n f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0\n return f\nsteep.title = 'Steep Peak'\n\ndef sphere(x, y):\n circle = 64-81*((x-0.5)**2 + (y-0.5)**2)\n f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)\n return f\nsphere.title = 'Sphere'\n\ndef trig(x, y):\n f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)\n return f\ntrig.title = 'Cosines and Sines'\n\ndef gauss(x, y):\n x = 5.0-10.0*x\n y = 5.0-10.0*y\n g1 = np.exp(-x*x/2)\n g2 = np.exp(-y*y/2)\n f = g1 + 0.75*g2*(1 + g1)\n return f\ngauss.title = 'Gaussian Peak and Gaussian Ridges'\n\ndef cloverleaf(x, y):\n ex = np.exp((10.0-20.0*x)/3.0)\n ey = np.exp((10.0-20.0*y)/3.0)\n logitx = 1.0/(1.0+ex)\n logity = 1.0/(1.0+ey)\n f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *\n (ex-2.0*logitx)*(ey-2.0*logity))\n return f\ncloverleaf.title = 'Cloverleaf'\n\ndef cosine_peak(x, y):\n circle = np.hypot(80*x-40.0, 90*y-45.)\n f = np.exp(-0.04*circle) * np.cos(0.15*circle)\n return f\ncosine_peak.title = 'Cosine Peak'\n\nallfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]\n\n\nclass LinearTester(object):\n name = 'Linear'\n def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):\n self.xrange = xrange\n self.yrange = yrange\n self.nrange = nrange\n self.npoints = npoints\n\n rng = np.random.RandomState(1234567890)\n self.x = rng.uniform(xrange[0], xrange[1], size=npoints)\n self.y = rng.uniform(yrange[0], yrange[1], size=npoints)\n self.tri = Triangulation(self.x, self.y)\n\n def replace_data(self, dataset):\n self.x = dataset.x\n self.y = dataset.y\n self.tri = Triangulation(self.x, self.y)\n\n def interpolator(self, func):\n z = func(self.x, self.y)\n return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)\n\n def plot(self, func, interp=True, plotter='imshow'):\n if interp:\n lpi = self.interpolator(func)\n z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),\n self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]\n else:\n y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),\n self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]\n z = func(x, y)\n\n z = np.where(np.isinf(z), 0.0, z)\n\n extent = (self.xrange[0], self.xrange[1],\n self.yrange[0], self.yrange[1])\n fig = plt.figure()\n plt.hot() # Some like it hot\n if plotter == 'imshow':\n plt.imshow(np.nan_to_num(z), 
interpolation='nearest', extent=extent, origin='lower')\n elif plotter == 'contour':\n Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),\n self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]\n plt.contour(np.ravel(X), np.ravel(Y), z, 20)\n x = self.x\n y = self.y\n lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))\n for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])\n ax = plt.gca()\n ax.add_collection(lc)\n\n if interp:\n title = '%s Interpolant' % self.name\n else:\n title = 'Reference'\n if hasattr(func, 'title'):\n plt.title('%s: %s' % (func.title, title))\n else:\n plt.title(title)\n\nclass NNTester(LinearTester):\n name = 'Natural Neighbors'\n def interpolator(self, func):\n z = func(self.x, self.y)\n return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)\n\ndef make_all_2d_testfuncs(allfuncs=allfuncs):\n def make_test(func):\n filenames = [\n '%s-%s' % (func.__name__, x) for x in\n ['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]\n\n # We only generate PNGs to save disk space -- we just assume\n # that any backend differences are caught by other tests.\n @image_comparison(filenames, extensions=['png'],\n freetype_version=('2.4.5', '2.4.9'),\n remove_text=True)\n def reference_test():\n nnt.plot(func, interp=False, plotter='imshow')\n nnt.plot(func, interp=True, plotter='imshow')\n lpt.plot(func, interp=True, plotter='imshow')\n nnt.plot(func, interp=False, plotter='contour')\n nnt.plot(func, interp=True, plotter='contour')\n lpt.plot(func, interp=True, plotter='contour')\n\n tester = reference_test\n tester.__name__ = str('test_%s' % func.__name__)\n return tester\n\n nnt = NNTester(npoints=1000)\n lpt = LinearTester(npoints=1000)\n for func in allfuncs:\n globals()['test_%s' % func.__name__] = make_test(func)\n\nmake_all_2d_testfuncs()\n\n# 1d and 0d grid tests\n\nref_interpolator = Triangulation([0,10,10,0],\n [0,0,10,10]).linear_interpolator([1,10,5,2.0])\n\ndef test_1d_grid():\n res = ref_interpolator[3:6:2j,1:1:1j]\n assert np.allclose(res, [[1.6],[1.9]], rtol=0)\n\ndef test_0d_grid():\n res = ref_interpolator[3:3:1j,1:1:1j]\n assert np.allclose(res, [[1.6]], rtol=0)\n\n@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])\ndef test_1d_plots():\n x_range = slice(0.25,9.75,20j)\n x = np.mgrid[x_range]\n ax = plt.gca()\n for y in xrange(2,10,2):\n plt.plot(x, ref_interpolator[x_range,y:y:1j])\n ax.set_xticks([])\n ax.set_yticks([])\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382629,"cells":{"repo_name":{"kind":"string","value":"kobejean/tensorflow"},"path":{"kind":"string","value":"tensorflow/examples/get_started/regression/test.py"},"copies":{"kind":"string","value":"41"},"size":{"kind":"string","value":"4037"},"content":{"kind":"string","value":"# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simple smoke test that runs these examples for 1 training iteration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport pandas as pd\n\nfrom six.moves import StringIO\n\nimport tensorflow.examples.get_started.regression.imports85 as imports85\n\nsys.modules[\"imports85\"] = imports85\n\n# pylint: disable=g-bad-import-order,g-import-not-at-top\nimport tensorflow.contrib.data as data\n\nimport tensorflow.examples.get_started.regression.dnn_regression as dnn_regression\nimport tensorflow.examples.get_started.regression.linear_regression as linear_regression\nimport tensorflow.examples.get_started.regression.linear_regression_categorical as linear_regression_categorical\nimport tensorflow.examples.get_started.regression.custom_regression as custom_regression\n\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\n# pylint: disable=g-bad-import-order,g-import-not-at-top\n\n\n# pylint: disable=line-too-long\nFOUR_LINES = \"\\n\".join([\n \"1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500\",\n \"2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950\",\n \"2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450\",\n \"2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250\",])\n\n# pylint: enable=line-too-long\n\n\ndef four_lines_dataframe():\n text = StringIO(FOUR_LINES)\n\n return pd.read_csv(text, names=imports85.types.keys(),\n dtype=imports85.types, na_values=\"?\")\n\n\ndef four_lines_dataset(*args, **kwargs):\n del args, kwargs\n return data.Dataset.from_tensor_slices(FOUR_LINES.split(\"\\n\"))\n\n\nclass RegressionTest(googletest.TestCase):\n \"\"\"Test the regression examples in this directory.\"\"\"\n\n @test.mock.patch.dict(data.__dict__,\n {\"TextLineDataset\": four_lines_dataset})\n @test.mock.patch.dict(imports85.__dict__, {\"_get_imports85\": (lambda: None)})\n @test.mock.patch.dict(linear_regression.__dict__, {\"STEPS\": 1})\n def test_linear_regression(self):\n linear_regression.main([\"\"])\n\n @test.mock.patch.dict(data.__dict__,\n {\"TextLineDataset\": four_lines_dataset})\n @test.mock.patch.dict(imports85.__dict__, {\"_get_imports85\": (lambda: None)})\n @test.mock.patch.dict(linear_regression_categorical.__dict__, {\"STEPS\": 1})\n def test_linear_regression_categorical(self):\n linear_regression_categorical.main([\"\"])\n\n @test.mock.patch.dict(data.__dict__,\n {\"TextLineDataset\": four_lines_dataset})\n @test.mock.patch.dict(imports85.__dict__, {\"_get_imports85\": (lambda: None)})\n 
@test.mock.patch.dict(dnn_regression.__dict__, {\"STEPS\": 1})\n def test_dnn_regression(self):\n dnn_regression.main([\"\"])\n\n @test.mock.patch.dict(data.__dict__, {\"TextLineDataset\": four_lines_dataset})\n @test.mock.patch.dict(imports85.__dict__, {\"_get_imports85\": (lambda: None)})\n @test.mock.patch.dict(custom_regression.__dict__, {\"STEPS\": 1})\n def test_custom_regression(self):\n custom_regression.main([\"\"])\n\n\nif __name__ == \"__main__\":\n googletest.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382630,"cells":{"repo_name":{"kind":"string","value":"BhallaLab/moose-full"},"path":{"kind":"string","value":"moose-examples/snippets/MULTI/midchan.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"13452"},"content":{"kind":"string","value":"# midchan.py --- \n# Upi Bhalla, NCBS Bangalore 2014.\n#\n# Commentary: \n# \n# This loads in a medium-detail model incorporating \n# reac-diff and elec signaling in neurons. The reac-diff model\n# has just Ca and CaM in it, and there are no-cross-compartment\n# reactions though Ca diffuses everywhere. The elec model controls the\n# Ca levels in the chem compartments.\n# \n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 3, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; see the file COPYING. 
If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street, Fifth\n# Floor, Boston, MA 02110-1301, USA.\n# \n\n# Code:\n\nimport sys\nsys.path.append('../../python')\nimport os\nos.environ['NUMPTHREADS'] = '1'\nimport math\nimport numpy\nimport matplotlib.pyplot as plt\nimport moose\nimport proto18\n\nEREST_ACT = -70e-3\n\ndef loadElec():\n\tlibrary = moose.Neutral( '/library' )\n\tmoose.setCwe( '/library' )\n\tproto18.make_Ca()\n\tproto18.make_Ca_conc()\n\tproto18.make_K_AHP()\n\tproto18.make_K_C()\n\tproto18.make_Na()\n\tproto18.make_K_DR()\n\tproto18.make_K_A()\n\tproto18.make_glu()\n\tproto18.make_NMDA()\n\tproto18.make_Ca_NMDA()\n\tproto18.make_NMDA_Ca_conc()\n\tproto18.make_axon()\n\tmoose.setCwe( '/library' )\n\tmodel = moose.Neutral( '/model' )\n\tcellId = moose.loadModel( 'ca1_asym.p', '/model/elec', \"Neutral\" )\n\treturn cellId\n\ndef loadChem( diffLength ):\n\tchem = moose.Neutral( '/model/chem' )\n\tneuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )\n\tneuroCompt.separateSpines = 1\n\tneuroCompt.geometryPolicy = 'cylinder'\n\tspineCompt = moose.SpineMesh( '/model/chem/compartment_1' )\n\tmoose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )\n\tpsdCompt = moose.PsdMesh( '/model/chem/compartment_2' )\n\t#print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume\n\tmoose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )\n\tmodelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )\n\t#modelId = moose.loadModel( 'psd_merged31d.g', '/model/chem', 'ee' )\n neuroCompt.name = 'dend'\n spineCompt.name = 'spine'\n psdCompt.name = 'psd'\n\ndef makeNeuroMeshModel():\n\tdiffLength = 10e-6 # Aim for 2 soma compartments.\n\telec = loadElec()\n\tloadChem( diffLength )\n neuroCompt = moose.element( '/model/chem/dend' )\n\tneuroCompt.diffLength = diffLength\n\tneuroCompt.cellPortion( elec, '/model/elec/#' )\n\tfor x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):\n\t\tif (x.diffConst > 0):\n\t\t\tx.diffConst = 1e-11\n\tfor x in moose.wildcardFind( '/model/chem/##/Ca' ):\n\t\tx.diffConst = 1e-10\n\n\t# Put in dend solvers\n\tns = neuroCompt.numSegments\n\tndc = neuroCompt.numDiffCompts\n print 'ns = ', ns, ', ndc = ', ndc\n assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )\n\tassert( ns == 36 ) # \n\tassert( ndc == 278 ) # \n\tnmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )\n\tnmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )\n nmstoich = moose.Stoich( '/model/chem/dend/stoich' )\n nmstoich.compartment = neuroCompt\n nmstoich.ksolve = nmksolve\n nmstoich.dsolve = nmdsolve\n nmstoich.path = \"/model/chem/dend/##\"\n print 'done setting path, numPools = ', nmdsolve.numPools\n assert( nmdsolve.numPools == 1 )\n assert( nmdsolve.numAllVoxels == ndc )\n assert( nmstoich.numAllPools == 1 )\n\t# oddly, numLocalFields does not work.\n\tca = moose.element( '/model/chem/dend/DEND/Ca' )\n\tassert( ca.numData == ndc )\n \n # Put in spine solvers. 
Note that these get info from the neuroCompt\n spineCompt = moose.element( '/model/chem/spine' )\n\tsdc = spineCompt.mesh.num\n print 'sdc = ', sdc\n\tassert( sdc == 13 )\n\tsmksolve = moose.Ksolve( '/model/chem/spine/ksolve' )\n\tsmdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )\n smstoich = moose.Stoich( '/model/chem/spine/stoich' )\n smstoich.compartment = spineCompt\n smstoich.ksolve = smksolve\n smstoich.dsolve = smdsolve\n smstoich.path = \"/model/chem/spine/##\"\n print 'spine num Pools = ', smstoich.numAllPools\n assert( smstoich.numAllPools == 3 )\n assert( smdsolve.numPools == 3 )\n assert( smdsolve.numAllVoxels == sdc )\n \n # Put in PSD solvers. Note that these get info from the neuroCompt\n psdCompt = moose.element( '/model/chem/psd' )\n\tpdc = psdCompt.mesh.num\n\tassert( pdc == 13 )\n\tpmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )\n\tpmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )\n pmstoich = moose.Stoich( '/model/chem/psd/stoich' )\n pmstoich.compartment = psdCompt\n pmstoich.ksolve = pmksolve\n pmstoich.dsolve = pmdsolve\n pmstoich.path = \"/model/chem/psd/##\"\n assert( pmstoich.numAllPools == 3 )\n assert( pmdsolve.numPools == 3 )\n assert( pmdsolve.numAllVoxels == pdc )\n foo = moose.element( '/model/chem/psd/Ca' )\n print 'PSD: numfoo = ', foo.numData\n print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels\n\n # Put in junctions between the diffusion solvers\n nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )\n\n\t\"\"\"\n\tCaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )\n\tprint 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume\n\tCaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )\n\tprint 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume\n\t\"\"\"\n\n ##################################################################\n\t# set up adaptors\n\taCa = moose.Adaptor( '/model/chem/spine/adaptCa', sdc )\n\tadaptCa = moose.vec( '/model/chem/spine/adaptCa' )\n\tchemCa = moose.vec( '/model/chem/spine/Ca' )\n\t#print 'aCa = ', aCa, ' foo = ', foo, \"len( ChemCa ) = \", len( chemCa ), \", numData = \", chemCa.numData, \"len( adaptCa ) = \", len( adaptCa )\n\tassert( len( adaptCa ) == sdc )\n\tassert( len( chemCa ) == sdc )\n for i in range( sdc ):\n elecCa = moose.element( '/model/elec/spine_head_14_' + str(i+1) + '/NMDA_Ca_conc' )\n #print elecCa\n\t moose.connect( elecCa, 'concOut', adaptCa[i], 'input', 'Single' )\n\tmoose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )\n\tadaptCa.inputOffset = 0.0\t# \n\tadaptCa.outputOffset = 0.00008\t# 80 nM offset in chem.\n \tadaptCa.scale = 1e-4\t# 520 to 0.0052 mM\n\t#print adaptCa.outputOffset\n\n\n moose.le( '/model/chem/dend/DEND' )\n\n\n compts = neuroCompt.elecComptList\n begin = neuroCompt.startVoxelInCompt\n end = neuroCompt.endVoxelInCompt\n\taCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', len( compts))\n\tadaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )\n\tchemCa = moose.vec( '/model/chem/dend/DEND/Ca' )\n\t#print 'aCa = ', aCa, ' foo = ', foo, \"len( ChemCa ) = \", len( chemCa ), \", numData = \", chemCa.numData, \"len( adaptCa ) = \", len( adaptCa )\n\tassert( len( chemCa ) == ndc )\n for i in zip( compts, adaptCa, begin, end ):\n name = i[0].path + '/Ca_conc'\n if ( moose.exists( name ) ):\n elecCa = moose.element( name )\n #print i[2], i[3], ' ', elecCa\n #print i[1]\n moose.connect( elecCa, 'concOut', i[1], 'input', 'Single' ) \n for j in range( i[2], i[3] ):\n moose.connect( i[1], 'output', chemCa[j], 'setConc', 'Single' 
)\n\tadaptCa.inputOffset = 0.0\t# \n\tadaptCa.outputOffset = 0.00008\t# 80 nM offset in chem.\n \tadaptCa.scale = 20e-6\t# 10 arb units to 2 uM.\n\ndef addPlot( objpath, field, plot ):\n\t#assert moose.exists( objpath )\n\tif moose.exists( objpath ):\n\t\ttab = moose.Table( '/graphs/' + plot )\n\t\tobj = moose.element( objpath )\n\t\tif obj.className == 'Neutral':\n\t\t\tprint \"addPlot failed: object is a Neutral: \", objpath\n\t\t\treturn moose.element( '/' )\n\t\telse:\n\t\t\t#print \"object was found: \", objpath, obj.className\n\t\t\tmoose.connect( tab, 'requestOut', obj, field )\n\t\t\treturn tab\n\telse:\n\t\tprint \"addPlot failed: object not found: \", objpath\n\t\treturn moose.element( '/' )\n\ndef makeElecPlots():\n graphs = moose.Neutral( '/graphs' )\n elec = moose.Neutral( '/graphs/elec' )\n addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' )\n addPlot( '/model/elec/spine_head_14_4', 'getVm', 'elec/spineVm' )\n addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'elec/somaCa' )\n addPlot( '/model/elec/lat_11_2/Ca_conc', 'getCa', 'elec/lat11Ca' )\n addPlot( '/model/elec/spine_head_14_4/NMDA_Ca_conc', 'getCa', 'elec/spine4Ca' )\n addPlot( '/model/elec/spine_head_14_12/NMDA_Ca_conc', 'getCa', 'elec/spine12Ca' )\n\ndef makeChemPlots():\n\tgraphs = moose.Neutral( '/graphs' )\n\tchem = moose.Neutral( '/graphs/chem' )\n\taddPlot( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' )\n\taddPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' )\n\taddPlot( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' )\n\taddPlot( '/model/chem/spine/Ca[3]', 'getConc', 'chem/spine4Ca' )\n\taddPlot( '/model/chem/spine/Ca[11]', 'getConc', 'chem/spine12Ca' )\n\taddPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' )\n\taddPlot( '/model/chem/dend/DEND/Ca[20]', 'getConc', 'chem/dendCa20' )\n\ndef testNeuroMeshMultiscale():\n\telecDt = 50e-6\n\tchemDt = 0.005\n\tePlotDt = 0.5e-3\n\tcPlotDt = 0.005\n\tplotName = 'nm.plot'\n\n\tmakeNeuroMeshModel()\n\tprint \"after model is completely done\"\n\tfor i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):\n\t\tprint i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb\n\n\tmakeChemPlots()\n\tmakeElecPlots()\n\tmoose.setClock( 0, elecDt )\n\tmoose.setClock( 1, elecDt )\n\tmoose.setClock( 2, elecDt )\n\tmoose.setClock( 4, chemDt )\n\tmoose.setClock( 5, chemDt )\n\tmoose.setClock( 6, chemDt )\n\tmoose.setClock( 7, cPlotDt )\n\tmoose.setClock( 8, ePlotDt )\n\tmoose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )\n\tmoose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' )\n\tmoose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )\n\tmoose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')\n\t#moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )\n\t#moose.useClock( 4, '/model/chem/##[ISA=Adaptor]', 'process' )\n\tmoose.useClock( 4, '/model/chem/#/dsolve', 'process' )\n\tmoose.useClock( 5, '/model/chem/#/ksolve', 'process' )\n\tmoose.useClock( 6, '/model/chem/spine/adaptCa', 'process' )\n\tmoose.useClock( 6, '/model/chem/dend/DEND/adaptCa', 'process' )\n\tmoose.useClock( 7, '/graphs/chem/#', 'process' )\n\tmoose.useClock( 8, '/graphs/elec/#', 'process' )\n\t#hsolve = moose.HSolve( '/model/elec/hsolve' )\n\t#moose.useClock( 1, '/model/elec/hsolve', 'process' )\n\t#hsolve.dt = elecDt\n\t#hsolve.target = '/model/elec/compt'\n\t#moose.reinit()\n moose.element( '/model/elec/soma' ).inject = 2e-10\n moose.element( '/model/chem/psd/Ca' 
).concInit = 0.001\n moose.element( '/model/chem/spine/Ca' ).concInit = 0.002\n moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003\n\tmoose.reinit()\n\n\tmoose.start( 0.25 )\n# moose.element( '/model/elec/soma' ).inject = 0\n#\tmoose.start( 0.25 )\n plt.ion()\n fig = plt.figure( figsize=(8,8) )\n chem = fig.add_subplot( 311 )\n chem.set_ylim( 0, 0.002 )\n plt.ylabel( 'Conc (mM)' )\n plt.xlabel( 'time (seconds)' )\n for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):\n pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt\n line1, = chem.plot( pos, x.vector, label=x.name )\n plt.legend()\n\n elec = fig.add_subplot( 312 )\n plt.ylabel( 'Vm (V)' )\n plt.xlabel( 'time (seconds)' )\n for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):\n pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt\n line1, = elec.plot( pos, x.vector, label=x.name )\n plt.legend()\n\n lenplot = fig.add_subplot( 313 )\n plt.ylabel( 'Ca (mM )' )\n plt.xlabel( 'Voxel#)' )\n\n\tspineCa = moose.vec( '/model/chem/spine/Ca' )\n\tdendCa = moose.vec( '/model/chem/dend/DEND/Ca' )\n line1, = lenplot.plot( range( len( spineCa ) ), spineCa.conc, label='spine' )\n line2, = lenplot.plot( range( len( dendCa ) ), dendCa.conc, label='dend' )\n\n ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConc]') ]\n line3, = lenplot.plot( range( len( ca ) ), ca, label='elec' )\n\n\tspineCaM = moose.vec( '/model/chem/spine/Ca_CaM' )\n line4, = lenplot.plot( range( len( spineCaM ) ), spineCaM.conc, label='spineCaM' )\n\tpsdCaM = moose.vec( '/model/chem/psd/Ca_CaM' )\n line5, = lenplot.plot( range( len( psdCaM ) ), psdCaM.conc, label='psdCaM' )\n plt.legend()\n\n\n fig.canvas.draw()\n raw_input()\n \n '''\n for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ):\n t = numpy.arange( 0, x.vector.size, 1 )\n pylab.plot( t, x.vector, label=x.name )\n pylab.legend()\n pylab.show()\n '''\n\n\tprint 'All done'\n\n\ndef main():\n\ttestNeuroMeshMultiscale()\n\nif __name__ == '__main__':\n\tmain()\n\n# \n# minimal.py ends here.\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":382631,"cells":{"repo_name":{"kind":"string","value":"timqian/sms-tools"},"path":{"kind":"string","value":"software/transformations_interface/hpsTransformations_function.py"},"copies":{"kind":"string","value":"23"},"size":{"kind":"string","value":"6610"},"content":{"kind":"string","value":"# function call to the transformation functions of relevance for the hpsModel\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import get_window\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))\nimport hpsModel as HPS\nimport hpsTransformations as HPST\nimport harmonicTransformations as HT\nimport utilFunctions as UF\n\ndef analysis(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100, \n\tminSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01, stocf=0.1):\n\t\"\"\"\n\tAnalyze a sound with the harmonic plus stochastic model\n\tinputFile: input sound file (monophonic with sampling rate of 44100)\n\twindow: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)\t\n\tM: analysis window size \n\tN: fft size (power of two, bigger or equal than M)\n\tt: magnitude threshold of spectral peaks \n\tminSineDur: minimum duration of sinusoidal tracks\n\tnH: maximum number of harmonics\n\tminf0: minimum 
fundamental frequency in sound\n\tmaxf0: maximum fundamental frequency in sound\n\tf0et: maximum error accepted in f0 detection algorithm \n\tharmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation\n\tstocf: decimation factor used for the stochastic approximation\n\treturns inputFile: input file name; fs: sampling rate of input file,\n\t hfreq, hmag: harmonic frequencies, magnitude; mYst: stochastic residual\n\t\"\"\"\n\n\t# size of fft used in synthesis\n\tNs = 512\n\n\t# hop size (has to be 1/4 of Ns)\n\tH = 128\n\n\t# read input sound\n\t(fs, x) = UF.wavread(inputFile)\n\n\t# compute analysis window\n\tw = get_window(window, M)\n\n\t# compute the harmonic plus stochastic model of the whole sound\n\thfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)\n\n\t# synthesize the harmonic plus stochastic model without original phases\n\ty, yh, yst = HPS.hpsModelSynth(hfreq, hmag, np.array([]), mYst, Ns, H, fs)\n\n\t# write output sound \n\toutputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel.wav'\n\tUF.wavwrite(y,fs, outputFile)\n\n\t# create figure to plot\n\tplt.figure(figsize=(12, 9))\n\n\t# frequency range to plot\n\tmaxplotfreq = 15000.0\n\n\t# plot the input sound\n\tplt.subplot(3,1,1)\n\tplt.plot(np.arange(x.size)/float(fs), x)\n\tplt.axis([0, x.size/float(fs), min(x), max(x)])\n\tplt.ylabel('amplitude')\n\tplt.xlabel('time (sec)')\n\tplt.title('input sound: x')\n\n\t# plot spectrogram stochastic compoment\n\tplt.subplot(3,1,2)\n\tnumFrames = int(mYst[:,0].size)\n\tsizeEnv = int(mYst[0,:].size)\n\tfrmTime = H*np.arange(numFrames)/float(fs)\n\tbinFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv \n\tplt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))\n\tplt.autoscale(tight=True)\n\n\t# plot harmonic on top of stochastic spectrogram\n\tif (hfreq.shape[1] > 0):\n\t\tharms = hfreq*np.less(hfreq,maxplotfreq)\n\t\tharms[harms==0] = np.nan\n\t\tnumFrames = int(harms[:,0].size)\n\t\tfrmTime = H*np.arange(numFrames)/float(fs) \n\t\tplt.plot(frmTime, harms, color='k', ms=3, alpha=1)\n\t\tplt.xlabel('time (sec)')\n\t\tplt.ylabel('frequency (Hz)')\n\t\tplt.autoscale(tight=True)\n\t\tplt.title('harmonics + stochastic spectrogram')\n\n\t# plot the output sound\n\tplt.subplot(3,1,3)\n\tplt.plot(np.arange(y.size)/float(fs), y)\n\tplt.axis([0, y.size/float(fs), min(y), max(y)])\n\tplt.ylabel('amplitude')\n\tplt.xlabel('time (sec)')\n\tplt.title('output sound: y')\n\n\tplt.tight_layout()\n\tplt.show(block=False)\n\n\treturn inputFile, fs, hfreq, hmag, mYst\n\n\ndef transformation_synthesis(inputFile, fs, hfreq, hmag, mYst, freqScaling = np.array([0, 1.2, 2.01, 1.2, 2.679, .7, 3.146, .7]), \n\tfreqStretching = np.array([0, 1, 2.01, 1, 2.679, 1.5, 3.146, 1.5]), timbrePreservation = 1, \n\ttimeScaling = np.array([0, 0, 2.138, 2.138-1.0, 3.146, 3.146])):\n\t\"\"\"\n\ttransform the analysis values returned by the analysis function and synthesize the sound\n\tinputFile: name of input file\n\tfs: sampling rate of input file\t\n\thfreq, hmag: harmonic frequencies and magnitudes\n\tmYst: stochastic residual\n\tfreqScaling: frequency scaling factors, in time-value pairs (value of 1 no scaling)\n\tfreqStretching: frequency stretching factors, in time-value pairs (value of 1 no stretching)\n\ttimbrePreservation: 1 preserves original timbre, 0 it does not\n\ttimeScaling: time scaling factors, in time-value pairs\n\t\"\"\"\n\t\n\t# 
size of fft used in synthesis\n\tNs = 512\n\n\t# hop size (has to be 1/4 of Ns)\n\tH = 128\n\t\n\t# frequency scaling of the harmonics \n\thfreqt, hmagt = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)\n\n\t# time scaling the sound\n\tyhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreqt, hmagt, mYst, timeScaling)\n\n\t# synthesis from the trasformed hps representation \n\ty, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)\n\n\t# write output sound \n\toutputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModelTransformation.wav'\n\tUF.wavwrite(y,fs, outputFile)\n\n\t# create figure to plot\n\tplt.figure(figsize=(12, 6))\n\n\t# frequency range to plot\n\tmaxplotfreq = 15000.0\n\n\t# plot spectrogram of transformed stochastic compoment\n\tplt.subplot(2,1,1)\n\tnumFrames = int(ystocEnv[:,0].size)\n\tsizeEnv = int(ystocEnv[0,:].size)\n\tfrmTime = H*np.arange(numFrames)/float(fs)\n\tbinFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv \n\tplt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))\n\tplt.autoscale(tight=True)\n\n\t# plot transformed harmonic on top of stochastic spectrogram\n\tif (yhfreq.shape[1] > 0):\n\t\tharms = yhfreq*np.less(yhfreq,maxplotfreq)\n\t\tharms[harms==0] = np.nan\n\t\tnumFrames = int(harms[:,0].size)\n\t\tfrmTime = H*np.arange(numFrames)/float(fs) \n\t\tplt.plot(frmTime, harms, color='k', ms=3, alpha=1)\n\t\tplt.xlabel('time (sec)')\n\t\tplt.ylabel('frequency (Hz)')\n\t\tplt.autoscale(tight=True)\n\t\tplt.title('harmonics + stochastic spectrogram')\n\n\t# plot the output sound\n\tplt.subplot(2,1,2)\n\tplt.plot(np.arange(y.size)/float(fs), y)\n\tplt.axis([0, y.size/float(fs), min(y), max(y)])\n\tplt.ylabel('amplitude')\n\tplt.xlabel('time (sec)')\n\tplt.title('output sound: y')\n\n\tplt.tight_layout()\n\tplt.show()\n\nif __name__ == \"__main__\":\n\t\n\t# analysis\n\tinputFile, fs, hfreq, hmag, mYst = analysis()\n\n\t# transformation and synthesis\n\ttransformation_synthesis(inputFile, fs, hfreq, hmag, mYst)\n\n\tplt.show()\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":382632,"cells":{"repo_name":{"kind":"string","value":"adammenges/statsmodels"},"path":{"kind":"string","value":"statsmodels/regression/linear_model.py"},"copies":{"kind":"string","value":"16"},"size":{"kind":"string","value":"93645"},"content":{"kind":"string","value":"# TODO: Determine which tests are valid for GLSAR, and under what conditions\n# TODO: Fix issue with constant and GLS\n# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None\n# TODO: GLS: default if sigma is none should be two-step GLS\n# TODO: Check nesting when performing model based tests, lr, wald, lm\n\"\"\"\nThis module implements standard regression models:\n\nGeneralized Least Squares (GLS)\nOrdinary Least Squares (OLS)\nWeighted Least Squares (WLS)\nGeneralized Least Squares with autoregressive error terms GLSAR(p)\n\nModels are specified with an endogenous response variable and an\nexogenous design matrix and are fit using their `fit` method.\n\nSubclasses that have more complicated covariance matrices\nshould write over the 'whiten' method as the fit method\nprewhitens the response by calling 'whiten'.\n\nGeneral reference for regression models:\n\nD. C. Montgomery and E.A. Peck. \"Introduction to Linear Regression\n Analysis.\" 2nd. Ed., Wiley, 1992.\n\nEconometrics references for regression models:\n\nR. Davidson and J.G. MacKinnon. 
\"Econometric Theory and Methods,\" Oxford,\n 2004.\n\nW. Green. \"Econometric Analysis,\" 5th ed., Pearson, 2003.\n\"\"\"\n\nfrom __future__ import print_function\nfrom statsmodels.compat.python import lrange, lzip, range\n__docformat__ = 'restructuredtext en'\n\n__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR']\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.linalg import toeplitz\nfrom scipy import stats\nfrom scipy import optimize\n\nfrom statsmodels.compat.numpy import np_matrix_rank\nfrom statsmodels.tools.data import _is_using_pandas\nfrom statsmodels.tools.tools import add_constant, chain_dot, pinv_extended\nfrom statsmodels.tools.decorators import (resettable_cache,\n cache_readonly,\n cache_writable)\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.emplike.elregress import _ELRegOpts\nimport warnings\nfrom statsmodels.tools.sm_exceptions import InvalidTestWarning\n\n# need import in module instead of lazily to copy `__doc__`\nfrom . import _prediction as pred\n\ndef _get_sigma(sigma, nobs):\n \"\"\"\n Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its\n Cholesky decomposition. Handles dimensions and checks integrity.\n If sigma is None, returns None, None. Otherwise returns sigma,\n cholsigmainv.\n \"\"\"\n if sigma is None:\n return None, None\n sigma = np.asarray(sigma).squeeze()\n if sigma.ndim == 0:\n sigma = np.repeat(sigma, nobs)\n if sigma.ndim == 1:\n if sigma.shape != (nobs,):\n raise ValueError(\"Sigma must be a scalar, 1d of length %s or a 2d \"\n \"array of shape %s x %s\" % (nobs, nobs, nobs))\n cholsigmainv = 1/np.sqrt(sigma)\n else:\n if sigma.shape != (nobs, nobs):\n raise ValueError(\"Sigma must be a scalar, 1d of length %s or a 2d \"\n \"array of shape %s x %s\" % (nobs, nobs, nobs))\n cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T\n\n return sigma, cholsigmainv\n\n\nclass RegressionModel(base.LikelihoodModel):\n \"\"\"\n Base class for linear regression models. 
Should not be directly called.\n\n Intended for subclassing.\n \"\"\"\n def __init__(self, endog, exog, **kwargs):\n super(RegressionModel, self).__init__(endog, exog, **kwargs)\n self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])\n\n def initialize(self):\n self.wexog = self.whiten(self.exog)\n self.wendog = self.whiten(self.endog)\n # overwrite nobs from class Model:\n self.nobs = float(self.wexog.shape[0])\n\n self._df_model = None\n self._df_resid = None\n self.rank = None\n\n @property\n def df_model(self):\n \"\"\"\n The model degree of freedom, defined as the rank of the regressor\n matrix minus 1 if a constant is included.\n \"\"\"\n if self._df_model is None:\n if self.rank is None:\n self.rank = np_matrix_rank(self.exog)\n self._df_model = float(self.rank - self.k_constant)\n return self._df_model\n\n @df_model.setter\n def df_model(self, value):\n self._df_model = value\n\n @property\n def df_resid(self):\n \"\"\"\n The residual degree of freedom, defined as the number of observations\n minus the rank of the regressor matrix.\n \"\"\"\n\n if self._df_resid is None:\n if self.rank is None:\n self.rank = np_matrix_rank(self.exog)\n self._df_resid = self.nobs - self.rank\n return self._df_resid\n\n @df_resid.setter\n def df_resid(self, value):\n self._df_resid = value\n\n\n def whiten(self, X):\n raise NotImplementedError(\"Subclasses should implement.\")\n\n def fit(self, method=\"pinv\", cov_type='nonrobust', cov_kwds=None,\n use_t=None, **kwargs):\n \"\"\"\n Full fit of the model.\n\n The results include an estimate of covariance matrix, (whitened)\n residuals and an estimate of scale.\n\n Parameters\n ----------\n method : str, optional\n Can be \"pinv\", \"qr\". \"pinv\" uses the Moore-Penrose pseudoinverse\n to solve the least squares problem. \"qr\" uses the QR\n factorization.\n cov_type : str, optional\n See `regression.linear_model.RegressionResults` for a description\n of the available covariance estimators\n cov_kwds : list or None, optional\n See `linear_model.RegressionResults.get_robustcov_results` for a\n description required keywords for alternative covariance estimators\n use_t : bool, optional\n Flag indicating to use the Student's t distribution when computing\n p-values. Default behavior depends on cov_type. 
See\n `linear_model.RegressionResults.get_robustcov_results` for\n implementation details.\n\n Returns\n -------\n A RegressionResults class instance.\n\n See Also\n ---------\n regression.linear_model.RegressionResults\n regression.linear_model.RegressionResults.get_robustcov_results\n\n Notes\n -----\n The fit method uses the pseudoinverse of the design/exogenous variables\n to solve the least squares minimization.\n \"\"\"\n if method == \"pinv\":\n if ((not hasattr(self, 'pinv_wexog')) or\n (not hasattr(self, 'normalized_cov_params')) or\n (not hasattr(self, 'rank'))):\n\n self.pinv_wexog, singular_values = pinv_extended(self.wexog)\n self.normalized_cov_params = np.dot(self.pinv_wexog,\n np.transpose(self.pinv_wexog))\n\n # Cache these singular values for use later.\n self.wexog_singular_values = singular_values\n self.rank = np_matrix_rank(np.diag(singular_values))\n\n beta = np.dot(self.pinv_wexog, self.wendog)\n\n elif method == \"qr\":\n if ((not hasattr(self, 'exog_Q')) or\n (not hasattr(self, 'exog_R')) or\n (not hasattr(self, 'normalized_cov_params')) or\n (getattr(self, 'rank', None) is None)):\n Q, R = np.linalg.qr(self.wexog)\n self.exog_Q, self.exog_R = Q, R\n self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))\n\n # Cache singular values from R.\n self.wexog_singular_values = np.linalg.svd(R, 0, 0)\n self.rank = np_matrix_rank(R)\n else:\n Q, R = self.exog_Q, self.exog_R\n\n # used in ANOVA\n self.effects = effects = np.dot(Q.T, self.wendog)\n beta = np.linalg.solve(R, effects)\n\n if self._df_model is None:\n self._df_model = float(self.rank - self.k_constant)\n if self._df_resid is None:\n self.df_resid = self.nobs - self.rank\n\n if isinstance(self, OLS):\n lfit = OLSResults(self, beta,\n normalized_cov_params=self.normalized_cov_params,\n cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)\n else:\n lfit = RegressionResults(self, beta,\n normalized_cov_params=self.normalized_cov_params,\n cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,\n **kwargs)\n return RegressionResultsWrapper(lfit)\n\n def fit_regularized(self, method=\"coord_descent\", maxiter=1000,\n alpha=0., L1_wt=1., start_params=None,\n cnvrg_tol=1e-8, zero_tol=1e-8, **kwargs):\n \"\"\"\n Return a regularized fit to a linear regression model.\n\n Parameters\n ----------\n method : string\n Only the coordinate descent algorithm is implemented.\n maxiter : integer\n The maximum number of iteration cycles (an iteration cycle\n involves running coordinate descent on all variables).\n alpha : scalar or array-like\n The penalty weight. If a scalar, the same penalty weight\n applies to all variables in the model. If a vector, it\n must have the same length as `params`, and contains a\n penalty weight for each coefficient.\n L1_wt : scalar\n The fraction of the penalty given to the L1 penalty term.\n Must be between 0 and 1 (inclusive). If 0, the fit is\n ridge regression. If 1, the fit is the lasso.\n start_params : array-like\n Starting values for ``params``.\n cnvrg_tol : scalar\n If ``params`` changes by less than this amount (in sup-norm)\n in once iteration cycle, the algorithm terminates with\n convergence.\n zero_tol : scalar\n Any estimated coefficient smaller than this value is\n replaced with zero.\n\n Returns\n -------\n A RegressionResults object, of the same type returned by\n ``fit``.\n\n Notes\n -----\n The approach closely follows that implemented in the glmnet\n package in R. 
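        For illustration, a minimal usage sketch (the data and the penalty
        weight are made up for the example):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> np.random.seed(0)
        >>> x = sm.add_constant(np.random.standard_normal((50, 3)))
        >>> y = np.dot(x, [1., 2., 0., 0.]) + np.random.standard_normal(50)
        >>> res_lasso = sm.OLS(y, x).fit_regularized(alpha=0.1, L1_wt=1.0)
        >>> res_ridge = sm.OLS(y, x).fit_regularized(alpha=0.1, L1_wt=0.0)
        >>> res_lasso.params.shape
        (4,)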
The penalty is the \"elastic net\" penalty, which\n is a convex combination of L1 and L2 penalties.\n\n The function that is minimized is: ..math::\n\n 0.5*RSS/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)\n\n where RSS is the usual regression sum of squares, n is the\n sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2\n norms.\n\n Post-estimation results are based on the same data used to\n select variables, hence may be subject to overfitting biases.\n\n References\n ----------\n Friedman, Hastie, Tibshirani (2008). Regularization paths for\n generalized linear models via coordinate descent. Journal of\n Statistical Software 33(1), 1-22 Feb 2010.\n \"\"\"\n\n k_exog = self.wexog.shape[1]\n\n if np.isscalar(alpha):\n alpha = alpha * np.ones(k_exog, dtype=np.float64)\n\n # Below we work with RSS + penalty, so we need to rescale.\n alpha *= 2 * self.wexog.shape[0]\n\n if start_params is None:\n params = np.zeros(k_exog, dtype=np.float64)\n else:\n params = start_params.copy()\n\n converged = False\n xxprod = 2*(self.wexog**2).sum(0)\n\n # Coordinate descent\n for itr in range(maxiter):\n\n params_save = params.copy()\n for k in range(self.wexog.shape[1]):\n\n params[k] = 0.\n wendog_adj = self.wendog - np.dot(self.wexog, params)\n xyprod = 2*np.dot(self.wexog[:,k], wendog_adj)\n den = xxprod[k] + alpha[k] * (1 - L1_wt)\n a = alpha[k] * L1_wt\n if a >= np.abs(xyprod):\n params[k] = 0.\n elif xyprod > 0:\n params[k] = (xyprod - a) / den\n else:\n params[k] = (xyprod + a) / den\n\n # Check for convergence\n pchange = np.max(np.abs(params - params_save))\n if pchange < cnvrg_tol:\n converged = True\n break\n\n # Set approximate zero coefficients to be exactly zero\n params *= np.abs(params) >= zero_tol\n\n # Fit the reduced model to get standard errors and other\n # post-estimation results.\n ii = np.flatnonzero(params)\n cov = np.zeros((k_exog, k_exog), dtype=np.float64)\n if len(ii) > 0:\n model = self.__class__(self.wendog, self.wexog[:,ii])\n rslt = model.fit()\n cov[np.ix_(ii, ii)] = rslt.normalized_cov_params\n\n lfit = RegressionResults(self, params,\n normalized_cov_params=cov)\n lfit.converged = converged\n return RegressionResultsWrapper(lfit)\n\n def predict(self, params, exog=None):\n \"\"\"\n Return linear predicted values from a design matrix.\n\n Parameters\n ----------\n params : array-like\n Parameters of a linear model\n exog : array-like, optional.\n Design / exogenous data. Model exog is used if None.\n\n Returns\n -------\n An array of fitted values\n\n Notes\n -----\n If the model has not yet been fit, params is not optional.\n \"\"\"\n #JP: this doesn't look correct for GLMAR\n #SS: it needs its own predict method\n if exog is None:\n exog = self.exog\n return np.dot(exog, params)\n\nclass GLS(RegressionModel):\n __doc__ = \"\"\"\n Generalized least squares model with a general covariance structure.\n\n %(params)s\n sigma : scalar or array\n `sigma` is the weighting matrix of the covariance.\n The default is None for no scaling. If `sigma` is a scalar, it is\n assumed that `sigma` is an n x n diagonal matrix with the given\n scalar, `sigma` as the value of each diagonal element. If `sigma`\n is an n-length vector, then `sigma` is assumed to be a diagonal\n matrix with the given `sigma` on the diagonal. 
This should be the\n same as WLS.\n %(extra_params)s\n\n **Attributes**\n\n pinv_wexog : array\n `pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.\n cholsimgainv : array\n The transpose of the Cholesky decomposition of the pseudoinverse.\n df_model : float\n p - 1, where p is the number of regressors including the intercept.\n of freedom.\n df_resid : float\n Number of observations n less the number of parameters p.\n llf : float\n The value of the likelihood function of the fitted model.\n nobs : float\n The number of observations n.\n normalized_cov_params : array\n p x p array :math:`(X^{T}\\Sigma^{-1}X)^{-1}`\n results : RegressionResults instance\n A property that returns the RegressionResults class if fit.\n sigma : array\n `sigma` is the n x n covariance structure of the error terms.\n wexog : array\n Design matrix whitened by `cholsigmainv`\n wendog : array\n Response variable whitened by `cholsigmainv`\n\n Notes\n -----\n If sigma is a function of the data making one of the regressors\n a constant, then the current postestimation statistics will not be correct.\n\n\n Examples\n --------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.longley.load()\n >>> data.exog = sm.add_constant(data.exog)\n >>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid\n >>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()\n >>> rho = res_fit.params\n\n `rho` is a consistent estimator of the correlation of the residuals from\n an OLS fit of the longley data. It is assumed that this is the true rho\n of the AR process data.\n\n >>> from scipy.linalg import toeplitz\n >>> order = toeplitz(np.arange(16))\n >>> sigma = rho**order\n\n `sigma` is an n x n matrix of the autocorrelation structure of the\n data.\n\n >>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)\n >>> gls_results = gls_model.fit()\n >>> print(gls_results.summary()))\n\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n\n def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,\n **kwargs):\n #TODO: add options igls, for iterative fgls if sigma is None\n #TODO: default if sigma is none should be two-step GLS\n sigma, cholsigmainv = _get_sigma(sigma, len(endog))\n\n super(GLS, self).__init__(endog, exog, missing=missing,\n hasconst=hasconst, sigma=sigma,\n cholsigmainv=cholsigmainv, **kwargs)\n\n #store attribute names for data arrays\n self._data_attr.extend(['sigma', 'cholsigmainv'])\n\n\n def whiten(self, X):\n \"\"\"\n GLS whiten method.\n\n Parameters\n -----------\n X : array-like\n Data to be whitened.\n\n Returns\n -------\n np.dot(cholsigmainv,X)\n\n See Also\n --------\n regression.GLS\n \"\"\"\n X = np.asarray(X)\n if self.sigma is None or self.sigma.shape == ():\n return X\n elif self.sigma.ndim == 1:\n if X.ndim == 1:\n return X * self.cholsigmainv\n else:\n return X * self.cholsigmainv[:, None]\n else:\n return np.dot(self.cholsigmainv, X)\n\n\n\n def loglike(self, params):\n \"\"\"\n Returns the value of the Gaussian log-likelihood function at params.\n\n Given the whitened design matrix, the log-likelihood is evaluated\n at the parameter vector `params` for the dependent variable `endog`.\n\n Parameters\n ----------\n params : array-like\n The parameter estimates\n\n Returns\n -------\n loglike : float\n The value of the log-likelihood function for a GLS Model.\n\n\n Notes\n -----\n The log-likelihood function for the normal distribution is\n\n .. 
math:: -\\\\frac{n}{2}\\\\log\\\\left(\\\\left(Y-\\\\hat{Y}\\\\right)^{\\\\prime}\\\\left(Y-\\\\hat{Y}\\\\right)\\\\right)-\\\\frac{n}{2}\\\\left(1+\\\\log\\\\left(\\\\frac{2\\\\pi}{n}\\\\right)\\\\right)-\\\\frac{1}{2}\\\\log\\\\left(\\\\left|\\\\Sigma\\\\right|\\\\right)\n\n Y and Y-hat are whitened.\n\n \"\"\"\n #TODO: combine this with OLS/WLS loglike and add _det_sigma argument\n nobs2 = self.nobs / 2.0\n SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)\n llf = -np.log(SSR) * nobs2 # concentrated likelihood\n llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant\n if np.any(self.sigma):\n #FIXME: robust-enough check? unneeded if _det_sigma gets defined\n if self.sigma.ndim==2:\n det = np.linalg.slogdet(self.sigma)\n llf -= .5*det[1]\n else:\n llf -= 0.5*np.sum(np.log(self.sigma))\n # with error covariance matrix\n return llf\n\nclass WLS(RegressionModel):\n __doc__ = \"\"\"\n A regression model with diagonal but non-identity covariance structure.\n\n The weights are presumed to be (proportional to) the inverse of the\n variance of the observations. That is, if the variables are to be\n transformed by 1/sqrt(W) you must supply weights = 1/W.\n\n %(params)s\n weights : array-like, optional\n 1d array of weights. If you supply 1/W then the variables are pre-\n multiplied by 1/sqrt(W). If no weights are supplied the default value\n is 1 and WLS reults are the same as OLS.\n %(extra_params)s\n\n Attributes\n ----------\n weights : array\n The stored weights supplied as an argument.\n\n See regression.GLS\n\n Examples\n ---------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n >>> Y = [1,3,4,5,2,3,4]\n >>> X = range(1,8)\n >>> X = sm.add_constant(X)\n >>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))\n >>> results = wls_model.fit()\n >>> results.params\n array([ 2.91666667, 0.0952381 ])\n >>> results.tvalues\n array([ 2.0652652 , 0.35684428])\n >>> print(results.t_test([1, 0]))\n \n >>> print(results.f_test([0, 1]))\n \n\n Notes\n -----\n If the weights are a function of the data, then the post estimation\n statistics such as fvalue and mse_model might not be correct, as the\n package does not yet support no-constant regression.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n\n def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,\n **kwargs):\n weights = np.array(weights)\n if weights.shape == ():\n if (missing == 'drop' and 'missing_idx' in kwargs and\n kwargs['missing_idx'] is not None):\n # patsy may have truncated endog\n weights = np.repeat(weights, len(kwargs['missing_idx']))\n else:\n weights = np.repeat(weights, len(endog))\n # handle case that endog might be of len == 1\n if len(weights) == 1:\n weights = np.array([weights.squeeze()])\n else:\n weights = weights.squeeze()\n super(WLS, self).__init__(endog, exog, missing=missing,\n weights=weights, hasconst=hasconst, **kwargs)\n nobs = self.exog.shape[0]\n weights = self.weights\n # Experimental normalization of weights\n weights = weights / np.sum(weights) * nobs\n if weights.size != nobs and weights.shape[0] != nobs:\n raise ValueError('Weights must be scalar or same length as design')\n\n def whiten(self, X):\n \"\"\"\n Whitener for WLS model, multiplies each column by sqrt(self.weights)\n\n Parameters\n ----------\n X : array-like\n Data to be whitened\n\n Returns\n -------\n sqrt(weights)*X\n \"\"\"\n #print(self.weights.var()))\n X = np.asarray(X)\n if X.ndim == 1:\n return X * 
np.sqrt(self.weights)\n elif X.ndim == 2:\n return np.sqrt(self.weights)[:, None]*X\n\n def loglike(self, params):\n \"\"\"\n Returns the value of the gaussian log-likelihood function at params.\n\n Given the whitened design matrix, the log-likelihood is evaluated\n at the parameter vector `params` for the dependent variable `Y`.\n\n Parameters\n ----------\n params : array-like\n The parameter estimates.\n\n Returns\n -------\n llf : float\n The value of the log-likelihood function for a WLS Model.\n\n Notes\n --------\n .. math:: -\\\\frac{n}{2}\\\\log\\\\left(Y-\\\\hat{Y}\\\\right)-\\\\frac{n}{2}\\\\left(1+\\\\log\\\\left(\\\\frac{2\\\\pi}{n}\\\\right)\\\\right)-\\\\frac{1}{2}log\\\\left(\\\\left|W\\\\right|\\\\right)\n\n where :math:`W` is a diagonal matrix\n \"\"\"\n nobs2 = self.nobs / 2.0\n SSR = np.sum((self.wendog - np.dot(self.wexog,params))**2, axis=0)\n llf = -np.log(SSR) * nobs2 # concentrated likelihood\n llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant\n llf += 0.5 * np.sum(np.log(self.weights))\n return llf\n\n\nclass OLS(WLS):\n __doc__ = \"\"\"\n A simple ordinary least squares model.\n\n %(params)s\n %(extra_params)s\n\n Attributes\n ----------\n weights : scalar\n Has an attribute weights = array(1.0) due to inheritance from WLS.\n\n See Also\n --------\n GLS\n\n Examples\n --------\n >>> import numpy as np\n >>>\n >>> import statsmodels.api as sm\n >>>\n >>> Y = [1,3,4,5,2,3,4]\n >>> X = range(1,8)\n >>> X = sm.add_constant(X)\n >>>\n >>> model = sm.OLS(Y,X)\n >>> results = model.fit()\n >>> results.params\n array([ 2.14285714, 0.25 ])\n >>> results.tvalues\n array([ 1.87867287, 0.98019606])\n >>> print(results.t_test([1, 0])))\n \n >>> print(results.f_test(np.identity(2)))\n \n\n Notes\n -----\n No constant is added by the model unless you are using formulas.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n #TODO: change example to use datasets. This was the point of datasets!\n def __init__(self, endog, exog=None, missing='none', hasconst=None,\n **kwargs):\n super(OLS, self).__init__(endog, exog, missing=missing,\n hasconst=hasconst, **kwargs)\n if \"weights\" in self._init_keys:\n self._init_keys.remove(\"weights\")\n\n def loglike(self, params):\n \"\"\"\n The likelihood function for the clasical OLS model.\n\n Parameters\n ----------\n params : array-like\n The coefficients with which to estimate the log-likelihood.\n\n Returns\n -------\n The concentrated likelihood function evaluated at params.\n \"\"\"\n nobs2 = self.nobs / 2.0\n return -nobs2*np.log(2*np.pi)-nobs2*np.log(1/(2*nobs2) *\\\n np.dot(np.transpose(self.endog -\n np.dot(self.exog, params)),\n (self.endog - np.dot(self.exog,params)))) -\\\n nobs2\n\n def whiten(self, Y):\n \"\"\"\n OLS model whitener does nothing: returns Y.\n \"\"\"\n return Y\n\nclass GLSAR(GLS):\n __doc__ = \"\"\"\n A regression model with an AR(p) covariance structure.\n\n %(params)s\n rho : int\n Order of the autoregressive covariance\n %(extra_params)s\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> X = range(1,8)\n >>> X = sm.add_constant(X)\n >>> Y = [1,3,4,5,8,10,9]\n >>> model = sm.GLSAR(Y, X, rho=2)\n >>> for i in range(6):\n ... results = model.fit()\n ... print(\"AR coefficients: {0}\".format(model.rho))\n ... rho, sigma = sm.regression.yule_walker(results.resid,\n ... order=model.order)\n ... model = sm.GLSAR(Y, X, rho)\n ...\n AR coefficients: [ 0. 
0.]\n AR coefficients: [-0.52571491 -0.84496178]\n AR coefficients: [-0.6104153 -0.86656458]\n AR coefficients: [-0.60439494 -0.857867 ]\n AR coefficients: [-0.6048218 -0.85846157]\n AR coefficients: [-0.60479146 -0.85841922]\n >>> results.params\n array([-0.66661205, 1.60850853])\n >>> results.tvalues\n array([ -2.10304127, 21.8047269 ])\n >>> print(results.t_test([1, 0]))\n \n >>> print(results.f_test(np.identity(2)))\n \n\n Or, equivalently\n\n >>> model2 = sm.GLSAR(Y, X, rho=2)\n >>> res = model2.iterative_fit(maxiter=6)\n >>> model2.rho\n array([-0.60479146, -0.85841922])\n\n Notes\n -----\n GLSAR is considered to be experimental.\n The linear autoregressive process of order p--AR(p)--is defined as:\n TODO\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : base._missing_param_doc + base._extra_param_doc}\n def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs):\n #this looks strange, interpreting rho as order if it is int\n if isinstance(rho, np.int):\n self.order = rho\n self.rho = np.zeros(self.order, np.float64)\n else:\n self.rho = np.squeeze(np.asarray(rho))\n if len(self.rho.shape) not in [0,1]:\n raise ValueError(\"AR parameters must be a scalar or a vector\")\n if self.rho.shape == ():\n self.rho.shape = (1,)\n self.order = self.rho.shape[0]\n if exog is None:\n #JP this looks wrong, should be a regression on constant\n #results for rho estimate now identical to yule-walker on y\n #super(AR, self).__init__(endog, add_constant(endog))\n super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)),\n missing=missing, **kwargs)\n else:\n super(GLSAR, self).__init__(endog, exog, missing=missing,\n **kwargs)\n\n def iterative_fit(self, maxiter=3, rtol=1e-4, **kwds):\n \"\"\"\n Perform an iterative two-stage procedure to estimate a GLS model.\n\n The model is assumed to have AR(p) errors, AR(p) parameters and\n regression coefficients are estimated iteratively.\n\n Parameters\n ----------\n maxiter : integer, optional\n the number of iterations\n rtol : float, optional\n Relative tolerance between estimated coefficients to stop the\n estimation. Stops if\n\n max(abs(last - current) / abs(last)) < rtol\n\n \"\"\"\n # TODO: update this after going through example.\n converged = False\n i = -1 # need to initialize for maxiter < 1 (skip loop)\n history = {'params': [], 'rho':[self.rho]}\n for i in range(maxiter - 1):\n if hasattr(self, 'pinv_wexog'):\n del self.pinv_wexog\n self.initialize()\n results = self.fit()\n history['params'].append(results.params)\n if i == 0:\n last = results.params\n else:\n diff = np.max(np.abs(last - results.params) / np.abs(last))\n if diff < rtol:\n converged = True\n break\n last = results.params\n self.rho, _ = yule_walker(results.resid,\n order=self.order, df=None)\n history['rho'].append(self.rho)\n\n # why not another call to self.initialize\n # Use kwarg to insert history\n if not converged and maxiter > 0:\n # maxiter <= 0 just does OLS\n if hasattr(self, 'pinv_wexog'):\n del self.pinv_wexog\n self.initialize()\n\n # if converged then this is a duplicate fit, because we didn't update rho\n results = self.fit(history=history, **kwds)\n results.iter = i + 1\n # add last fit to history, not if duplicate fit\n if not converged:\n results.history['params'].append(results.params)\n results.iter += 1\n\n results.converged = converged\n\n return results\n\n\n def whiten(self, X):\n \"\"\"\n Whiten a series of columns according to an AR(p)\n covariance structure. 
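        For illustration, with an AR(1) structure the transformation is
        x_t - rho * x_{t-1} and the first observation is dropped (a minimal
        sketch with made-up numbers):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> x = np.array([1., 2., 4., 7.])    # made-up series
        >>> model = sm.GLSAR(x, np.ones((4, 1)), rho=np.array([0.5]))
        >>> np.allclose(model.whiten(x), x[1:] - 0.5 * x[:-1])
        True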
This drops initial p observations.\n\n Parameters\n ----------\n X : array-like\n The data to be whitened,\n\n Returns\n -------\n whitened array\n\n \"\"\"\n #TODO: notation for AR process\n X = np.asarray(X, np.float64)\n _X = X.copy()\n\n #the following loops over the first axis, works for 1d and nd\n for i in range(self.order):\n _X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]\n return _X[self.order:]\n\n\ndef yule_walker(X, order=1, method=\"unbiased\", df=None, inv=False, demean=True):\n \"\"\"\n Estimate AR(p) parameters from a sequence X using Yule-Walker equation.\n\n Unbiased or maximum-likelihood estimator (mle)\n\n See, for example:\n\n http://en.wikipedia.org/wiki/Autoregressive_moving_average_model\n\n Parameters\n ----------\n X : array-like\n 1d array\n order : integer, optional\n The order of the autoregressive process. Default is 1.\n method : string, optional\n Method can be \"unbiased\" or \"mle\" and this determines denominator in\n estimate of autocorrelation function (ACF) at lag k. If \"mle\", the\n denominator is n=X.shape[0], if \"unbiased\" the denominator is n-k.\n The default is unbiased.\n df : integer, optional\n Specifies the degrees of freedom. If `df` is supplied, then it is assumed\n the X has `df` degrees of freedom rather than `n`. Default is None.\n inv : bool\n If inv is True the inverse of R is also returned. Default is False.\n demean : bool\n True, the mean is subtracted from `X` before estimation.\n\n Returns\n -------\n rho\n The autoregressive coefficients\n sigma\n TODO\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> from statsmodels.datasets.sunspots import load\n >>> data = load()\n >>> rho, sigma = sm.regression.yule_walker(data.endog,\n order=4, method=\"mle\")\n\n >>> rho\n array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])\n >>> sigma\n 16.808022730464351\n\n \"\"\"\n #TODO: define R better, look back at notes and technical notes on YW.\n #First link here is useful\n #http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm\n method = str(method).lower()\n if method not in [\"unbiased\", \"mle\"]:\n raise ValueError(\"ACF estimation method must be 'unbiased' or 'MLE'\")\n X = np.array(X, dtype=np.float64)\n if demean:\n X -= X.mean() # automatically demean's X\n n = df or X.shape[0]\n\n if method == \"unbiased\": # this is df_resid ie., n - p\n denom = lambda k: n - k\n else:\n denom = lambda k: n\n if X.ndim > 1 and X.shape[1] != 1:\n raise ValueError(\"expecting a vector to estimate AR parameters\")\n r = np.zeros(order+1, np.float64)\n r[0] = (X**2).sum() / denom(0)\n for k in range(1,order+1):\n r[k] = (X[0:-k]*X[k:]).sum() / denom(k)\n R = toeplitz(r[:-1])\n\n rho = np.linalg.solve(R, r[1:])\n sigmasq = r[0] - (r[1:]*rho).sum()\n if inv==True:\n return rho, np.sqrt(sigmasq), np.linalg.inv(R)\n else:\n return rho, np.sqrt(sigmasq)\n\n\nclass RegressionResults(base.LikelihoodModelResults):\n \"\"\"\n This class summarizes the fit of a linear regression model.\n\n It handles the output of contrasts, estimates of covariance, etc.\n\n Returns\n -------\n **Attributes**\n\n aic\n Aikake's information criteria. For a model with a constant\n :math:`-2llf + 2(df_model + 1)`. For a model without a constant\n :math:`-2llf + 2(df_model)`.\n bic\n Bayes' information criteria For a model with a constant\n :math:`-2llf + \\log(n)(df_model+1)`. 
For a model without a constant\n :math:`-2llf + \\log(n)(df_model)`\n bse\n The standard errors of the parameter estimates.\n pinv_wexog\n See specific model class docstring\n centered_tss\n The total (weighted) sum of squares centered about the mean.\n cov_HC0\n Heteroscedasticity robust covariance matrix. See HC0_se below.\n cov_HC1\n Heteroscedasticity robust covariance matrix. See HC1_se below.\n cov_HC2\n Heteroscedasticity robust covariance matrix. See HC2_se below.\n cov_HC3\n Heteroscedasticity robust covariance matrix. See HC3_se below.\n cov_type\n Parameter covariance estimator used for standard errors and t-stats\n df_model\n Model degress of freedom. The number of regressors `p`. Does not\n include the constant if one is present\n df_resid\n Residual degrees of freedom. `n - p - 1`, if a constant is present.\n `n - p` if a constant is not included.\n ess\n Explained sum of squares. If a constant is present, the centered\n total sum of squares minus the sum of squared residuals. If there is\n no constant, the uncentered total sum of squares is used.\n fvalue\n F-statistic of the fully specified model. Calculated as the mean\n squared error of the model divided by the mean squared error of the\n residuals.\n f_pvalue\n p-value of the F-statistic\n fittedvalues\n The predicted the values for the original (unwhitened) design.\n het_scale\n adjusted squared residuals for heteroscedasticity robust standard\n errors. Is only available after `HC#_se` or `cov_HC#` is called.\n See HC#_se for more information.\n history\n Estimation history for iterative estimators\n HC0_se\n White's (1980) heteroskedasticity robust standard errors.\n Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1)\n where e_i = resid[i]\n HC0_se is a cached property.\n When HC0_se or cov_HC0 is called the RegressionResults instance will\n then have another attribute `het_scale`, which is in this case is just\n resid**2.\n HC1_se\n MacKinnon and White's (1985) alternative heteroskedasticity robust\n standard errors.\n Defined as sqrt(diag(n/(n-p)*HC_0)\n HC1_see is a cached property.\n When HC1_se or cov_HC1 is called the RegressionResults instance will\n then have another attribute `het_scale`, which is in this case is\n n/(n-p)*resid**2.\n HC2_se\n MacKinnon and White's (1985) alternative heteroskedasticity robust\n standard errors.\n Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)\n where h_ii = x_i(X.T X)^(-1)x_i.T\n HC2_see is a cached property.\n When HC2_se or cov_HC2 is called the RegressionResults instance will\n then have another attribute `het_scale`, which is in this case is\n resid^(2)/(1-h_ii).\n HC3_se\n MacKinnon and White's (1985) alternative heteroskedasticity robust\n standard errors.\n Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)\n where h_ii = x_i(X.T X)^(-1)x_i.T\n HC3_see is a cached property.\n When HC3_se or cov_HC3 is called the RegressionResults instance will\n then have another attribute `het_scale`, which is in this case is\n resid^(2)/(1-h_ii)^(2).\n model\n A pointer to the model instance that called fit() or results.\n mse_model\n Mean squared error the model. This is the explained sum of squares\n divided by the model degrees of freedom.\n mse_resid\n Mean squared error of the residuals. The sum of squared residuals\n divided by the residual degrees of freedom.\n mse_total\n Total mean squared error. 
Defined as the uncentered total sum of\n squares divided by n the number of observations.\n nobs\n Number of observations n.\n normalized_cov_params\n See specific model class docstring\n params\n The linear coefficients that minimize the least squares criterion. This\n is usually called Beta for the classical linear model.\n pvalues\n The two-tailed p values for the t-stats of the params.\n resid\n The residuals of the model.\n resid_pearson\n `wresid` normalized to have unit variance.\n rsquared\n R-squared of a model with an intercept. This is defined here as\n 1 - `ssr`/`centered_tss` if the constant is included in the model and\n 1 - `ssr`/`uncentered_tss` if the constant is omitted.\n rsquared_adj\n Adjusted R-squared. This is defined here as\n 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included\n and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included.\n scale\n A scale factor for the covariance matrix.\n Default value is ssr/(n-p). Note that the square root of `scale` is\n often called the standard error of the regression.\n ssr\n Sum of squared (whitened) residuals.\n uncentered_tss\n Uncentered sum of squares. Sum of the squared values of the\n (whitened) endogenous response variable.\n wresid\n The residuals of the transformed/whitened regressand and regressor(s)\n \"\"\"\n\n _cache = {} # needs to be a class attribute for scale setter?\n\n def __init__(self, model, params, normalized_cov_params=None, scale=1.,\n cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):\n super(RegressionResults, self).__init__(model, params,\n normalized_cov_params,\n scale)\n\n self._cache = resettable_cache()\n if hasattr(model, 'wexog_singular_values'):\n self._wexog_singular_values = model.wexog_singular_values\n else:\n self._wexog_singular_values = None\n\n self.df_model = model.df_model\n self.df_resid = model.df_resid\n\n\n\n if cov_type == 'nonrobust':\n self.cov_type = 'nonrobust'\n self.cov_kwds = {'description' : 'Standard Errors assume that the ' +\n 'covariance matrix of the errors is correctly ' +\n 'specified.'}\n if use_t is None:\n self.use_t = True # TODO: class default\n else:\n if cov_kwds is None:\n cov_kwds = {}\n if 'use_t' in cov_kwds:\n # TODO: we want to get rid of 'use_t' in cov_kwds\n use_t_2 = cov_kwds.pop('use_t')\n if use_t is None:\n use_t = use_t_2\n # TODO: warn or not?\n self.get_robustcov_results(cov_type=cov_type, use_self=True,\n use_t=use_t, **cov_kwds)\n for key in kwargs:\n setattr(self, key, kwargs[key])\n\n def __str__(self):\n self.summary()\n\n def conf_int(self, alpha=.05, cols=None):\n \"\"\"\n Returns the confidence interval of the fitted parameters.\n\n Parameters\n ----------\n alpha : float, optional\n The `alpha` level for the confidence interval.\n ie., The default `alpha` = .05 returns a 95% confidence interval.\n cols : array-like, optional\n `cols` specifies which confidence intervals to return\n\n Notes\n -----\n The confidence interval is based on Student's t-distribution.\n \"\"\"\n # keep method for docstring for now\n ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)\n return ci\n\n\n @cache_readonly\n def nobs(self):\n return float(self.model.wexog.shape[0])\n\n @cache_readonly\n def fittedvalues(self):\n return self.model.predict(self.params, self.model.exog)\n\n @cache_readonly\n def wresid(self):\n return self.model.wendog - self.model.predict(self.params,\n self.model.wexog)\n\n @cache_readonly\n def resid(self):\n return self.model.endog - self.model.predict(self.params,\n 
self.model.exog)\n\n #TODO: fix writable example\n @cache_writable()\n def scale(self):\n wresid = self.wresid\n return np.dot(wresid, wresid) / self.df_resid\n\n @cache_readonly\n def ssr(self):\n wresid = self.wresid\n return np.dot(wresid, wresid)\n\n @cache_readonly\n def centered_tss(self):\n model = self.model\n weights = getattr(model, 'weights', None)\n if weights is not None:\n return np.sum(weights*(model.endog - np.average(model.endog,\n weights=weights))**2)\n else: # this is probably broken for GLS\n centered_endog = model.wendog - model.wendog.mean()\n return np.dot(centered_endog, centered_endog)\n\n @cache_readonly\n def uncentered_tss(self):\n wendog = self.model.wendog\n return np.dot(wendog, wendog)\n\n @cache_readonly\n def ess(self):\n if self.k_constant:\n return self.centered_tss - self.ssr\n else:\n return self.uncentered_tss - self.ssr\n\n @cache_readonly\n def rsquared(self):\n if self.k_constant:\n return 1 - self.ssr/self.centered_tss\n else:\n return 1 - self.ssr/self.uncentered_tss\n\n @cache_readonly\n def rsquared_adj(self):\n return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared)\n\n @cache_readonly\n def mse_model(self):\n return self.ess/self.df_model\n\n @cache_readonly\n def mse_resid(self):\n return self.ssr/self.df_resid\n\n @cache_readonly\n def mse_total(self):\n if self.k_constant:\n return self.centered_tss / (self.df_resid + self.df_model)\n else:\n return self.uncentered_tss / (self.df_resid + self.df_model)\n\n @cache_readonly\n def fvalue(self):\n if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust':\n # with heteroscedasticity or correlation robustness\n k_params = self.normalized_cov_params.shape[0]\n mat = np.eye(k_params)\n const_idx = self.model.data.const_idx\n # TODO: What if model includes implcit constant, e.g. 
all dummies but no constant regressor?\n # TODO: Restats as LM test by projecting orthogonalizing to constant?\n if self.model.data.k_constant == 1:\n # if constant is implicit, return nan see #2444\n if const_idx is None:\n return np.nan\n\n idx = lrange(k_params)\n idx.pop(const_idx)\n mat = mat[idx] # remove constant\n ft = self.f_test(mat)\n # using backdoor to set another attribute that we already have\n self._cache['f_pvalue'] = ft.pvalue\n return ft.fvalue\n else:\n # for standard homoscedastic case\n return self.mse_model/self.mse_resid\n\n @cache_readonly\n def f_pvalue(self):\n return stats.f.sf(self.fvalue, self.df_model, self.df_resid)\n\n @cache_readonly\n def bse(self):\n return np.sqrt(np.diag(self.cov_params()))\n\n\n @cache_readonly\n def aic(self):\n return -2 * self.llf + 2 * (self.df_model + self.k_constant)\n\n @cache_readonly\n def bic(self):\n return (-2 * self.llf + np.log(self.nobs) * (self.df_model +\n self.k_constant))\n\n @cache_readonly\n def eigenvals(self):\n \"\"\"\n Return eigenvalues sorted in decreasing order.\n \"\"\"\n if self._wexog_singular_values is not None:\n eigvals = self._wexog_singular_values ** 2\n else:\n eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T, self.model.wexog))\n return np.sort(eigvals)[::-1]\n\n @cache_readonly\n def condition_number(self):\n \"\"\"\n Return condition number of exogenous matrix.\n\n Calculated as ratio of largest to smallest eigenvalue.\n \"\"\"\n eigvals = self.eigenvals\n return np.sqrt(eigvals[0]/eigvals[-1])\n\n #TODO: make these properties reset bse\n def _HCCM(self, scale):\n H = np.dot(self.model.pinv_wexog,\n scale[:,None]*self.model.pinv_wexog.T)\n return H\n\n\n @cache_readonly\n def cov_HC0(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n\n self.het_scale = self.wresid**2\n cov_HC0 = self._HCCM(self.het_scale)\n return cov_HC0\n\n\n @cache_readonly\n def cov_HC1(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n\n self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)\n cov_HC1 = self._HCCM(self.het_scale)\n return cov_HC1\n\n\n @cache_readonly\n def cov_HC2(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n\n # probably could be optimized\n h = np.diag(chain_dot(self.model.wexog,\n self.normalized_cov_params,\n self.model.wexog.T))\n self.het_scale = self.wresid**2/(1-h)\n cov_HC2 = self._HCCM(self.het_scale)\n return cov_HC2\n\n\n @cache_readonly\n def cov_HC3(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n h = np.diag(chain_dot(self.model.wexog,\n self.normalized_cov_params,\n self.model.wexog.T))\n self.het_scale=(self.wresid/(1-h))**2\n cov_HC3 = self._HCCM(self.het_scale)\n return cov_HC3\n\n\n @cache_readonly\n def HC0_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n return np.sqrt(np.diag(self.cov_HC0))\n\n\n @cache_readonly\n def HC1_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n return np.sqrt(np.diag(self.cov_HC1))\n\n\n @cache_readonly\n def HC2_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n return np.sqrt(np.diag(self.cov_HC2))\n\n\n @cache_readonly\n def HC3_se(self):\n \"\"\"\n See statsmodels.RegressionResults\n \"\"\"\n return np.sqrt(np.diag(self.cov_HC3))\n\n\n @cache_readonly\n def resid_pearson(self):\n \"\"\"\n Residuals, normalized to have unit variance.\n\n Returns\n -------\n An array wresid/sqrt(scale)\n \"\"\"\n\n if not hasattr(self, 'resid'):\n raise ValueError('Method requires residuals.')\n eps = np.finfo(self.wresid.dtype).eps\n if 
np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():\n # don't divide if scale is zero close to numerical precision\n from warnings import warn\n warn(\"All residuals are 0, cannot compute normed residuals.\",\n RuntimeWarning)\n return self.wresid\n else:\n return self.wresid / np.sqrt(self.scale)\n\n def _is_nested(self, restricted):\n \"\"\"\n Parameters\n ----------\n restricted : Result instance\n The restricted model is assumed to be nested in the current\n model. The result instance of the restricted model is required to\n have two attributes, residual sum of squares, `ssr`, residual\n degrees of freedom, `df_resid`.\n\n Returns\n -------\n nested : bool\n True if nested, otherwise false\n\n Notes\n -----\n A most nests another model if the regressors in the smaller model are spanned\n by the regressors in the larger model and the regressand is identical.\n \"\"\"\n\n if self.model.nobs != restricted.model.nobs:\n return False\n\n full_rank = self.model.rank\n restricted_rank = restricted.model.rank\n if full_rank <= restricted_rank:\n return False\n\n restricted_exog = restricted.model.wexog\n full_wresid = self.wresid\n\n scores = restricted_exog * full_wresid[:,None]\n score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))\n # TODO: Could be improved, and may fail depending on scale of regressors\n return np.allclose(score_l2,0)\n\n\n def compare_lm_test(self, restricted, demean=True, use_lr=False):\n \"\"\"Use Lagrange Multiplier test to test whether restricted model is correct\n\n Parameters\n ----------\n restricted : Result instance\n The restricted model is assumed to be nested in the current\n model. The result instance of the restricted model is required to\n have two attributes, residual sum of squares, `ssr`, residual\n degrees of freedom, `df_resid`.\n\n demean : bool\n Flag indicating whether the demean the scores based on the residuals\n from the restricted model. If True, the covariance of the scores\n are used and the LM test is identical to the large sample version\n of the LR test.\n\n Returns\n -------\n lm_value : float\n test statistic, chi2 distributed\n p_value : float\n p-value of the test statistic\n df_diff : int\n degrees of freedom of the restriction, i.e. difference in df between\n models\n\n Notes\n -----\n TODO: explain LM text\n \"\"\"\n import statsmodels.stats.sandwich_covariance as sw\n from numpy.linalg import inv\n\n if not self._is_nested(restricted):\n raise ValueError(\"Restricted model is not nested by full model.\")\n\n wresid = restricted.wresid\n wexog = self.model.wexog\n scores = wexog * wresid[:,None]\n\n n = self.nobs\n df_full = self.df_resid\n df_restr = restricted.df_resid\n df_diff = (df_restr - df_full)\n\n s = scores.mean(axis=0)\n if use_lr:\n scores = wexog * self.wresid[:,None]\n demean = False\n\n if demean:\n scores = scores - scores.mean(0)[None,:]\n # Form matters here. 
If homoskedastics can be sigma^2 (X'X)^-1\n # If Heteroskedastic then the form below is fine\n # If HAC then need to use HAC\n # If Cluster, shoudl use cluster\n\n cov_type = getattr(self, 'cov_type', 'nonrobust')\n if cov_type == 'nonrobust':\n sigma2 = np.mean(wresid**2)\n XpX = np.dot(wexog.T,wexog) / n\n Sinv = inv(sigma2 * XpX)\n elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):\n Sinv = inv(np.dot(scores.T,scores) / n)\n elif cov_type == 'HAC':\n print(\"HAC\")\n maxlags = self.cov_kwds['maxlags']\n Sinv = inv(sw.S_hac_simple(scores, maxlags) / n)\n elif cov_type == 'cluster':\n #cluster robust standard errors\n groups = self.cov_kwds['groups']\n # TODO: Might need demean option in S_crosssection by group?\n Sinv = inv(sw.S_crosssection(scores, groups))\n else:\n raise ValueError('Only nonrobust, HC, HAC and cluster are ' +\n 'currently connected')\n\n lm_value = n * chain_dot(s,Sinv,s.T)\n p_value = stats.chi2.sf(lm_value, df_diff)\n return lm_value, p_value, df_diff\n\n\n\n def compare_f_test(self, restricted):\n \"\"\"use F test to test whether restricted model is correct\n\n Parameters\n ----------\n restricted : Result instance\n The restricted model is assumed to be nested in the current\n model. The result instance of the restricted model is required to\n have two attributes, residual sum of squares, `ssr`, residual\n degrees of freedom, `df_resid`.\n\n Returns\n -------\n f_value : float\n test statistic, F distributed\n p_value : float\n p-value of the test statistic\n df_diff : int\n degrees of freedom of the restriction, i.e. difference in df between\n models\n\n Notes\n -----\n See mailing list discussion October 17,\n\n This test compares the residual sum of squares of the two models.\n This is not a valid test, if there is unspecified heteroscedasticity\n or correlation. This method will issue a warning if this is detected\n but still return the results under the assumption of homoscedasticity\n and no autocorrelation (sphericity).\n\n \"\"\"\n\n has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust'\n has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=\n 'nonrobust')\n\n if has_robust1 or has_robust2:\n warnings.warn('F test for comparison is likely invalid with ' +\n 'robust covariance, proceeding anyway',\n InvalidTestWarning)\n\n ssr_full = self.ssr\n ssr_restr = restricted.ssr\n df_full = self.df_resid\n df_restr = restricted.df_resid\n\n df_diff = (df_restr - df_full)\n f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full\n p_value = stats.f.sf(f_value, df_diff, df_full)\n return f_value, p_value, df_diff\n\n def compare_lr_test(self, restricted, large_sample=False):\n \"\"\"\n Likelihood ratio test to test whether restricted model is correct\n\n Parameters\n ----------\n restricted : Result instance\n The restricted model is assumed to be nested in the current model.\n The result instance of the restricted model is required to have two\n attributes, residual sum of squares, `ssr`, residual degrees of\n freedom, `df_resid`.\n\n large_sample : bool\n Flag indicating whether to use a heteroskedasticity robust version\n of the LR test, which is a modified LM test.\n\n Returns\n -------\n lr_stat : float\n likelihood ratio, chisquare distributed with df_diff degrees of\n freedom\n p_value : float\n p-value of the test statistic\n df_diff : int\n degrees of freedom of the restriction, i.e. difference in df between\n models\n\n Notes\n -----\n\n The exact likelihood ratio is valid for homoskedastic data, and is\n defined as\n\n .. 
math:: D=-2\\\\log\\\\left(\\\\frac{\\\\mathcal{L}_{null}}\n {\\\\mathcal{L}_{alternative}}\\\\right)\n\n where :math:`\\mathcal{L}` is the likelihood of the model. With :math:`D`\n distributed as chisquare with df equal to difference in number of\n parameters or equivalently difference in residual degrees of freedom.\n\n The large sample version of the likelihood ratio is defined as\n\n .. math:: D=n s^{\\\\prime}S^{-1}s\n\n where :math:`s=n^{-1}\\\\sum_{i=1}^{n} s_{i}`\n\n .. math:: s_{i} = x_{i,alternative} \\\\epsilon_{i,null}\n\n is the average score of the model evaluated using the residuals from\n null model and the regressors from the alternative model and :math:`S`\n is the covariance of the scores, :math:`s_{i}`. The covariance of the\n scores is estimated using the same estimator as in the alternative model.\n\n This test compares the loglikelihood of the two models.\n This may not be a valid test, if there is unspecified heteroscedasticity\n or correlation. This method will issue a warning if this is detected\n but still return the results without taking unspecified\n heteroscedasticity or correlation into account.\n\n This test compares the loglikelihood of the two models.\n This may not be a valid test, if there is unspecified heteroscedasticity\n or correlation. This method will issue a warning if this is detected\n but still return the results without taking unspecified\n heteroscedasticity or correlation into account.\n\n is the average score of the model evaluated using the residuals from\n null model and the regressors from the alternative model and :math:`S`\n is the covariance of the scores, :math:`s_{i}`. The covariance of the\n scores is estimated using the same estimator as in the alternative model.\n\n TODO: put into separate function, needs tests\n \"\"\"\n\n # See mailing list discussion October 17,\n\n if large_sample:\n return self.compare_lm_test(restricted, use_lr=True)\n\n has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')\n has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') !=\n 'nonrobust')\n\n if has_robust1 or has_robust2:\n warnings.warn('Likelihood Ratio test is likely invalid with ' +\n 'robust covariance, proceeding anyway',\n InvalidTestWarning)\n\n llf_full = self.llf\n llf_restr = restricted.llf\n df_full = self.df_resid\n df_restr = restricted.df_resid\n\n lrdf = (df_restr - df_full)\n lrstat = -2*(llf_restr - llf_full)\n lr_pvalue = stats.chi2.sf(lrstat, lrdf)\n\n return lrstat, lr_pvalue, lrdf\n\n\n def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):\n \"\"\"create new results instance with robust covariance as default\n\n Parameters\n ----------\n cov_type : string\n the type of robust sandwich estimator to use. 
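        For illustration, a minimal sketch of requesting robust standard
        errors after the fact (made-up data; of the two calls shown,
        `maxlags` is only needed for the HAC case):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> np.random.seed(0)
        >>> x = sm.add_constant(np.random.standard_normal((30, 2)))
        >>> y = np.dot(x, [1., 0.5, -0.5]) + np.random.standard_normal(30)
        >>> res = sm.OLS(y, x).fit()
        >>> res_hc = res.get_robustcov_results(cov_type='HC3')
        >>> res_hac = res.get_robustcov_results(cov_type='HAC', maxlags=2)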
see Notes below\n use_t : bool\n If true, then the t distribution is used for inference.\n If false, then the normal distribution is used.\n If `use_t` is None, then an appropriate default is used, which is\n `true` if the cov_type is nonrobust, and `false` in all other cases.\n kwds : depends on cov_type\n Required or optional arguments for robust covariance calculation.\n see Notes below\n\n Returns\n -------\n results : results instance\n This method creates a new results instance with the requested\n robust covariance as the default covariance of the parameters.\n Inferential statistics like p-values and hypothesis tests will be\n based on this covariance matrix.\n\n Notes\n -----\n The following covariance types and required or optional arguments are\n currently available:\n\n - 'fixed scale' and optional keyword argument 'scale' which uses\n a predefined scale estimate with default equal to one.\n - 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:\n heteroscedasticity robust covariance\n - 'HAC' and keywords\n\n - `maxlag` integer (required) : number of lags to use\n - `kernel` string (optional) : kernel, default is Bartlett\n - `use_correction` bool (optional) : If true, use small sample\n correction\n\n - 'cluster' and required keyword `groups`, integer group indicator\n\n - `groups` array_like, integer (required) :\n index of clusters or groups\n - `use_correction` bool (optional) :\n If True the sandwich covariance is calulated with a small\n sample correction.\n If False the the sandwich covariance is calulated without\n small sample correction.\n - `df_correction` bool (optional)\n If True (default), then the degrees of freedom for the\n inferential statistics and hypothesis tests, such as\n pvalues, f_pvalue, conf_int, and t_test and f_test, are\n based on the number of groups minus one instead of the\n total number of observations minus the number of explanatory\n variables. 
`df_resid` of the results instance is adjusted.\n If False, then `df_resid` of the results instance is not\n adjusted.\n\n - 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and\n autocorrelation robust standard errors in panel data\n keywords\n\n - `time` array_like (required) : index of time periods\n - `maxlag` integer (required) : number of lags to use\n - `kernel` string (optional) : kernel, default is Bartlett\n - `use_correction` False or string in ['hac', 'cluster'] (optional) :\n If False the the sandwich covariance is calulated without\n small sample correction.\n If `use_correction = 'cluster'` (default), then the same\n small sample correction as in the case of 'covtype='cluster''\n is used.\n - `df_correction` bool (optional)\n adjustment to df_resid, see cov_type 'cluster' above\n #TODO: we need more options here\n\n - 'hac-panel' heteroscedasticity and autocorrelation robust standard\n errors in panel data.\n The data needs to be sorted in this case, the time series for\n each panel unit or cluster need to be stacked.\n keywords\n\n - `time` array_like (required) : index of time periods\n\n - `maxlag` integer (required) : number of lags to use\n - `kernel` string (optional) : kernel, default is Bartlett\n - `use_correction` False or string in ['hac', 'cluster'] (optional) :\n If False the the sandwich covariance is calulated without\n small sample correction.\n - `df_correction` bool (optional)\n adjustment to df_resid, see cov_type 'cluster' above\n #TODO: we need more options here\n\n Reminder:\n `use_correction` in \"nw-groupsum\" and \"nw-panel\" is not bool,\n needs to be in [False, 'hac', 'cluster']\n\n TODO: Currently there is no check for extra or misspelled keywords,\n except in the case of cov_type `HCx`\n\n \"\"\"\n\n import statsmodels.stats.sandwich_covariance as sw\n\n # TODO: make separate function that returns a robust cov plus info\n use_self = kwds.pop('use_self', False)\n if use_self:\n res = self\n else:\n res = self.__class__(self.model, self.params,\n normalized_cov_params=self.normalized_cov_params,\n scale=self.scale)\n\n res.cov_type = cov_type\n # use_t might already be defined by the class, and already set\n if use_t is None:\n use_t = self.use_t\n res.cov_kwds = {'use_t':use_t} # store for information\n res.use_t = use_t\n\n adjust_df = False\n if cov_type in ['cluster', 'nw-panel', 'nw-groupsum']:\n df_correction = kwds.get('df_correction', None)\n # TODO: check also use_correction, do I need all combinations?\n if df_correction is not False: # i.e. 
in [None, True]:\n # user didn't explicitely set it to False\n adjust_df = True\n\n res.cov_kwds['adjust_df'] = adjust_df\n\n # verify and set kwds, and calculate cov\n # TODO: this should be outsourced in a function so we can reuse it in\n # other models\n # TODO: make it DRYer repeated code for checking kwds\n if cov_type in ['fixed scale', 'fixed_scale']:\n res.cov_kwds['description'] = ('Standard Errors are based on ' +\n 'fixed scale')\n\n res.cov_kwds['scale'] = scale = kwds.get('scale', 1.)\n res.cov_params_default = scale * res.normalized_cov_params\n elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):\n if kwds:\n raise ValueError('heteroscedasticity robust covarians ' +\n 'does not use keywords')\n res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +\n 'robust ' + '(' + cov_type + ')')\n # TODO cannot access cov without calling se first\n getattr(self, cov_type.upper() + '_se')\n res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())\n elif cov_type == 'HAC':\n maxlags = kwds['maxlags'] # required?, default in cov_hac_simple\n res.cov_kwds['maxlags'] = maxlags\n use_correction = kwds.get('use_correction', False)\n res.cov_kwds['use_correction'] = use_correction\n res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' +\n 'and autocorrelation robust (HAC) using %d lags and %s small ' +\n 'sample correction') % (maxlags, ['without', 'with'][use_correction])\n\n res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags,\n use_correction=use_correction)\n elif cov_type == 'cluster':\n #cluster robust standard errors, one- or two-way\n groups = kwds['groups']\n if not hasattr(groups, 'shape'):\n groups = np.asarray(groups).T\n\n if groups.ndim >= 2:\n groups = groups.squeeze()\n\n res.cov_kwds['groups'] = groups\n use_correction = kwds.get('use_correction', True)\n res.cov_kwds['use_correction'] = use_correction\n if groups.ndim == 1:\n if adjust_df:\n # need to find number of groups\n # duplicate work\n self.n_groups = n_groups = len(np.unique(groups))\n res.cov_params_default = sw.cov_cluster(self, groups,\n use_correction=use_correction)\n\n elif groups.ndim == 2:\n if hasattr(groups, 'values'):\n groups = groups.values\n\n if adjust_df:\n # need to find number of groups\n # duplicate work\n n_groups0 = len(np.unique(groups[:,0]))\n n_groups1 = len(np.unique(groups[:, 1]))\n self.n_groups = (n_groups0, n_groups1)\n n_groups = min(n_groups0, n_groups1) # use for adjust_df\n\n # Note: sw.cov_cluster_2groups has 3 returns\n res.cov_params_default = sw.cov_cluster_2groups(self, groups,\n use_correction=use_correction)[0]\n else:\n raise ValueError('only two groups are supported')\n res.cov_kwds['description'] = ('Standard Errors are robust to' +\n 'cluster correlation ' + '(' + cov_type + ')')\n\n elif cov_type == 'nw-panel':\n #cluster robust standard errors\n res.cov_kwds['time'] = time = kwds['time']\n #TODO: nlags is currently required\n #nlags = kwds.get('nlags', True)\n #res.cov_kwds['nlags'] = nlags\n #TODO: `nlags` or `maxlags`\n res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']\n use_correction = kwds.get('use_correction', 'hac')\n res.cov_kwds['use_correction'] = use_correction\n weights_func = kwds.get('weights_func', sw.weights_bartlett)\n res.cov_kwds['weights_func'] = weights_func\n # TODO: clumsy time index in cov_nw_panel\n tt = (np.nonzero(np.diff(time) < 0)[0] + 1).tolist()\n groupidx = lzip([0] + tt, tt + [len(time)])\n self.n_groups = n_groups = len(groupidx)\n res.cov_params_default = sw.cov_nw_panel(self, 
maxlags, groupidx,\n weights_func=weights_func,\n use_correction=use_correction)\n res.cov_kwds['description'] = ('Standard Errors are robust to' +\n 'cluster correlation ' + '(' + cov_type + ')')\n elif cov_type == 'nw-groupsum':\n # Driscoll-Kraay standard errors\n res.cov_kwds['time'] = time = kwds['time']\n #TODO: nlags is currently required\n #nlags = kwds.get('nlags', True)\n #res.cov_kwds['nlags'] = nlags\n #TODO: `nlags` or `maxlags`\n res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']\n use_correction = kwds.get('use_correction', 'cluster')\n res.cov_kwds['use_correction'] = use_correction\n weights_func = kwds.get('weights_func', sw.weights_bartlett)\n res.cov_kwds['weights_func'] = weights_func\n if adjust_df:\n # need to find number of groups\n tt = (np.nonzero(np.diff(time) < 0)[0] + 1)\n self.n_groups = n_groups = len(tt) + 1\n res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time,\n weights_func=weights_func,\n use_correction=use_correction)\n res.cov_kwds['description'] = (\n 'Driscoll and Kraay Standard Errors are robust to ' +\n 'cluster correlation ' + '(' + cov_type + ')')\n else:\n raise ValueError('cov_type not recognized. See docstring for ' +\n 'available options and spelling')\n\n if adjust_df:\n # Note: df_resid is used for scale and others, add new attribute\n res.df_resid_inference = n_groups - 1\n\n return res\n\n\n def get_prediction(self, exog=None, transform=True, weights=None,\n row_labels=None, **kwds):\n\n return pred.get_prediction(self, exog=exog, transform=transform,\n weights=weights, row_labels=row_labels, **kwds)\n\n get_prediction.__doc__ = pred.get_prediction.__doc__\n\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05):\n \"\"\"Summarize the Regression Results\n\n Parameters\n -----------\n yname : string, optional\n Default is `y`\n xname : list of strings, optional\n Default is `var_##` for ## in p the number of regressors\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n\n #TODO: import where we need it (for now), add as cached attributes\n from statsmodels.stats.stattools import (jarque_bera,\n omni_normtest, durbin_watson)\n jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)\n omni, omnipv = omni_normtest(self.wresid)\n\n eigvals = self.eigenvals\n condno = self.condition_number\n\n self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,\n omni=omni, omnipv=omnipv, condno=condno,\n mineigval=eigvals[-1])\n\n #TODO not used yet\n #diagn_left_header = ['Models stats']\n #diagn_right_header = ['Residual stats']\n\n #TODO: requiring list/iterable is a bit annoying\n #need more control over formatting\n #TODO: default don't work if it's not identically spelled\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Method:', ['Least Squares']),\n ('Date:', None),\n ('Time:', None),\n ('No. Observations:', None),\n ('Df Residuals:', None), #[self.df_resid]), #TODO: spelling\n ('Df Model:', None), #[self.df_model])\n ]\n\n if hasattr(self, 'cov_type'):\n top_left.append(('Covariance Type:', [self.cov_type]))\n\n top_right = [('R-squared:', [\"%#8.3f\" % self.rsquared]),\n ('Adj. 
R-squared:', [\"%#8.3f\" % self.rsquared_adj]),\n ('F-statistic:', [\"%#8.4g\" % self.fvalue] ),\n ('Prob (F-statistic):', [\"%#6.3g\" % self.f_pvalue]),\n ('Log-Likelihood:', None), #[\"%#6.4g\" % self.llf]),\n ('AIC:', [\"%#8.4g\" % self.aic]),\n ('BIC:', [\"%#8.4g\" % self.bic])\n ]\n\n diagn_left = [('Omnibus:', [\"%#6.3f\" % omni]),\n ('Prob(Omnibus):', [\"%#6.3f\" % omnipv]),\n ('Skew:', [\"%#6.3f\" % skew]),\n ('Kurtosis:', [\"%#6.3f\" % kurtosis])\n ]\n\n diagn_right = [('Durbin-Watson:', [\"%#8.3f\" % durbin_watson(self.wresid)]),\n ('Jarque-Bera (JB):', [\"%#8.3f\" % jb]),\n ('Prob(JB):', [\"%#8.3g\" % jbpv]),\n ('Cond. No.', [\"%#8.3g\" % condno])\n ]\n\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Regression Results\"\n\n #create summary table instance\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n yname=yname, xname=xname,\n title=\"\")\n\n #add warnings/notes, added to text format only\n etext =[]\n if hasattr(self, 'cov_type'):\n etext.append(self.cov_kwds['description'])\n if self.model.exog.shape[0] < self.model.exog.shape[1]:\n wstr = \"The input rank is higher than the number of observations.\"\n etext.append(wstr)\n if eigvals[-1] < 1e-10:\n wstr = \"The smallest eigenvalue is %6.3g. This might indicate \"\n wstr += \"that there are\\n\"\n wstr += \"strong multicollinearity problems or that the design \"\n wstr += \"matrix is singular.\"\n wstr = wstr % eigvals[-1]\n etext.append(wstr)\n elif condno > 1000: #TODO: what is recommended\n wstr = \"The condition number is large, %6.3g. This might \"\n wstr += \"indicate that there are\\n\"\n wstr += \"strong multicollinearity or other numerical \"\n wstr += \"problems.\"\n wstr = wstr % condno\n etext.append(wstr)\n\n if etext:\n etext = [\"[{0}] {1}\".format(i + 1, text) for i, text in enumerate(etext)]\n etext.insert(0, \"Warnings:\")\n smry.add_extra_txt(etext)\n\n return smry\n\n #top = summary_top(self, gleft=topleft, gright=diagn_left, #[],\n # yname=yname, xname=xname,\n # title=self.model.__class__.__name__ + ' ' +\n # \"Regression Results\")\n #par = summary_params(self, yname=yname, xname=xname, alpha=.05,\n # use_t=False)\n #\n #diagn = summary_top(self, gleft=diagn_left, gright=diagn_right,\n # yname=yname, xname=xname,\n # title=\"Linear Model\")\n #\n #return summary_return([top, par, diagn], return_fmt=return_fmt)\n\n def summary2(self, yname=None, xname=None, title=None, alpha=.05,\n float_format=\"%.4f\"):\n \"\"\"Experimental summary function to summarize the regression results\n\n Parameters\n -----------\n xname : List of strings of length equal to the number of parameters\n Names of the independent variables (optional)\n yname : string\n Name of the dependent variable (optional)\n title : string, optional\n Title for the top table. 
If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary\n results\n\n \"\"\"\n # Diagnostics\n from statsmodels.stats.stattools import (jarque_bera,\n omni_normtest,\n durbin_watson)\n\n from statsmodels.compat.collections import OrderedDict\n jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)\n omni, omnipv = omni_normtest(self.wresid)\n dw = durbin_watson(self.wresid)\n eigvals = self.eigenvals\n condno = self.condition_number\n eigvals = np.sort(eigvals) #in increasing order\n diagnostic = OrderedDict([\n ('Omnibus:', \"%.3f\" % omni),\n ('Prob(Omnibus):', \"%.3f\" % omnipv),\n ('Skew:', \"%.3f\" % skew),\n ('Kurtosis:', \"%.3f\" % kurtosis),\n ('Durbin-Watson:', \"%.3f\" % dw),\n ('Jarque-Bera (JB):', \"%.3f\" % jb),\n ('Prob(JB):', \"%.3f\" % jbpv),\n ('Condition No.:', \"%.0f\" % condno)\n ])\n\n # Summary\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n smry.add_base(results=self, alpha=alpha, float_format=float_format,\n xname=xname, yname=yname, title=title)\n smry.add_dict(diagnostic)\n\n # Warnings\n if eigvals[-1] < 1e-10:\n warn = \"The smallest eigenvalue is %6.3g. This might indicate that\\\n there are strong multicollinearity problems or that the design\\\n matrix is singular.\" % eigvals[-1]\n smry.add_text(warn)\n if condno > 1000:\n warn = \"* The condition number is large (%.g). This might indicate \\\n strong multicollinearity or other numerical problems.\" % condno\n smry.add_text(warn)\n\n return smry\n\n\nclass OLSResults(RegressionResults):\n \"\"\"\n Results class for for an OLS model.\n\n Most of the methods and attributes are inherited from RegressionResults.\n The special methods that are only available for OLS are:\n\n - get_influence\n - outlier_test\n - el_test\n - conf_int_el\n\n See Also\n --------\n RegressionResults\n\n \"\"\"\n\n def get_influence(self):\n \"\"\"\n get an instance of Influence with influence and outlier measures\n\n Returns\n -------\n infl : Influence instance\n the instance has methods to calculate the main influence and\n outlier measures for the OLS regression\n\n See also\n --------\n :class:`statsmodels.stats.outliers_influence.OLSInfluence`\n \"\"\"\n from statsmodels.stats.outliers_influence import OLSInfluence\n return OLSInfluence(self)\n\n def outlier_test(self, method='bonf', alpha=.05):\n \"\"\"\n Test observations for outliers according to method\n\n Parameters\n ----------\n method : str\n\n - `bonferroni` : one-step correction\n - `sidak` : one-step correction\n - `holm-sidak` :\n - `holm` :\n - `simes-hochberg` :\n - `hommel` :\n - `fdr_bh` : Benjamini/Hochberg\n - `fdr_by` : Benjamini/Yekutieli\n\n See `statsmodels.stats.multitest.multipletests` for details.\n alpha : float\n familywise error rate\n\n Returns\n -------\n table : ndarray or DataFrame\n Returns either an ndarray or a DataFrame if labels is not None.\n Will attempt to get labels from model_results if available. 
The\n columns are the Studentized residuals, the unadjusted p-value,\n and the corrected p-value according to method.\n\n Notes\n -----\n The unadjusted p-value is stats.t.sf(abs(resid), df) where\n df = df_resid - 1.\n \"\"\"\n from statsmodels.stats.outliers_influence import outlier_test\n return outlier_test(self, method, alpha)\n\n def el_test(self, b0_vals, param_nums, return_weights=0,\n ret_params=0, method='nm',\n stochastic_exog=1, return_params=0):\n \"\"\"\n Tests single or joint hypotheses of the regression parameters using\n Empirical Likelihood.\n\n Parameters\n ----------\n\n b0_vals : 1darray\n The hypothesized value of the parameter to be tested\n\n param_nums : 1darray\n The parameter number to be tested\n\n print_weights : bool\n If true, returns the weights that optimize the likelihood\n ratio at b0_vals. Default is False\n\n ret_params : bool\n If true, returns the parameter vector that maximizes the likelihood\n ratio at b0_vals. Also returns the weights. Default is False\n\n method : string\n Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The\n optimization method that optimizes over nuisance parameters.\n Default is 'nm'\n\n stochastic_exog : bool\n When TRUE, the exogenous variables are assumed to be stochastic.\n When the regressors are nonstochastic, moment conditions are\n placed on the exogenous variables. Confidence intervals for\n stochastic regressors are at least as large as non-stochastic\n regressors. Default = TRUE\n\n Returns\n -------\n\n res : tuple\n The p-value and -2 times the log-likelihood ratio for the\n hypothesized values.\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> data = sm.datasets.stackloss.load()\n >>> endog = data.endog\n >>> exog = sm.add_constant(data.exog)\n >>> model = sm.OLS(endog, exog)\n >>> fitted = model.fit()\n >>> fitted.params\n >>> array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252])\n >>> fitted.rsquared\n >>> 0.91357690446068196\n >>> # Test that the slope on the first variable is 0\n >>> fitted.test_beta([0], [1])\n >>> (1.7894660442330235e-07, 27.248146353709153)\n \"\"\"\n params = np.copy(self.params)\n opt_fun_inst = _ELRegOpts() # to store weights\n if len(param_nums) == len(params):\n llr = opt_fun_inst._opt_nuis_regress([],\n param_nums=param_nums,\n endog=self.model.endog,\n exog=self.model.exog,\n nobs=self.model.nobs,\n nvar=self.model.exog.shape[1],\n params=params,\n b0_vals=b0_vals,\n stochastic_exog=stochastic_exog)\n pval = 1 - stats.chi2.cdf(llr, len(param_nums))\n if return_weights:\n return llr, pval, opt_fun_inst.new_weights\n else:\n return llr, pval\n x0 = np.delete(params, param_nums)\n args = (param_nums, self.model.endog, self.model.exog,\n self.model.nobs, self.model.exog.shape[1], params,\n b0_vals, stochastic_exog)\n if method == 'nm':\n llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000,\n maxiter=10000, full_output=1, disp=0,\n args=args)[1]\n if method == 'powell':\n llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,\n full_output=1, disp=0,\n args=args)[1]\n\n pval = 1 - stats.chi2.cdf(llr, len(param_nums))\n if ret_params:\n return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params\n elif return_weights:\n return llr, pval, opt_fun_inst.new_weights\n else:\n return llr, pval\n\n def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None,\n method='nm', stochastic_exog=1):\n \"\"\"\n Computes the confidence interval for the parameter given by param_num\n using Empirical Likelihood\n\n 
Parameters\n ----------\n\n param_num : float\n The parameter for which the confidence interval is desired\n\n sig : float\n The significance level. Default is .05\n\n upper_bound : float\n The maximum value the upper limit can be. Default is the\n 99.9% confidence value under OLS assumptions.\n\n lower_bound : float\n The minimum value the lower limit can be. Default is the 99.9%\n confidence value under OLS assumptions.\n\n method : string\n Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The\n optimization method that optimizes over nuisance parameters.\n Default is 'nm'\n\n Returns\n -------\n\n ci : tuple\n The confidence interval\n\n See Also\n --------\n\n el_test\n\n Notes\n -----\n\n This function uses brentq to find the value of beta where\n test_beta([beta], param_num)[1] is equal to the critical\n value.\n\n The function returns the results of each iteration of brentq at\n each value of beta.\n\n The current function value of the last printed optimization\n should be the critical value at the desired significance level.\n For alpha=.05, the value is 3.841459.\n\n To ensure optimization terminated successfully, it is suggested to\n do el_test([lower_limit], [param_num])\n\n If the optimization does not terminate successfully, consider switching\n optimization algorithms.\n\n If optimization is still not successful, try changing the values of\n start_int_params. If the current function value repeatedly jumps\n from a number between 0 and the critical value and a very large number\n (>50), the starting parameters of the interior minimization need\n to be changed.\n \"\"\"\n r0 = stats.chi2.ppf(1 - sig, 1)\n if upper_bound is None:\n upper_bound = self.conf_int(.01)[param_num][1]\n if lower_bound is None:\n lower_bound = self.conf_int(.01)[param_num][0]\n f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),\n method=method,\n stochastic_exog=stochastic_exog)[0]-r0\n lowerl = optimize.brenth(f, lower_bound,\n self.params[param_num])\n upperl = optimize.brenth(f, self.params[param_num],\n upper_bound)\n # ^ Seems to be faster than brentq in most cases\n return (lowerl, upperl)\n\n\nclass RegressionResultsWrapper(wrap.ResultsWrapper):\n\n _attrs = {\n 'chisq' : 'columns',\n 'sresid' : 'rows',\n 'weights' : 'rows',\n 'wresid' : 'rows',\n 'bcov_unscaled' : 'cov',\n 'bcov_scaled' : 'cov',\n 'HC0_se' : 'columns',\n 'HC1_se' : 'columns',\n 'HC2_se' : 'columns',\n 'HC3_se' : 'columns',\n 'norm_resid' : 'rows',\n }\n\n _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,\n _attrs)\n\n _methods = {}\n\n _wrap_methods = wrap.union_dicts(\n base.LikelihoodResultsWrapper._wrap_methods,\n _methods)\n\nwrap.populate_wrapper(RegressionResultsWrapper,\n RegressionResults)\n\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n data = sm.datasets.longley.load()\n data.exog = add_constant(data.exog, prepend=False)\n ols_results = OLS(data.endog, data.exog).fit() #results\n gls_results = GLS(data.endog, data.exog).fit() #results\n print(ols_results.summary())\n tables = ols_results.summary(returns='tables')\n csv = ols_results.summary(returns='csv')\n\"\"\"\n Summary of Regression Results\n=======================================\n| Dependent Variable: ['y']|\n| Model: OLS|\n| Method: Least Squares|\n| Date: Tue, 29 Jun 2010|\n| Time: 22:32:21|\n| # obs: 16.0|\n| Df residuals: 9.0|\n| Df model: 6.0|\n===========================================================================\n| coefficient std. 
error t-statistic prob.|\n---------------------------------------------------------------------------\n| x1 15.0619 84.9149 0.1774 0.8631|\n| x2 -0.0358 0.0335 -1.0695 0.3127|\n| x3 -2.0202 0.4884 -4.1364 0.002535|\n| x4 -1.0332 0.2143 -4.8220 0.0009444|\n| x5 -0.0511 0.2261 -0.2261 0.8262|\n| x6 1829.1515 455.4785 4.0159 0.003037|\n| const -3482258.6346 890420.3836 -3.9108 0.003560|\n===========================================================================\n| Models stats Residual stats |\n---------------------------------------------------------------------------\n| R-squared: 0.995479 Durbin-Watson: 2.55949 |\n| Adjusted R-squared: 0.992465 Omnibus: 0.748615 |\n| F-statistic: 330.285 Prob(Omnibus): 0.687765 |\n| Prob (F-statistic): 4.98403e-10 JB: 0.352773 |\n| Log likelihood: -109.617 Prob(JB): 0.838294 |\n| AIC criterion: 233.235 Skew: 0.419984 |\n| BIC criterion: 238.643 Kurtosis: 2.43373 |\n---------------------------------------------------------------------------\n\"\"\"\n\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382633,"cells":{"repo_name":{"kind":"string","value":"Lucas-Armand/genetic-algorithm"},"path":{"kind":"string","value":"dev/8ºSemana/testes of speed.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"3255"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport os\nimport csv\nimport random\nimport numpy as np\nimport timeit\nimport time as Time\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom itertools import product, combinations\n\nclass Block:\n def __init__(self,point,a,b,c,weight,btype):\n self.p=point\n self.a=a\n self.b=b\n self.c=c\n self.w=weight\n self.t=btype\n \n\ndef csv_read(name):\t#Metodo de leitura, transforma um arquivo CSV em um vetor \n\n CSV=open(name,'r')\n dados=CSV.read()\n dados=dados.replace(',','.')\n dados=dados.replace(';',',')\n CSV.close()\n\n CSV=open(\"temp.csv\",'w')\n CSV.write(dados)\n CSV.close()\n\n CSV=open(\"temp.csv\",'r')\n dados=csv.reader(CSV)\n v=[]\n for i in dados:\n I=[]\n for j in i:\n try:\n j = float(j)\n except:\n pass\n I.append(j)\n v.append(I)\n CSV.close()\n os.remove(\"temp.csv\")\n return (v)\n\ndef defineGeometry(name):\n\n vect = csv_read(name)\n blockNumber ={}\n for i in vect:\n a = i[1]\n b = i[2]\n c = i[3]\n point = [i[4],i[5],i[6]]\n weight = i[7]\n btype = i[-1]\n block = Block(point,a,b,c,weight,btype)\n blockNumber[i[0]] = block\n\n return blockNumber\n\nbNumb=defineGeometry('GeometriaNavio.csv')\n \n# Define vicinity\n\n#deck\nvicinity={1:[2]} \nfor i in range(2,16):\n vicinity[i] = [i-1,i+1] \nvicinity[16] = [15] \n\n#side\nvicinity[17] = [18,19] \nvicinity[18] = [17,20] \nfor i in range(19,31):\n v = i-1 if i%2==0 else i+1\n vicinity[i] = [v,i-2,i+2]\nvicinity[31] = [29,32] \nvicinity[32] = [30,31] \n\n#bott\nvicinity[33] = [34,35] \nvicinity[34] = [33,36] \nfor i in range(35,63):\n v = i-1 if i%2==0 else i+1\n vicinity[i] = [v,i-2,i+2]\nvicinity[63] = [61,64] \nvicinity[64] = [63,62] \n\n#coff\nvicinity[65] = [66] \nfor i in range(66,70):\n vicinity[i] = [i-1,i+1] \nvicinity[70] = [69] \n\n\nalfa = 10\nbeta = 1\nbuilt = []\ntime = 0\nappend = built.append\n\ndef order(x): return vicinity[x]\n\n\ndef time(bNumb,vicinity,chromo):\n \n \n t_time = Time.time()\n \n alfa = 1\n built = []\n time = 0\n append = built.append\n \n def time_vector(x,y):\n for i in y:\n if i in built:\n time = alfa\n break\n try:time\n except: time = 10*alfa\n append(x)\n return time \n \n vic = [vicinity[x] for x in chromo]\n 
time = sum((time_vector(x,y) for x,y in zip(chromo,vic)))\n\n return time\n\nchromo = [44, 39, 56, 47, 49, 37, 42, 46, 51, 58, 60, 62, 52, 41, 35, 33, 50, 61, 54, 34, 59, 43, 48, 45, 55, 53, 38, 57, 64, 67, 68, 63, 40, 36, 21, 66, 22, 6, 20, 65, 18, 5, 17, 69, 28, 27, 70, 29, 1, 12, 30, 13, 14, 26, 31, 24, 19, 2, 3, 4, 25, 11, 32, 10, 15, 16, 9, 23, 7, 8]\n\n\nimport cProfile\ncProfile.run('time(bNumb,vicinity,chromo)')\n##\n##print timeit.timeit(setup='from __main__ import chromo;'+\n## 'from __main__ import bNumb;'+\n## 'from __main__ import time;'+\n## 'from __main__ import vicinity '\n## ,stmt='time(bNumb,vicinity,chromo)') \n#print t.timeit(number = 1000000)\n\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382634,"cells":{"repo_name":{"kind":"string","value":"jchodera/assaytools"},"path":{"kind":"string","value":"scripts/xml2png4scans.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"5177"},"content":{"kind":"string","value":"# This script takes xml data file output from the Tecan Infinite m1000 Pro plate reader\n# and makes quick and dirty images of the raw data.\n\n#But with scans and not just singlet reads.\n\n# The same procedure can be used to make matrices suitable for analysis using\n# matrix = dataframe.values\n\n# Made by Sonya Hanson, with some help from things that worked in xml2png.py\n# Friday, June 20,2014\n\n# Usage: python xml2png4scans.py *.xml\n\n############ For future to combine with xml2png.py\n#\n# for i, sect in enumerate(Sections):\n# reads = sect.xpath(\"*/Well\")\n# parameters = root.xpath(path)[0]\n# if reads[0].attrib['Type'] == \"Scan\":\n#\n##############\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom lxml import etree\nimport pandas as pd\nimport matplotlib.cm as cm\nimport seaborn\nimport sys\nimport os\n\n\n# Define extract function that extracts parameters\ndef extract(taglist):\n result = []\n for p in taglist:\n print \"Attempting to extract tag '%s'...\" % p\n try:\n param = parameters.xpath(\"*[@Name='\" + p + \"']\")[0]\n result.append( p + '=' + param.attrib['Value'])\n except:\n # tag not found\n result.append(None)\n\n return result\n\n\ndef process_files(xml_files):\n\n so_many = len(xml_files)\n print \"****This script is about to make png files for %s xml files. 
****\" % so_many\n\n for file in xml_files:\n\n # Parse XML file.\n root = etree.parse(file)\n\n # Remove extension from xml filename.\n file_name = os.path.splitext(file)[0]\n\n # Extract plate type and barcode.\n plate = root.xpath(\"/*/Header/Parameters/Parameter[@Name='Plate']\")[0]\n plate_type = plate.attrib['Value']\n\n bar = root.xpath(\"/*/Plate/BC\")[0]\n barcode = bar.text\n\n # Define Sections.\n Sections = root.xpath(\"/*/Section\")\n much = len(Sections)\n print \"****The xml file \" + file + \" has %s data sections:****\" % much\n for sect in Sections:\n print sect.attrib['Name']\n\n data = []\n\n for i, sect in enumerate(Sections):\n\n # Extract Parameters for this section.\n path = \"/*/Section[@Name='\" + sect.attrib['Name'] + \"']/Parameters\"\n parameters = root.xpath(path)[0]\n\n # Parameters are extracted slightly differently depending on Absorbance or Fluorescence read.\n if parameters[0].attrib['Value'] == \"Absorbance\":\n result = extract([\"Mode\", \"Wavelength Start\", \"Wavelength End\", \"Wavelength Step Size\"])\n title = '%s, %s, %s, %s' % tuple(result)\n\n else:\n result = extract([\"Gain\", \"Excitation Wavelength\", \"Emission Wavelength\", \"Part of Plate\", \"Mode\"])\n title = '%s, %s, %s, \\n %s, %s' % tuple(result)\n\n print \"****The %sth section has the parameters:****\" %i\n print title\n\n # Extract Reads for this section.\n Sections = root.xpath(\"/*/Section\")\n\n reads = root.xpath(\"/*/*/*/Well\")\n\n wellIDs = [read.attrib['Pos'] for read in reads]\n\n data = [(float(s.text), float(s.attrib['WL']), r.attrib['Pos'])\n for r in reads\n for s in r]\n\n dataframe = pd.DataFrame(data, columns=['fluorescence','wavelength (nm)','Well'])\n\n dataframe_pivot = pd.pivot_table(dataframe, index = 'wavelength (nm)', columns = ['Well'])\n\n # Make plot, complete with separate png for each section.\n section_name = sect.attrib['Name']\n\n fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 12))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('A' + str(i)).plot(ax=axes[0,0], title='A', c=cm.hsv(i*15))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('B' + str(i)).plot(ax=axes[0,1], title='B', c=cm.hsv(i*15))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('C' + str(i)).plot(ax=axes[0,2], title='C', c=cm.hsv(i*15))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('D' + str(i)).plot(ax=axes[1,0], title='D', c=cm.hsv(i*15))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('E' + str(i)).plot(ax=axes[1,1], title='E', c=cm.hsv(i*15))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('F' + str(i)).plot(ax=axes[1,2], title='F', c=cm.hsv(i*15))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('G' + str(i)).plot(ax=axes[2,0], title='G', c=cm.hsv(i*15))\n for i in range(1,12):\n dataframe_pivot.fluorescence.get('H' + str(i)).plot(ax=axes[2,1], title='H', c=cm.hsv(i*15))\n fig.suptitle('%s \\n %s \\n Barcode = %s' % (title, plate_type, barcode), fontsize=14)\n fig.subplots_adjust(hspace=0.3)\n plt.savefig('%s_%s.png' % (file_name, section_name))\n\n return\n\ndef entry_point():\n xml_files = sys.argv[1:]\n process_files(xml_files)\n\nif __name__ == '__main__':\n xml_files = sys.argv[1:]\n 
process_files(xml_files)\n\n\n"},"license":{"kind":"string","value":"lgpl-2.1"}}},{"rowIdx":382635,"cells":{"repo_name":{"kind":"string","value":"kazemakase/scikit-learn"},"path":{"kind":"string","value":"examples/plot_multilabel.py"},"copies":{"kind":"string","value":"87"},"size":{"kind":"string","value":"4279"},"content":{"kind":"string","value":"# Authors: Vlad Niculae, Mathieu Blondel\n# License: BSD 3 clause\n\"\"\"\n=========================\nMultilabel classification\n=========================\n\nThis example simulates a multi-label document classification problem. The\ndataset is generated randomly based on the following process:\n\n - pick the number of labels: n ~ Poisson(n_labels)\n - n times, choose a class c: c ~ Multinomial(theta)\n - pick the document length: k ~ Poisson(length)\n - k times, choose a word: w ~ Multinomial(theta_c)\n\nIn the above process, rejection sampling is used to make sure that n is more\nthan 2, and that the document length is never zero. Likewise, we reject classes\nwhich have already been chosen. The documents that are assigned to both\nclasses are plotted surrounded by two colored circles.\n\nThe classification is performed by projecting to the first two principal\ncomponents found by PCA and CCA for visualisation purposes, followed by using\nthe :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two\nSVCs with linear kernels to learn a discriminative model for each class.\nNote that PCA is used to perform an unsupervised dimensionality reduction,\nwhile CCA is used to perform a supervised one.\n\nNote: in the plot, \"unlabeled samples\" does not mean that we don't know the\nlabels (as in semi-supervised learning) but that the samples simply do *not*\nhave a label.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_multilabel_classification\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.decomposition import PCA\nfrom sklearn.cross_decomposition import CCA\n\n\ndef plot_hyperplane(clf, min_x, max_x, linestyle, label):\n # get the separating hyperplane\n w = clf.coef_[0]\n a = -w[0] / w[1]\n xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough\n yy = a * xx - (clf.intercept_[0]) / w[1]\n plt.plot(xx, yy, linestyle, label=label)\n\n\ndef plot_subfigure(X, Y, subplot, title, transform):\n if transform == \"pca\":\n X = PCA(n_components=2).fit_transform(X)\n elif transform == \"cca\":\n X = CCA(n_components=2).fit(X, Y).transform(X)\n else:\n raise ValueError\n\n min_x = np.min(X[:, 0])\n max_x = np.max(X[:, 0])\n\n min_y = np.min(X[:, 1])\n max_y = np.max(X[:, 1])\n\n classif = OneVsRestClassifier(SVC(kernel='linear'))\n classif.fit(X, Y)\n\n plt.subplot(2, 2, subplot)\n plt.title(title)\n\n zero_class = np.where(Y[:, 0])\n one_class = np.where(Y[:, 1])\n plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')\n plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',\n facecolors='none', linewidths=2, label='Class 1')\n plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',\n facecolors='none', linewidths=2, label='Class 2')\n\n plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',\n 'Boundary\\nfor class 1')\n plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',\n 'Boundary\\nfor class 2')\n plt.xticks(())\n plt.yticks(())\n\n plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)\n plt.ylim(min_y - .5 * max_y, max_y + .5 
* max_y)\n if subplot == 2:\n plt.xlabel('First principal component')\n plt.ylabel('Second principal component')\n plt.legend(loc=\"upper left\")\n\n\nplt.figure(figsize=(8, 6))\n\nX, Y = make_multilabel_classification(n_classes=2, n_labels=1,\n allow_unlabeled=True,\n return_indicator=True,\n random_state=1)\n\nplot_subfigure(X, Y, 1, \"With unlabeled samples + CCA\", \"cca\")\nplot_subfigure(X, Y, 2, \"With unlabeled samples + PCA\", \"pca\")\n\nX, Y = make_multilabel_classification(n_classes=2, n_labels=1,\n allow_unlabeled=False,\n return_indicator=True,\n random_state=1)\n\nplot_subfigure(X, Y, 3, \"Without unlabeled samples + CCA\", \"cca\")\nplot_subfigure(X, Y, 4, \"Without unlabeled samples + PCA\", \"pca\")\n\nplt.subplots_adjust(.04, .02, .97, .94, .09, .2)\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382636,"cells":{"repo_name":{"kind":"string","value":"andaag/scikit-learn"},"path":{"kind":"string","value":"examples/plot_johnson_lindenstrauss_bound.py"},"copies":{"kind":"string","value":"134"},"size":{"kind":"string","value":"7452"},"content":{"kind":"string","value":"\"\"\"\n=====================================================================\nThe Johnson-Lindenstrauss bound for embedding with random projections\n=====================================================================\n\n\nThe `Johnson-Lindenstrauss lemma`_ states that any high dimensional\ndataset can be randomly projected into a lower dimensional Euclidean\nspace while controlling the distortion in the pairwise distances.\n\n.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma\n\n\nTheoretical bounds\n==================\n\nThe distortion introduced by a random projection `p` is asserted by\nthe fact that `p` is defining an eps-embedding with good probability\nas defined by:\n\n (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2\n\nWhere u and v are any rows taken from a dataset of shape [n_samples,\nn_features] and p is a projection by a random Gaussian N(0, 1) matrix\nwith shape [n_components, n_features] (or a sparse Achlioptas matrix).\n\nThe minimum number of components to guarantees the eps-embedding is\ngiven by:\n\n n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)\n\n\nThe first plot shows that with an increasing number of samples ``n_samples``,\nthe minimal number of dimensions ``n_components`` increased logarithmically\nin order to guarantee an ``eps``-embedding.\n\nThe second plot shows that an increase of the admissible\ndistortion ``eps`` allows to reduce drastically the minimal number of\ndimensions ``n_components`` for a given number of samples ``n_samples``\n\n\nEmpirical validation\n====================\n\nWe validate the above bounds on the the digits dataset or on the 20 newsgroups\ntext document (TF-IDF word frequencies) dataset:\n\n- for the digits dataset, some 8x8 gray level pixels data for 500\n handwritten digits pictures are randomly projected to spaces for various\n larger number of dimensions ``n_components``.\n\n- for the 20 newsgroups dataset some 500 documents with 100k\n features in total are projected using a sparse random matrix to smaller\n euclidean spaces with various values for the target number of dimensions\n ``n_components``.\n\nThe default dataset is the digits dataset. 
To run the example on the twenty\nnewsgroups dataset, pass the --twenty-newsgroups command line argument to this\nscript.\n\nFor each value of ``n_components``, we plot:\n\n- 2D distribution of sample pairs with pairwise distances in original\n and projected spaces as x and y axis respectively.\n\n- 1D histogram of the ratio of those distances (projected / original).\n\nWe can see that for low values of ``n_components`` the distribution is wide\nwith many distorted pairs and a skewed distribution (due to the hard\nlimit of zero ratio on the left as distances are always positives)\nwhile for larger values of n_components the distortion is controlled\nand the distances are well preserved by the random projection.\n\n\nRemarks\n=======\n\nAccording to the JL lemma, projecting 500 samples without too much distortion\nwill require at least several thousands dimensions, irrespective of the\nnumber of features of the original dataset.\n\nHence using random projections on the digits dataset which only has 64 features\nin the input space does not make sense: it does not allow for dimensionality\nreduction in this case.\n\nOn the twenty newsgroups on the other hand the dimensionality can be decreased\nfrom 56436 down to 10000 while reasonably preserving pairwise distances.\n\n\"\"\"\nprint(__doc__)\n\nimport sys\nfrom time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.random_projection import johnson_lindenstrauss_min_dim\nfrom sklearn.random_projection import SparseRandomProjection\nfrom sklearn.datasets import fetch_20newsgroups_vectorized\nfrom sklearn.datasets import load_digits\nfrom sklearn.metrics.pairwise import euclidean_distances\n\n# Part 1: plot the theoretical dependency between n_components_min and\n# n_samples\n\n# range of admissible distortions\neps_range = np.linspace(0.1, 0.99, 5)\ncolors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))\n\n# range of number of samples (observation) to embed\nn_samples_range = np.logspace(1, 9, 9)\n\nplt.figure()\nfor eps, color in zip(eps_range, colors):\n min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)\n plt.loglog(n_samples_range, min_n_components, color=color)\n\nplt.legend([\"eps = %0.1f\" % eps for eps in eps_range], loc=\"lower right\")\nplt.xlabel(\"Number of observations to eps-embed\")\nplt.ylabel(\"Minimum number of dimensions\")\nplt.title(\"Johnson-Lindenstrauss bounds:\\nn_samples vs n_components\")\n\n# range of admissible distortions\neps_range = np.linspace(0.01, 0.99, 100)\n\n# range of number of samples (observation) to embed\nn_samples_range = np.logspace(2, 6, 5)\ncolors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))\n\nplt.figure()\nfor n_samples, color in zip(n_samples_range, colors):\n min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)\n plt.semilogy(eps_range, min_n_components, color=color)\n\nplt.legend([\"n_samples = %d\" % n for n in n_samples_range], loc=\"upper right\")\nplt.xlabel(\"Distortion eps\")\nplt.ylabel(\"Minimum number of dimensions\")\nplt.title(\"Johnson-Lindenstrauss bounds:\\nn_components vs eps\")\n\n# Part 2: perform sparse random projection of some digits images which are\n# quite low dimensional and dense or documents of the 20 newsgroups dataset\n# which is both high dimensional and sparse\n\nif '--twenty-newsgroups' in sys.argv:\n # Need an internet connection hence not enabled by default\n data = fetch_20newsgroups_vectorized().data[:500]\nelse:\n data = load_digits().data[:500]\n\nn_samples, 
n_features = data.shape\nprint(\"Embedding %d samples with dim %d using various random projections\"\n % (n_samples, n_features))\n\nn_components_range = np.array([300, 1000, 10000])\ndists = euclidean_distances(data, squared=True).ravel()\n\n# select only non-identical samples pairs\nnonzero = dists != 0\ndists = dists[nonzero]\n\nfor n_components in n_components_range:\n t0 = time()\n rp = SparseRandomProjection(n_components=n_components)\n projected_data = rp.fit_transform(data)\n print(\"Projected %d samples from %d to %d in %0.3fs\"\n % (n_samples, n_features, n_components, time() - t0))\n if hasattr(rp, 'components_'):\n n_bytes = rp.components_.data.nbytes\n n_bytes += rp.components_.indices.nbytes\n print(\"Random matrix with size: %0.3fMB\" % (n_bytes / 1e6))\n\n projected_dists = euclidean_distances(\n projected_data, squared=True).ravel()[nonzero]\n\n plt.figure()\n plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)\n plt.xlabel(\"Pairwise squared distances in original space\")\n plt.ylabel(\"Pairwise squared distances in projected space\")\n plt.title(\"Pairwise distances distribution for n_components=%d\" %\n n_components)\n cb = plt.colorbar()\n cb.set_label('Sample pairs counts')\n\n rates = projected_dists / dists\n print(\"Mean distances rate: %0.2f (%0.2f)\"\n % (np.mean(rates), np.std(rates)))\n\n plt.figure()\n plt.hist(rates, bins=50, normed=True, range=(0., 2.))\n plt.xlabel(\"Squared distances rate: projected / original\")\n plt.ylabel(\"Distribution of samples pairs\")\n plt.title(\"Histogram of pairwise distance rates for n_components=%d\" %\n n_components)\n\n # TODO: compute the expected value of eps and add them to the previous plot\n # as vertical lines / region\n\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382637,"cells":{"repo_name":{"kind":"string","value":"smorante/continuous-goal-directed-actions"},"path":{"kind":"string","value":"demonstration-feature-selection/src/alternatives/main_dtw_mds_dbscan.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"3384"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Santiago Morante\nRobotics Lab. 
Universidad Carlos III de Madrid\n\"\"\"\n########################## DTW ####################################\nimport libmddtw\nimport matplotlib.pyplot as plt\nfrom dtw import dtw\n########################## MDS ####################################\nimport numpy as np\nfrom sklearn.metrics import euclidean_distances\nimport libmds\n\n########################## DBSCAN ####################################\nimport libdbscan\nfrom sklearn.preprocessing import StandardScaler # to normalize\n\nimport glob\nfrom sklearn import preprocessing\n\nEXPERIMENT = \"experiment-1\"\nPATH = \"../datasets/\" + EXPERIMENT +\"/raw/*.csv\"\n\ndef normalize(X):\n return StandardScaler().fit_transform(X)\n\ndef standardize(X):\n return preprocessing.scale(X)\n\ndef main():\n \n demons=[]\n demoNames = sorted(glob.glob(PATH))\n print demoNames\n\n for elem in demoNames:\n tmp = np.loadtxt(elem)\n tmp_clean = tmp[:,1:]\n tmp_clean = standardize(tmp_clean)\n demons.append(tmp_clean)\n \n dist=np.zeros((len(demoNames),len(demoNames)))\n \n ##########################################################################\n ########################## DTW ####################################\n ########################################################################## \n\n\n \n # fill distance matrix \n for i in range(len(demoNames)):\n for j in range(len(demoNames)):\n\n mddtw = libmddtw.Mddtw()\n x,y = mddtw.collapseRows(demons[i],demons[j])\n \n #fig = plt.figure()\n #plt.plot(x)\n #plt.plot(y)\n singleDist, singleCost, singlePath = mddtw.compute(demons[i],demons[j])\n dist[i][j]=singleDist\n # print 'Minimum distance found:', singleDist\n #fig = plt.figure()\n \n # plt.imshow(cost.T, origin='lower', cmap=plt.cm.gray, interpolation='nearest')\n # plt.plot(path[0], path[1], 'w')\n # plt.xlim((-0.5, cost.shape[0]-0.5))\n # plt.ylim((-0.5, cost.shape[1]-0.5))\n \n # print \"dist\", dist\n ###########################################################################\n ########################### MDS ####################################\n ###########################################################################\n\n md = libmds.Mds()\n md.create(n_components=2, metric=True, max_iter=3000, eps=1e-12, random_state=None,\n dissimilarity=\"precomputed\", n_jobs=-1, n_init=100)\n\n points = md.compute(dist)\n print \"points\", points\n # md.plot()\n \n \n \n \n ##########################################################################\n ########################## DBSCAN ####################################\n ##########################################################################\n # normalize\n normalizedPoints = normalize(points)\n \n # ########################## dbscan\n db = libdbscan.Dbscan()\n db.create(eps=1.5, min_samples=2)\n db.compute(normalizedPoints)\n db.plot()\n print \"[INFO] Detected outliers: \", db.getOutliers()\n \n\n##############################################################################\n##############################################################################\n\nif __name__ == \"__main__\":\n main()"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382638,"cells":{"repo_name":{"kind":"string","value":"sandeepgupta2k4/tensorflow"},"path":{"kind":"string","value":"tensorflow/examples/learn/iris.py"},"copies":{"kind":"string","value":"35"},"size":{"kind":"string","value":"1654"},"content":{"kind":"string","value":"# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example of DNNClassifier for Iris plant dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn import model_selection\n\nimport tensorflow as tf\n\n\ndef main(unused_argv):\n # Load dataset.\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = model_selection.train_test_split(\n iris.data, iris.target, test_size=0.2, random_state=42)\n\n # Build 3 layer DNN with 10, 20, 10 units respectively.\n feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(\n x_train)\n classifier = tf.contrib.learn.DNNClassifier(\n feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)\n\n # Fit and predict.\n classifier.fit(x_train, y_train, steps=200)\n predictions = list(classifier.predict(x_test, as_iterable=True))\n score = metrics.accuracy_score(y_test, predictions)\n print('Accuracy: {0:f}'.format(score))\n\n\nif __name__ == '__main__':\n tf.app.run()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382639,"cells":{"repo_name":{"kind":"string","value":"raincoatrun/ThinkStats2"},"path":{"kind":"string","value":"code/chap13soln.py"},"copies":{"kind":"string","value":"68"},"size":{"kind":"string","value":"2961"},"content":{"kind":"string","value":"\"\"\"This file contains code for use with \"Think Stats\",\nby Allen B. Downey, available from greenteapress.com\n\nCopyright 2014 Allen B. 
Downey\nLicense: GNU GPLv3 http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\nfrom __future__ import print_function\n\nimport pandas\nimport numpy as np\n\nimport thinkplot\nimport thinkstats2\nimport survival\n\n\ndef CleanData(resp):\n \"\"\"Cleans respondent data.\n\n resp: DataFrame\n \"\"\"\n resp.cmdivorcx.replace([9998, 9999], np.nan, inplace=True)\n\n resp['notdivorced'] = resp.cmdivorcx.isnull().astype(int)\n resp['duration'] = (resp.cmdivorcx - resp.cmmarrhx) / 12.0\n resp['durationsofar'] = (resp.cmintvw - resp.cmmarrhx) / 12.0\n\n month0 = pandas.to_datetime('1899-12-15')\n dates = [month0 + pandas.DateOffset(months=cm) \n for cm in resp.cmbirth]\n resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10\n\n\ndef ResampleDivorceCurve(resps):\n \"\"\"Plots divorce curves based on resampled data.\n\n resps: list of respondent DataFrames\n \"\"\"\n for _ in range(41):\n samples = [thinkstats2.ResampleRowsWeighted(resp) \n for resp in resps]\n sample = pandas.concat(samples, ignore_index=True)\n PlotDivorceCurveByDecade(sample, color='#225EA8', alpha=0.1)\n\n thinkplot.Show(xlabel='years',\n axis=[0, 28, 0, 1])\n\n\ndef ResampleDivorceCurveByDecade(resps):\n \"\"\"Plots divorce curves for each birth cohort.\n\n resps: list of respondent DataFrames \n \"\"\"\n for i in range(41):\n samples = [thinkstats2.ResampleRowsWeighted(resp) \n for resp in resps]\n sample = pandas.concat(samples, ignore_index=True)\n groups = sample.groupby('decade')\n if i == 0:\n survival.AddLabelsByDecade(groups, alpha=0.7)\n\n EstimateSurvivalByDecade(groups, alpha=0.1)\n\n thinkplot.Save(root='survival7',\n xlabel='years',\n axis=[0, 28, 0, 1])\n\n\ndef EstimateSurvivalByDecade(groups, **options):\n \"\"\"Groups respondents by decade and plots survival curves.\n\n groups: GroupBy object\n \"\"\"\n thinkplot.PrePlot(len(groups))\n for name, group in groups:\n print(name, len(group))\n _, sf = EstimateSurvival(group)\n thinkplot.Plot(sf, **options)\n\n\ndef EstimateSurvival(resp):\n \"\"\"Estimates the survival curve.\n\n resp: DataFrame of respondents\n\n returns: pair of HazardFunction, SurvivalFunction\n \"\"\"\n complete = resp[resp.notdivorced == 0].duration\n ongoing = resp[resp.notdivorced == 1].durationsofar\n\n hf = survival.EstimateHazardFunction(complete, ongoing)\n sf = hf.MakeSurvival()\n\n return hf, sf\n\n\ndef main():\n resp6 = survival.ReadFemResp2002()\n CleanData(resp6)\n married6 = resp6[resp6.evrmarry==1]\n\n resp7 = survival.ReadFemResp2010()\n CleanData(resp7)\n married7 = resp7[resp7.evrmarry==1]\n\n ResampleDivorceCurveByDecade([married6, married7])\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382640,"cells":{"repo_name":{"kind":"string","value":"mistercrunch/panoramix"},"path":{"kind":"string","value":"superset/db_engine_specs/hive.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"20297"},"content":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport logging\nimport os\nimport re\nimport tempfile\nimport time\nfrom datetime import datetime\nfrom typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING\nfrom urllib import parse\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom flask import current_app, g\nfrom sqlalchemy import Column, text\nfrom sqlalchemy.engine.base import Engine\nfrom sqlalchemy.engine.reflection import Inspector\nfrom sqlalchemy.engine.url import make_url, URL\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql.expression import ColumnClause, Select\n\nfrom superset.db_engine_specs.base import BaseEngineSpec\nfrom superset.db_engine_specs.presto import PrestoEngineSpec\nfrom superset.exceptions import SupersetException\nfrom superset.extensions import cache_manager\nfrom superset.models.sql_lab import Query\nfrom superset.sql_parse import ParsedQuery, Table\nfrom superset.utils import core as utils\n\nif TYPE_CHECKING:\n # prevent circular imports\n from superset.models.core import Database\n\n\nQueryStatus = utils.QueryStatus\nlogger = logging.getLogger(__name__)\n\n\ndef upload_to_s3(filename: str, upload_prefix: str, table: Table) -> str:\n \"\"\"\n Upload the file to S3.\n\n :param filename: The file to upload\n :param upload_prefix: The S3 prefix\n :param table: The table that will be created\n :returns: The S3 location of the table\n \"\"\"\n\n # Optional dependency\n import boto3 # pylint: disable=import-error\n\n bucket_path = current_app.config[\"CSV_TO_HIVE_UPLOAD_S3_BUCKET\"]\n\n if not bucket_path:\n logger.info(\"No upload bucket specified\")\n raise Exception(\n \"No upload bucket specified. 
You can specify one in the config file.\"\n )\n\n s3 = boto3.client(\"s3\")\n location = os.path.join(\"s3a://\", bucket_path, upload_prefix, table.table)\n s3.upload_file(\n filename,\n bucket_path,\n os.path.join(upload_prefix, table.table, os.path.basename(filename)),\n )\n return location\n\n\nclass HiveEngineSpec(PrestoEngineSpec):\n \"\"\"Reuses PrestoEngineSpec functionality.\"\"\"\n\n engine = \"hive\"\n engine_name = \"Apache Hive\"\n max_column_name_length = 767\n allows_alias_to_source_column = True\n allows_hidden_ordeby_agg = False\n\n # When running `SHOW FUNCTIONS`, what is the name of the column with the\n # function names?\n _show_functions_column = \"tab_name\"\n\n # pylint: disable=line-too-long\n _time_grain_expressions = {\n None: \"{col}\",\n \"PT1S\": \"from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:mm:ss')\",\n \"PT1M\": \"from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:mm:00')\",\n \"PT1H\": \"from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:00:00')\",\n \"P1D\": \"from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd 00:00:00')\",\n \"P1W\": \"date_format(date_sub({col}, CAST(7-from_unixtime(unix_timestamp({col}),'u') as int)), 'yyyy-MM-dd 00:00:00')\",\n \"P1M\": \"from_unixtime(unix_timestamp({col}), 'yyyy-MM-01 00:00:00')\",\n \"P0.25Y\": \"date_format(add_months(trunc({col}, 'MM'), -(month({col})-1)%3), 'yyyy-MM-dd 00:00:00')\",\n \"P1Y\": \"from_unixtime(unix_timestamp({col}), 'yyyy-01-01 00:00:00')\",\n \"P1W/1970-01-03T00:00:00Z\": \"date_format(date_add({col}, INT(6-from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')\",\n \"1969-12-28T00:00:00Z/P1W\": \"date_format(date_add({col}, -INT(from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')\",\n }\n\n # Scoping regex at class level to avoid recompiling\n # 17/02/07 19:36:38 INFO ql.Driver: Total jobs = 5\n jobs_stats_r = re.compile(r\".*INFO.*Total jobs = (?P[0-9]+)\")\n # 17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5\n launching_job_r = re.compile(\n \".*INFO.*Launching Job (?P[0-9]+) out of \" \"(?P[0-9]+)\"\n )\n # 17/02/07 19:36:58 INFO exec.Task: 2017-02-07 19:36:58,152 Stage-18\n # map = 0%, reduce = 0%\n stage_progress_r = re.compile(\n r\".*INFO.*Stage-(?P[0-9]+).*\"\n r\"map = (?P[0-9]+)%.*\"\n r\"reduce = (?P[0-9]+)%.*\"\n )\n\n @classmethod\n def patch(cls) -> None:\n from pyhive import hive\n from TCLIService import (\n constants as patched_constants,\n TCLIService as patched_TCLIService,\n ttypes as patched_ttypes,\n )\n\n from superset.db_engines import hive as patched_hive\n\n hive.TCLIService = patched_TCLIService\n hive.constants = patched_constants\n hive.ttypes = patched_ttypes\n hive.Cursor.fetch_logs = patched_hive.fetch_logs\n\n @classmethod\n def get_all_datasource_names(\n cls, database: \"Database\", datasource_type: str\n ) -> List[utils.DatasourceName]:\n return BaseEngineSpec.get_all_datasource_names(database, datasource_type)\n\n @classmethod\n def fetch_data(\n cls, cursor: Any, limit: Optional[int] = None\n ) -> List[Tuple[Any, ...]]:\n import pyhive\n from TCLIService import ttypes\n\n state = cursor.poll()\n if state.operationState == ttypes.TOperationState.ERROR_STATE:\n raise Exception(\"Query error\", state.errorMessage)\n try:\n return super().fetch_data(cursor, limit)\n except pyhive.exc.ProgrammingError:\n return []\n\n @classmethod\n def df_to_sql(\n cls,\n database: \"Database\",\n table: Table,\n df: pd.DataFrame,\n to_sql_kwargs: Dict[str, Any],\n ) -> None:\n \"\"\"\n Upload data from a Pandas DataFrame to a 
database.\n\n The data is stored via the binary Parquet format which is both less problematic\n and more performant than a text file. More specifically storing a table as a\n CSV text file has severe limitations including the fact that the Hive CSV SerDe\n does not support multiline fields.\n\n Note this method does not create metadata for the table.\n\n :param database: The database to upload the data to\n :param: table The table to upload the data to\n :param df: The dataframe with data to be uploaded\n :param to_sql_kwargs: The kwargs to be passed to pandas.DataFrame.to_sql` method\n \"\"\"\n\n engine = cls.get_engine(database)\n\n if to_sql_kwargs[\"if_exists\"] == \"append\":\n raise SupersetException(\"Append operation not currently supported\")\n\n if to_sql_kwargs[\"if_exists\"] == \"fail\":\n\n # Ensure table doesn't already exist.\n if table.schema:\n table_exists = not database.get_df(\n f\"SHOW TABLES IN {table.schema} LIKE '{table.table}'\"\n ).empty\n else:\n table_exists = not database.get_df(\n f\"SHOW TABLES LIKE '{table.table}'\"\n ).empty\n\n if table_exists:\n raise SupersetException(\"Table already exists\")\n elif to_sql_kwargs[\"if_exists\"] == \"replace\":\n engine.execute(f\"DROP TABLE IF EXISTS {str(table)}\")\n\n def _get_hive_type(dtype: np.dtype) -> str:\n hive_type_by_dtype = {\n np.dtype(\"bool\"): \"BOOLEAN\",\n np.dtype(\"float64\"): \"DOUBLE\",\n np.dtype(\"int64\"): \"BIGINT\",\n np.dtype(\"object\"): \"STRING\",\n }\n\n return hive_type_by_dtype.get(dtype, \"STRING\")\n\n schema_definition = \", \".join(\n f\"`{name}` {_get_hive_type(dtype)}\" for name, dtype in df.dtypes.items()\n )\n\n with tempfile.NamedTemporaryFile(\n dir=current_app.config[\"UPLOAD_FOLDER\"], suffix=\".parquet\"\n ) as file:\n pq.write_table(pa.Table.from_pandas(df), where=file.name)\n\n engine.execute(\n text(\n f\"\"\"\n CREATE TABLE {str(table)} ({schema_definition})\n STORED AS PARQUET\n LOCATION :location\n \"\"\"\n ),\n location=upload_to_s3(\n filename=file.name,\n upload_prefix=current_app.config[\n \"CSV_TO_HIVE_UPLOAD_DIRECTORY_FUNC\"\n ](database, g.user, table.schema),\n table=table,\n ),\n )\n\n @classmethod\n def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:\n tt = target_type.upper()\n if tt == utils.TemporalType.DATE:\n return f\"CAST('{dttm.date().isoformat()}' AS DATE)\"\n if tt == utils.TemporalType.TIMESTAMP:\n return f\"\"\"CAST('{dttm\n .isoformat(sep=\" \", timespec=\"microseconds\")}' AS TIMESTAMP)\"\"\"\n return None\n\n @classmethod\n def adjust_database_uri(\n cls, uri: URL, selected_schema: Optional[str] = None\n ) -> None:\n if selected_schema:\n uri.database = parse.quote(selected_schema, safe=\"\")\n\n @classmethod\n def _extract_error_message(cls, ex: Exception) -> str:\n msg = str(ex)\n match = re.search(r'errorMessage=\"(.*?)(? 
int:\n total_jobs = 1 # assuming there's at least 1 job\n current_job = 1\n stages: Dict[int, float] = {}\n for line in log_lines:\n match = cls.jobs_stats_r.match(line)\n if match:\n total_jobs = int(match.groupdict()[\"max_jobs\"]) or 1\n match = cls.launching_job_r.match(line)\n if match:\n current_job = int(match.groupdict()[\"job_number\"])\n total_jobs = int(match.groupdict()[\"max_jobs\"]) or 1\n stages = {}\n match = cls.stage_progress_r.match(line)\n if match:\n stage_number = int(match.groupdict()[\"stage_number\"])\n map_progress = int(match.groupdict()[\"map_progress\"])\n reduce_progress = int(match.groupdict()[\"reduce_progress\"])\n stages[stage_number] = (map_progress + reduce_progress) / 2\n logger.info(\n \"Progress detail: {}, \" # pylint: disable=logging-format-interpolation\n \"current job {}, \"\n \"total jobs: {}\".format(stages, current_job, total_jobs)\n )\n\n stage_progress = sum(stages.values()) / len(stages.values()) if stages else 0\n\n progress = 100 * (current_job - 1) / total_jobs + stage_progress / total_jobs\n return int(progress)\n\n @classmethod\n def get_tracking_url(cls, log_lines: List[str]) -> Optional[str]:\n lkp = \"Tracking URL = \"\n for line in log_lines:\n if lkp in line:\n return line.split(lkp)[1]\n return None\n\n @classmethod\n def handle_cursor( # pylint: disable=too-many-locals\n cls, cursor: Any, query: Query, session: Session\n ) -> None:\n \"\"\"Updates progress information\"\"\"\n from pyhive import hive\n\n unfinished_states = (\n hive.ttypes.TOperationState.INITIALIZED_STATE,\n hive.ttypes.TOperationState.RUNNING_STATE,\n )\n polled = cursor.poll()\n last_log_line = 0\n tracking_url = None\n job_id = None\n query_id = query.id\n while polled.operationState in unfinished_states:\n query = session.query(type(query)).filter_by(id=query_id).one()\n if query.status == QueryStatus.STOPPED:\n cursor.cancel()\n break\n\n log = cursor.fetch_logs() or \"\"\n if log:\n log_lines = log.splitlines()\n progress = cls.progress(log_lines)\n logger.info(\n \"Query %s: Progress total: %s\", str(query_id), str(progress)\n )\n needs_commit = False\n if progress > query.progress:\n query.progress = progress\n needs_commit = True\n if not tracking_url:\n tracking_url = cls.get_tracking_url(log_lines)\n if tracking_url:\n job_id = tracking_url.split(\"/\")[-2]\n logger.info(\n \"Query %s: Found the tracking url: %s\",\n str(query_id),\n tracking_url,\n )\n tracking_url = current_app.config[\"TRACKING_URL_TRANSFORMER\"]\n logger.info(\n \"Query %s: Transformation applied: %s\",\n str(query_id),\n tracking_url,\n )\n query.tracking_url = tracking_url\n logger.info(\"Query %s: Job id: %s\", str(query_id), str(job_id))\n needs_commit = True\n if job_id and len(log_lines) > last_log_line:\n # Wait for job id before logging things out\n # this allows for prefixing all log lines and becoming\n # searchable in something like Kibana\n for l in log_lines[last_log_line:]:\n logger.info(\"Query %s: [%s] %s\", str(query_id), str(job_id), l)\n last_log_line = len(log_lines)\n if needs_commit:\n session.commit()\n time.sleep(current_app.config[\"HIVE_POLL_INTERVAL\"])\n polled = cursor.poll()\n\n @classmethod\n def get_columns(\n cls, inspector: Inspector, table_name: str, schema: Optional[str]\n ) -> List[Dict[str, Any]]:\n return inspector.get_columns(table_name, schema)\n\n @classmethod\n def where_latest_partition( # pylint: disable=too-many-arguments\n cls,\n table_name: str,\n schema: Optional[str],\n database: \"Database\",\n query: Select,\n columns: 
Optional[List[Dict[str, str]]] = None,\n ) -> Optional[Select]:\n try:\n col_names, values = cls.latest_partition(\n table_name, schema, database, show_first=True\n )\n except Exception: # pylint: disable=broad-except\n # table is not partitioned\n return None\n if values is not None and columns is not None:\n for col_name, value in zip(col_names, values):\n for clm in columns:\n if clm.get(\"name\") == col_name:\n query = query.where(Column(col_name) == value)\n\n return query\n return None\n\n @classmethod\n def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]:\n return BaseEngineSpec._get_fields(cols) # pylint: disable=protected-access\n\n @classmethod\n def latest_sub_partition(\n cls, table_name: str, schema: Optional[str], database: \"Database\", **kwargs: Any\n ) -> str:\n # TODO(bogdan): implement`\n pass\n\n @classmethod\n def _latest_partition_from_df(cls, df: pd.DataFrame) -> Optional[List[str]]:\n \"\"\"Hive partitions look like ds={partition name}\"\"\"\n if not df.empty:\n return [df.ix[:, 0].max().split(\"=\")[1]]\n return None\n\n @classmethod\n def _partition_query( # pylint: disable=too-many-arguments\n cls,\n table_name: str,\n database: \"Database\",\n limit: int = 0,\n order_by: Optional[List[Tuple[str, bool]]] = None,\n filters: Optional[Dict[Any, Any]] = None,\n ) -> str:\n return f\"SHOW PARTITIONS {table_name}\"\n\n @classmethod\n def select_star( # pylint: disable=too-many-arguments\n cls,\n database: \"Database\",\n table_name: str,\n engine: Engine,\n schema: Optional[str] = None,\n limit: int = 100,\n show_cols: bool = False,\n indent: bool = True,\n latest_partition: bool = True,\n cols: Optional[List[Dict[str, Any]]] = None,\n ) -> str:\n return super( # pylint: disable=bad-super-call\n PrestoEngineSpec, cls\n ).select_star(\n database,\n table_name,\n engine,\n schema,\n limit,\n show_cols,\n indent,\n latest_partition,\n cols,\n )\n\n @classmethod\n def modify_url_for_impersonation(\n cls, url: URL, impersonate_user: bool, username: Optional[str]\n ) -> None:\n \"\"\"\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Flag indicating if impersonation is enabled\n :param username: Effective username\n \"\"\"\n # Do nothing in the URL object since instead this should modify\n # the configuraiton dictionary. 
See get_configuration_for_impersonation\n\n @classmethod\n def update_impersonation_config(\n cls, connect_args: Dict[str, Any], uri: str, username: Optional[str],\n ) -> None:\n \"\"\"\n Update a configuration dictionary\n that can set the correct properties for impersonating users\n :param connect_args:\n :param uri: URI string\n :param impersonate_user: Flag indicating if impersonation is enabled\n :param username: Effective username\n :return: None\n \"\"\"\n url = make_url(uri)\n backend_name = url.get_backend_name()\n\n # Must be Hive connection, enable impersonation, and set optional param\n # auth=LDAP|KERBEROS\n # this will set hive.server2.proxy.user=$effective_username on connect_args['configuration']\n if backend_name == \"hive\" and username is not None:\n configuration = connect_args.get(\"configuration\", {})\n configuration[\"hive.server2.proxy.user\"] = username\n connect_args[\"configuration\"] = configuration\n\n @staticmethod\n def execute( # type: ignore\n cursor, query: str, async_: bool = False\n ): # pylint: disable=arguments-differ\n kwargs = {\"async\": async_}\n cursor.execute(query, **kwargs)\n\n @classmethod\n @cache_manager.cache.memoize()\n def get_function_names(cls, database: \"Database\") -> List[str]:\n \"\"\"\n Get a list of function names that are able to be called on the database.\n Used for SQL Lab autocomplete.\n\n :param database: The database to get functions for\n :return: A list of function names useable in the database\n \"\"\"\n df = database.get_df(\"SHOW FUNCTIONS\")\n if cls._show_functions_column in df:\n return df[cls._show_functions_column].tolist()\n\n columns = df.columns.values.tolist()\n logger.error(\n \"Payload from `SHOW FUNCTIONS` has the incorrect format. \"\n \"Expected column `%s`, found: %s.\",\n cls._show_functions_column,\n \", \".join(columns),\n exc_info=True,\n )\n # if the results have a single column, use that\n if len(columns) == 1:\n return df[columns[0]].tolist()\n\n # otherwise, return no function names to prevent errors\n return []\n\n @classmethod\n def is_readonly_query(cls, parsed_query: ParsedQuery) -> bool:\n \"\"\"Pessimistic readonly, 100% sure statement won't mutate anything\"\"\"\n return (\n super().is_readonly_query(parsed_query)\n or parsed_query.is_set()\n or parsed_query.is_show()\n )\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382641,"cells":{"repo_name":{"kind":"string","value":"aurelieladier/openturns"},"path":{"kind":"string","value":"validation/src/optimal_lhs/validate_MC_small.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"1877"},"content":{"kind":"string","value":"#! 
/usr/bin/env python\n\nimport openturns as ot\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom openturns.viewer import View\nimport time\n\not.Log.Show(ot.Log.INFO)\n\n# Bounds are [0,1]^dimension\ndimension = 2\nbounds = ot.Interval(dimension)\n\nnSimu = 10000\n\nc2 = ot.SpaceFillingC2()\n# Size of sample\nsize = 10\n\nprint(\"dimension=%d, size=%d\"%(dimension, size))\nfor nSimu in [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200, 102400, 204800, 409600]:\n ot.RandomGenerator.SetSeed(0)\n # Factory: lhs generates\n lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size)\n lhsDesign.setAlwaysShuffle(True) # randomized\n\n mc = ot.MonteCarloLHS(lhsDesign, nSimu, c2)\n tic = time.time()\n design = mc.generate()\n result = mc.getResult()\n toc = time.time()\n print(\"%d %f %f\"%(nSimu,result.getOptimalValue(), toc-tic))\n\npp = PdfPages('small_mc_OTLHS.pdf')\n# plot criterion & save it\ncrit = result.drawHistoryCriterion()\nfig = View(crit, plot_kwargs={'color':'blue'}).getFigure()\npp.savefig(fig)\nplt.close(fig)\n \nminDist = ot.SpaceFillingMinDist()\n\n# Factory: lhs generates\nlhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size)\nlhsDesign.setAlwaysShuffle(True) # randomized\n\nmc = ot.MonteCarloLHS(lhsDesign, nSimu, minDist)\ntic = time.time()\ndesign = mc.generate()\nresult = mc.getResult()\ntoc = time.time()\nprint(\"cpu time=%f\"%(toc-tic))\nprint(\"dimension=%d, size=%d,mc=%s\"%(dimension, size, mc))\nprint(\"optimal value=\"+ str(result.getOptimalValue())+\" c2=\"+str(result.getC2())+\" phiP=\"+str(result.getPhiP())+\" minDist=\"+str(result.getMinDist()))\n# plot criterion & save it\ncrit = result.drawHistoryCriterion()\nfig = View(crit, plot_kwargs={'color':'blue'}).getFigure()\npp.savefig(fig)\nplt.close(fig)\n\npp.close()\n"},"license":{"kind":"string","value":"lgpl-3.0"}}},{"rowIdx":382642,"cells":{"repo_name":{"kind":"string","value":"ueshin/apache-spark"},"path":{"kind":"string","value":"python/pyspark/pandas/tests/test_spark_functions.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"2127"},"content":{"kind":"string","value":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nfrom pyspark.pandas.spark import functions as SF\nfrom pyspark.pandas.utils import spark_column_equals\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import (\n ByteType,\n FloatType,\n IntegerType,\n LongType,\n)\nfrom pyspark.testing.pandasutils import PandasOnSparkTestCase\n\n\nclass SparkFunctionsTests(PandasOnSparkTestCase):\n def test_lit(self):\n self.assertTrue(spark_column_equals(SF.lit(np.int64(1)), F.lit(1).astype(LongType())))\n self.assertTrue(spark_column_equals(SF.lit(np.int32(1)), F.lit(1).astype(IntegerType())))\n self.assertTrue(spark_column_equals(SF.lit(np.int8(1)), F.lit(1).astype(ByteType())))\n self.assertTrue(spark_column_equals(SF.lit(np.byte(1)), F.lit(1).astype(ByteType())))\n self.assertTrue(\n spark_column_equals(SF.lit(np.float32(1)), F.lit(float(1)).astype(FloatType()))\n )\n self.assertTrue(spark_column_equals(SF.lit(1), F.lit(1)))\n\n\nif __name__ == \"__main__\":\n import unittest\n from pyspark.pandas.tests.test_spark_functions import * # noqa: F401\n\n try:\n import xmlrunner # type: ignore[import]\n\n testRunner = xmlrunner.XMLTestRunner(output=\"target/test-reports\", verbosity=2)\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382643,"cells":{"repo_name":{"kind":"string","value":"btabibian/scikit-learn"},"path":{"kind":"string","value":"examples/hetero_feature_union.py"},"copies":{"kind":"string","value":"81"},"size":{"kind":"string","value":"6241"},"content":{"kind":"string","value":"\"\"\"\n=============================================\nFeature Union with Heterogeneous Data Sources\n=============================================\n\nDatasets can often contain components of that require different feature\nextraction and processing pipelines. This scenario might occur when:\n\n1. Your dataset consists of heterogeneous data types (e.g. raster images and\n text captions)\n2. Your dataset is stored in a Pandas DataFrame and different columns\n require different processing pipelines.\n\nThis example demonstrates how to use\n:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing\ndifferent types of features. We use the 20-newsgroups dataset and compute\nstandard bag-of-words features for the subject line and body in separate\npipelines as well as ad hoc features on the body. 
We combine them (with\nweights) using a FeatureUnion and finally train a classifier on the combined\nset of features.\n\nThe choice of features is not particularly helpful, but serves to illustrate\nthe technique.\n\"\"\"\n\n# Author: Matt Terry \n#\n# License: BSD 3 clause\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer\nfrom sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import SVC\n\n\nclass ItemSelector(BaseEstimator, TransformerMixin):\n \"\"\"For data grouped by feature, select subset of data at a provided key.\n\n The data is expected to be stored in a 2D data structure, where the first\n index is over features and the second is over samples. i.e.\n\n >> len(data[key]) == n_samples\n\n Please note that this is the opposite convention to scikit-learn feature\n matrixes (where the first index corresponds to sample).\n\n ItemSelector only requires that the collection implement getitem\n (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas\n DataFrame, numpy record array, etc.\n\n >> data = {'a': [1, 5, 2, 5, 2, 8],\n 'b': [9, 4, 1, 4, 1, 3]}\n >> ds = ItemSelector(key='a')\n >> data['a'] == ds.transform(data)\n\n ItemSelector is not designed to handle data grouped by sample. (e.g. a\n list of dicts). If your data is structured this way, consider a\n transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.\n\n Parameters\n ----------\n key : hashable, required\n The key corresponding to the desired value in a mappable.\n \"\"\"\n def __init__(self, key):\n self.key = key\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, data_dict):\n return data_dict[self.key]\n\n\nclass TextStats(BaseEstimator, TransformerMixin):\n \"\"\"Extract features from each document for DictVectorizer\"\"\"\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, posts):\n return [{'length': len(text),\n 'num_sentences': text.count('.')}\n for text in posts]\n\n\nclass SubjectBodyExtractor(BaseEstimator, TransformerMixin):\n \"\"\"Extract the subject & body from a usenet post in a single pass.\n\n Takes a sequence of strings and produces a dict of sequences. 
Keys are\n `subject` and `body`.\n \"\"\"\n def fit(self, x, y=None):\n return self\n\n def transform(self, posts):\n features = np.recarray(shape=(len(posts),),\n dtype=[('subject', object), ('body', object)])\n for i, text in enumerate(posts):\n headers, _, bod = text.partition('\\n\\n')\n bod = strip_newsgroup_footer(bod)\n bod = strip_newsgroup_quoting(bod)\n features['body'][i] = bod\n\n prefix = 'Subject:'\n sub = ''\n for line in headers.split('\\n'):\n if line.startswith(prefix):\n sub = line[len(prefix):]\n break\n features['subject'][i] = sub\n\n return features\n\n\npipeline = Pipeline([\n # Extract the subject & body\n ('subjectbody', SubjectBodyExtractor()),\n\n # Use FeatureUnion to combine the features from subject and body\n ('union', FeatureUnion(\n transformer_list=[\n\n # Pipeline for pulling features from the post's subject line\n ('subject', Pipeline([\n ('selector', ItemSelector(key='subject')),\n ('tfidf', TfidfVectorizer(min_df=50)),\n ])),\n\n # Pipeline for standard bag-of-words model for body\n ('body_bow', Pipeline([\n ('selector', ItemSelector(key='body')),\n ('tfidf', TfidfVectorizer()),\n ('best', TruncatedSVD(n_components=50)),\n ])),\n\n # Pipeline for pulling ad hoc features from post's body\n ('body_stats', Pipeline([\n ('selector', ItemSelector(key='body')),\n ('stats', TextStats()), # returns a list of dicts\n ('vect', DictVectorizer()), # list of dicts -> feature matrix\n ])),\n\n ],\n\n # weight components in FeatureUnion\n transformer_weights={\n 'subject': 0.8,\n 'body_bow': 0.5,\n 'body_stats': 1.0,\n },\n )),\n\n # Use a SVC classifier on the combined features\n ('svc', SVC(kernel='linear')),\n])\n\n# limit the list of categories to make running this example faster.\ncategories = ['alt.atheism', 'talk.religion.misc']\ntrain = fetch_20newsgroups(random_state=1,\n subset='train',\n categories=categories,\n )\ntest = fetch_20newsgroups(random_state=1,\n subset='test',\n categories=categories,\n )\n\npipeline.fit(train.data, train.target)\ny = pipeline.predict(test.data)\nprint(classification_report(y, test.target))\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382644,"cells":{"repo_name":{"kind":"string","value":"wbengine/SPMILM"},"path":{"kind":"string","value":"egs/1-billion/run_trf_2.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6271"},"content":{"kind":"string","value":"import os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nsys.path.insert(0, os.getcwd() + '/../../tools/')\nimport wb\nimport trf\n\n\n# revise this function to config the dataset used to train different model\ndef data(tskdir):\n train = tskdir + 'data/train.txt'\n valid = tskdir + 'data/valid.txt'\n test = tskdir + 'data/test.txt'\n return data_verfy([train, valid, test]) + data_wsj92nbest()\n\n\ndef data_verfy(paths):\n for w in paths:\n if not os.path.isfile(w):\n print('[ERROR] no such file: ' + w)\n return paths\n\n\ndef data_wsj92nbest():\n root = './data/WSJ92-test-data/'\n nbest = root + '1000best.sent'\n trans = root + 'transcript.txt'\n ac = root + '1000best.acscore'\n lm = root + '1000best.lmscore'\n return data_verfy([nbest, trans, ac, lm])\n\n\ndef evaulate_trf(model, vocab, read_model, tsize, fres):\n res_name = '{}:'.format(int(tsize)) + os.path.split(read_model)[-1]\n tskdir = '{}/'.format(tsize)\n\n # rescore\n config = ' -vocab {} '.format(vocab)\n config += ' -read {}.model '.format(read_model)\n config += ' -nbest {} '.format(data(tskdir)[3])\n config += ' -lmscore 
{0}.lmscore'.format(read_model)\n model.use(config)\n # WER\n [read_nbest, read_templ, read_acscore, read_lmscore] = data(tskdir)[3:7]\n read_lmscore = read_model + '.lmscore'\n\n [wer, lmscale, acscale] = wb.TuneWER(read_nbest, read_templ,\n wb.LoadScore(read_lmscore),\n wb.LoadScore(read_acscore), np.linspace(0.1,0.9,9))\n print('wer={:.4f} lmscale={:.2f} acscale={:.2f}'.format(wer, lmscale, acscale))\n # calculate the ppl on wsj test\n templ_txt = model.workdir + os.path.split(read_templ)[-1] + '.rmlabel'\n wb.file_rmlabel(read_templ, templ_txt)\n PPL_templ = model.ppl(vocab, read_model+'.model', templ_txt)\n LL_templ = -wb.PPL2LL(PPL_templ, templ_txt)\n\n # output the result\n fres.Add(res_name, ['LL-wsj', 'PPL-wsj'], [LL_templ, PPL_templ])\n fres.AddWER(res_name, wer)\n\n\ndef main():\n if len(sys.argv) == 1:\n print('\\\"python run.py -train\\\" train LSTM\\n',\n '\\\"python run.py -rescore\\\" rescore nbest\\n',\n '\\\"python run.py -wer\\\" compute WER'\n )\n\n\n for tsize in [2]:\n bindir = '../../tools/trf/bin/'\n tskdir = '{}/'.format(tsize)\n workdir = tskdir + 'trflm/'\n\n fres = wb.FRes('result.txt')\n model = trf.model(bindir, workdir)\n\n class_num = 200\n train = workdir + 'train.id'\n valid = workdir + 'valid.id'\n test = workdir + 'test.id'\n vocab = workdir + 'vocab_c{}.list'.format(class_num)\n order = 4\n feat = 'g4_w_c_ws_cs_wsh_csh_tied.fs'\n #feat = 'g4_w_c_ws_cs_cpw.fs'\n maxlen = 100\n tmax = 50000\n t0 = 2000\n minibatch = 100\n gamma_lambda = '1000,0'\n gamma_zeta = '0,0.6'\n reg = 1e-5\n thread = 8\n\n write_model = workdir + 'trf_c{}_{}'.format(class_num, feat[0:-3])\n write_name = '{}:{}'.format(tsize, os.path.split(write_model)[1])\n\n if '-class' in sys.argv:\n # just cluster for each tsks.\n model.prepare(data(tskdir)[0], data(tskdir)[1], data(tskdir)[2], class_num)\n if '-train' in sys.argv or '-all' in sys.argv:\n config = '-vocab {} -train {} -valid {} -test {} '.format(vocab, train, valid, test)\n config += ' -order {} -feat {} '.format(order, feat)\n config += ' -len {} '.format(maxlen)\n config += ' -write {0}.model -log {0}.log '.format(write_model)\n config += ' -t0 {} -iter {}'.format(t0, tmax)\n config += ' -gamma-lambda {} -gamma-zeta {}'.format(gamma_lambda, gamma_zeta)\n config += ' -L2 {} '.format(reg)\n config += ' -mini-batch {} '.format(minibatch)\n config += ' -thread {} '.format(thread)\n config += ' -print-per-iter 10 -write-at-iter 10000:10000:{}'.format(tmax)\n model.prepare(data(tskdir)[0], data(tskdir)[1], data(tskdir)[2], class_num)\n model.train(config)\n # output\n LL = model.get_last_value(write_model + '.log')\n fres.AddLL(write_name, LL, data(tskdir)[0:3])\n if '-plot' in sys.argv:\n baseline = fres.Get('{}:KN5'.format(tsize))\n trf.PlotLog([write_model], [baseline])\n if '-rescore' in sys.argv or '-all' in sys.argv:\n config = ' -vocab {} '.format(vocab)\n config += ' -read {}.model '.format(write_model)\n config += ' -nbest {} '.format(data(tskdir)[3])\n config += ' -lmscore {0}.lmscore -lmscore-test-id {0}.test-id '.format(write_model)\n model.use(config)\n if '-wer' in sys.argv or '-all' in sys.argv:\n [read_nbest, read_templ, read_acscore, read_lmscore] = data(tskdir)[3:7]\n read_lmscore = write_model + '.lmscore'\n\n [wer, lmscale, acscale] = wb.TuneWER(read_nbest, read_templ,\n wb.LoadScore(read_lmscore),\n wb.LoadScore(read_acscore), np.linspace(0.1,0.9,9))\n print('wer={:.4f} lmscale={:.2f} acscale={:.2f}'.format(wer, lmscale, acscale))\n\n # calculate the ppl on wsj test\n write_templ_id = workdir + 
os.path.split(read_templ)[1] + '.id'\n v = trf.ReadVocab(vocab)\n trf.NbestToID(read_templ, write_templ_id, v)\n config = ' -vocab {} '.format(vocab)\n config += ' -read {}.model '.format(write_model)\n config += ' -test {} '.format(write_templ_id)\n LL_templ = model.use(config)\n PPL_templ = wb.LL2PPL(-LL_templ, write_templ_id)\n\n # output the result\n fres.Add(write_name, ['LL-wsj', 'PPL-wsj'], [LL_templ, PPL_templ])\n fres.AddWER(write_name, wer)\n if '-inter' in sys.argv:\n # calculate the WER for intermediate models\n for n in np.linspace(10000, 40000, 4):\n inter_model = workdir + 'trf_c{}_{}.n{}'.format(class_num, feat[0:-3], int(n))\n evaulate_trf(model, vocab, inter_model, tsize, fres)\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382645,"cells":{"repo_name":{"kind":"string","value":"plissonf/scikit-learn"},"path":{"kind":"string","value":"examples/svm/plot_svm_scale_c.py"},"copies":{"kind":"string","value":"223"},"size":{"kind":"string","value":"5375"},"content":{"kind":"string","value":"\"\"\"\n==============================================\nScaling the regularization parameter for SVCs\n==============================================\n\nThe following example illustrates the effect of scaling the\nregularization parameter when using :ref:`svm` for\n:ref:`classification `.\nFor SVC classification, we are interested in a risk minimization for the\nequation:\n\n\n.. math::\n\n C \\sum_{i=1, n} \\mathcal{L} (f(x_i), y_i) + \\Omega (w)\n\nwhere\n\n - :math:`C` is used to set the amount of regularization\n - :math:`\\mathcal{L}` is a `loss` function of our samples\n and our model parameters.\n - :math:`\\Omega` is a `penalty` function of our model parameters\n\nIf we consider the loss function to be the individual error per\nsample, then the data-fit term, or the sum of the error for each sample, will\nincrease as we add more samples. The penalization term, however, will not\nincrease.\n\nWhen using, for example, :ref:`cross validation `, to\nset the amount of regularization with `C`, there will be a\ndifferent amount of samples between the main problem and the smaller problems\nwithin the folds of the cross validation.\n\nSince our loss function is dependent on the amount of samples, the latter\nwill influence the selected value of `C`.\nThe question that arises is `How do we optimally adjust C to\naccount for the different amount of training samples?`\n\nThe figures below are used to illustrate the effect of scaling our\n`C` to compensate for the change in the number of samples, in the\ncase of using an `l1` penalty, as well as the `l2` penalty.\n\nl1-penalty case\n-----------------\nIn the `l1` case, theory says that prediction consistency\n(i.e. that under given hypothesis, the estimator\nlearned predicts as well as a model knowing the true distribution)\nis not possible because of the bias of the `l1`. 
It does say, however,\nthat model consistency, in terms of finding the right set of non-zero\nparameters as well as their signs, can be achieved by scaling\n`C1`.\n\nl2-penalty case\n-----------------\nThe theory says that in order to achieve prediction consistency, the\npenalty parameter should be kept constant\nas the number of samples grow.\n\nSimulations\n------------\n\nThe two figures below plot the values of `C` on the `x-axis` and the\ncorresponding cross-validation scores on the `y-axis`, for several different\nfractions of a generated data-set.\n\nIn the `l1` penalty case, the cross-validation-error correlates best with\nthe test-error, when scaling our `C` with the number of samples, `n`,\nwhich can be seen in the first figure.\n\nFor the `l2` penalty case, the best result comes from the case where `C`\nis not scaled.\n\n.. topic:: Note:\n\n Two separate datasets are used for the two different plots. The reason\n behind this is the `l1` case works better on sparse data, while `l2`\n is better suited to the non-sparse case.\n\"\"\"\nprint(__doc__)\n\n\n# Author: Andreas Mueller \n# Jaques Grobler \n# License: BSD 3 clause\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.cross_validation import ShuffleSplit\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.utils import check_random_state\nfrom sklearn import datasets\n\n\nrnd = check_random_state(1)\n\n# set up dataset\nn_samples = 100\nn_features = 300\n\n# l1 data (only 5 informative features)\nX_1, y_1 = datasets.make_classification(n_samples=n_samples,\n n_features=n_features, n_informative=5,\n random_state=1)\n\n# l2 data: non sparse, but less features\ny_2 = np.sign(.5 - rnd.rand(n_samples))\nX_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]\nX_2 += 5 * rnd.randn(n_samples, n_features / 5)\n\nclf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,\n tol=1e-3),\n np.logspace(-2.3, -1.3, 10), X_1, y_1),\n (LinearSVC(penalty='l2', loss='squared_hinge', dual=True,\n tol=1e-4),\n np.logspace(-4.5, -2, 10), X_2, y_2)]\n\ncolors = ['b', 'g', 'r', 'c']\n\nfor fignum, (clf, cs, X, y) in enumerate(clf_sets):\n # set up the plot for each regressor\n plt.figure(fignum, figsize=(9, 10))\n\n for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):\n param_grid = dict(C=cs)\n # To get nice curve, we need a large number of iterations to\n # reduce the variance\n grid = GridSearchCV(clf, refit=False, param_grid=param_grid,\n cv=ShuffleSplit(n=n_samples, train_size=train_size,\n n_iter=250, random_state=1))\n grid.fit(X, y)\n scores = [x[1] for x in grid.grid_scores_]\n\n scales = [(1, 'No scaling'),\n ((n_samples * train_size), '1/n_samples'),\n ]\n\n for subplotnum, (scaler, name) in enumerate(scales):\n plt.subplot(2, 1, subplotnum + 1)\n plt.xlabel('C')\n plt.ylabel('CV Score')\n grid_cs = cs * float(scaler) # scale the C's\n plt.semilogx(grid_cs, scores, label=\"fraction %.2f\" %\n train_size)\n plt.title('scaling=%s, penalty=%s, loss=%s' %\n (name, clf.penalty, clf.loss))\n\n plt.legend(loc=\"best\")\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382646,"cells":{"repo_name":{"kind":"string","value":"jaredwo/topowx"},"path":{"kind":"string","value":"twx/utils/config.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"10923"},"content":{"kind":"string","value":"from ConfigParser import ConfigParser\nfrom twx.utils import ymdL, mkdir_p\nimport numpy as np\nimport os\nimport 
pandas as pd\n\nclass TwxConfig():\n '''Class to load and access TopoWx configuration settings in a INI file.\n \n Upon initialization, also creates necessary sub-directories in the TopoWx\n data root directory if they do not exist.\n \n Example TopoWx INI File:\n \n [TOPOWX_CONFIG]\n # Path to TopoWx data root\n TWX_DATA_ROOT=[a path]\n # Lon/lat bounding box for station observations\n STN_BBOX=-126.0,22.0,-64.0,53.0\n # Start date for which to process station observations\n OBS_START_DATE=1895-01-01\n # End data for which to process station observations\n OBS_END_DATE=2016-03-29\n # Start date for interpolation\n INTERP_START_DATE=1948-01-01\n # End date for interpolation\n INTERP_END_DATE=2015-12-31\n # Station observation elements to process\n OBS_ELEMS=tmin,tmax,prcp,tobs_tmin,tobs_tmax,tobs_prcp\n # Primary station observation elements\n OBS_MAIN_ELEMS=tmin,tmax,prcp\n # Station chunk size for which to load and process station observations\n STN_READ_CHUNK_GHCND=100\n STN_READ_CHUNK_SNOTEL=20\n STN_READ_CHUNK_RAWS=20\n # Station chunk size for loading and writing to netcdf file\n STN_WRITE_CHUNK_NC=100\n # Station chunk size for creating aggregated data (e.g.--monthly from daily)\n STN_AGG_CHUNK=1000\n # A geonames username for accessing DEM elevation services\n USERNAME_GEONAMES=[a username]\n '''\n \n def __init__(self, fpath_ini):\n \n cfg = ConfigParser()\n cfg.read(fpath_ini)\n \n self.twx_data_root = cfg.get('TOPOWX_CONFIG', 'twx_data_root')\n self.obs_start_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG',\n 'obs_start_date'))\n self.obs_end_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG',\n 'obs_end_date'))\n self.interp_start_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG',\n 'interp_start_date'))\n self.interp_end_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG',\n 'interp_end_date'))\n \n bbox_str = cfg.get('TOPOWX_CONFIG', 'stn_bbox')\n self.stn_bbox = tuple([np.float(i) for i in bbox_str.split(',')])\n \n self.obs_elems = tuple(cfg.get('TOPOWX_CONFIG', 'obs_elems').split(','))\n self.obs_main_elems = tuple(cfg.get('TOPOWX_CONFIG',\n 'obs_main_elems').split(','))\n self.stn_read_chunk_ghcnd = int(cfg.get('TOPOWX_CONFIG',\n 'stn_read_chunk_ghcnd'))\n self.stn_read_chunk_snotel = int(cfg.get('TOPOWX_CONFIG',\n 'stn_read_chunk_snotel'))\n self.stn_read_chunk_raws = int(cfg.get('TOPOWX_CONFIG',\n 'stn_read_chunk_raws'))\n self.stn_write_chunk_nc = int(cfg.get('TOPOWX_CONFIG',\n 'stn_write_chunk_nc'))\n self.stn_agg_chunk = int(cfg.get('TOPOWX_CONFIG',\n 'stn_agg_chunk'))\n self.username_geonames = cfg.get('TOPOWX_CONFIG',\n 'username_geonames')\n self.fpath_log_daily_infill = cfg.get('TOPOWX_CONFIG',\n 'fpath_log_daily_infill')\n self.twx_data_version = cfg.get('TOPOWX_CONFIG',\n 'twx_data_version')\n \n # Make TopoWx data directory for local storage of station observations\n self.path_stndata = os.path.join(self.twx_data_root, 'station_data')\n mkdir_p(self.path_stndata)\n \n fname_stndata_hdf_ghcnd = 'obs_ghcnd_%d_%d.hdf' % (ymdL(self.obs_start_date),\n ymdL(self.obs_end_date))\n self.fpath_stndata_hdf_ghcnd = os.path.join(self.path_stndata,\n fname_stndata_hdf_ghcnd)\n \n fname_stndata_hdf_snotel = 'obs_snotel_%d_%d.hdf' % (ymdL(self.obs_start_date),\n ymdL(self.obs_end_date))\n self.fpath_stndata_hdf_snotel = os.path.join(self.path_stndata,\n fname_stndata_hdf_snotel)\n \n fname_stndata_hdf_raws = 'obs_raws_%d_%d.hdf' % (ymdL(self.obs_start_date),\n ymdL(self.obs_end_date))\n self.fpath_stndata_hdf_raws = os.path.join(self.path_stndata,\n fname_stndata_hdf_raws)\n \n fname_stndata_nc_all = 
'obs_all_%d_%d.nc' % (ymdL(self.obs_start_date),\n ymdL(self.obs_end_date))\n self.fpath_stndata_nc_all = os.path.join(self.path_stndata,\n fname_stndata_nc_all)\n \n fname_stndata_nc_tair_tobs_adj = 'tair_tobs_adj_%d_%d.nc' % (ymdL(self.obs_start_date),\n ymdL(self.obs_end_date))\n self.fpath_stndata_nc_tair_tobs_adj = os.path.join(self.path_stndata,\n fname_stndata_nc_tair_tobs_adj)\n \n fname_stndata_nc_tair_homog = 'tair_homog_%d_%d.nc' % (ymdL(self.obs_start_date),\n ymdL(self.obs_end_date))\n self.fpath_stndata_nc_tair_homog = os.path.join(self.path_stndata,\n fname_stndata_nc_tair_homog)\n \n self.fpath_locqa_hdf = os.path.join(self.path_stndata, 'locqa.hdf')\n self.fpath_locqa_fail_csv = os.path.join(self.path_stndata, 'locqa_fail.csv')\n \n # Make TopoWx data directory for PHA-based homogenization\n self.path_homog_pha = os.path.join(self.path_stndata, 'homog')\n mkdir_p(self.path_homog_pha)\n self.fpath_pha_tgz = os.path.join(self.path_homog_pha, 'phav52i.tar.gz')\n \n # Make TopoWx data directories for reanalysis data\n self.path_reanalysis_data = os.path.join(self.twx_data_root,\n 'reanalysis_data')\n mkdir_p(self.path_reanalysis_data)\n self.path_reanalysis_namerica = os.path.join(self.path_reanalysis_data,\n 'n_america_subset')\n mkdir_p(self.path_reanalysis_namerica)\n \n # Make TopoWx data directory for infilled station observations\n self.path_stndata_infill = os.path.join(self.path_stndata, 'infill')\n mkdir_p(self.path_stndata_infill)\n self.fpath_xval_infill_nc = os.path.join(self.path_stndata_infill,\n 'xval_infill_tair.nc')\n self.fpath_stndata_nc_infill_tmin = os.path.join(self.path_stndata_infill,\n 'infill_tmin.nc')\n self.fpath_stndata_nc_infill_tmax = os.path.join(self.path_stndata_infill,\n 'infill_tmax.nc')\n self.fpath_flagged_bad_stns = os.path.join(self.path_stndata_infill,\n 'bad_stns.csv')\n self.fpath_stndata_nc_serial_tmin = os.path.join(self.path_stndata_infill,\n 'serial_tmin.nc')\n self.fpath_stndata_nc_serial_tmax = os.path.join(self.path_stndata_infill,\n 'serial_tmax.nc')\n \n # Make data directories for storing interp param optimization files\n # Temperature normals\n self.path_interp_optim_norms = os.path.join(self.path_stndata_infill,\n 'optim_norm')\n mkdir_p(self.path_interp_optim_norms)\n # Daily anomalies\n self.path_interp_optim_anoms = os.path.join(self.path_stndata_infill,\n 'optim_anom')\n mkdir_p(self.path_interp_optim_anoms)\n self.fpath_xval_interp_nc_tmin = os.path.join(self.path_stndata_infill,\n 'xval_interp_tmin.nc')\n self.fpath_xval_interp_nc_tmax = os.path.join(self.path_stndata_infill,\n 'xval_interp_tmax.nc')\n \n # Make TopoWx data directory for raster data\n self.path_rasters = os.path.join(self.twx_data_root, 'rasters')\n mkdir_p(self.path_rasters)\n self.path_predictor_rasters = os.path.join(self.path_rasters,\n 'conus_interp_grids', 'ncdf')\n mkdir_p(self.path_predictor_rasters)\n \n # Make TopoWx data directory for writing output tiles\n self.path_tile_out = os.path.join(self.twx_data_root, 'tile_output')\n mkdir_p(self.path_tile_out)\n \n \n # Make TopoWx log directory\n self.path_logs = os.path.join(self.twx_data_root, 'logs')\n mkdir_p(self.path_logs)\n \n ##################################\n # Make TopoWx data directory for final outputs\n ##################################\n \n self.path_final_output = os.path.join(self.twx_data_root, 'final_output_data')\n mkdir_p(self.path_final_output)\n \n # Final auxiliary data directories\n self.path_aux_data = os.path.join(self.path_final_output, 'auxiliary_data')\n 
mkdir_p(self.path_aux_data)\n self.path_aux_stndata = os.path.join(self.path_aux_data, 'station_data')\n mkdir_p(self.path_aux_stndata)\n self.fpath_stndata_nc_aux_tmin = os.path.join(self.path_aux_stndata,\n 'stn_obs_tmin.nc')\n self.fpath_stndata_nc_aux_tmax = os.path.join(self.path_aux_stndata,\n 'stn_obs_tmax.nc')\n self.fpath_pha_adj_aux = os.path.join(self.path_aux_stndata, 'homog_adjust.csv')\n self.path_aux_grids = os.path.join(self.path_aux_data, 'auxiliary_grids')\n mkdir_p(self.path_aux_grids)\n \n # Final TopoWx output mosaics for normals, daily, and monthly data\n self.path_mosaic_norms = os.path.join(self.path_final_output, 'normals')\n mkdir_p(self.path_mosaic_norms)\n self.path_mosaic_daily = os.path.join(self.path_final_output, 'daily')\n mkdir_p(self.path_mosaic_daily)\n self.path_mosaic_monthly = os.path.join(self.path_final_output, 'monthly')\n mkdir_p(self.path_mosaic_monthly)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382647,"cells":{"repo_name":{"kind":"string","value":"cdiazbas/MPySIR"},"path":{"kind":"string","value":"all2maps.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4874"},"content":{"kind":"string","value":"# Author: cdiazbas@iac.es\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pyLib.imtools as imtools\r\nimport numpy as np\r\n\r\n\r\n# ========================= CREANDO PHIMAP\r\n\r\nimport matplotlib.colors as mcolors\r\ndef make_colormap(seq):\r\n\tseq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\r\n\tcdict = {'red': [], 'green': [], 'blue': []}\r\n\tfor i, item in enumerate(seq):\r\n\t\tif isinstance(item, float):\r\n\t\t\tr1, g1, b1 = seq[i - 1]\r\n\t\t\tr2, g2, b2 = seq[i + 1]\r\n\t\t\tcdict['red'].append([item, r1, r2])\r\n\t\t\tcdict['green'].append([item, g1, g2])\r\n\t\t\tcdict['blue'].append([item, b1, b2])\r\n\treturn mcolors.LinearSegmentedColormap('CustomMap', cdict)\r\nc = mcolors.ColorConverter().to_rgb\r\nphimap = make_colormap([c('white'), c('tomato'), 0.33, c('tomato'), c('deepskyblue'), 0.66, c('deepskyblue'),c('white')])\r\n\r\n\r\ndef dimMap(resultadoSir):\r\n height = resultadoSir.shape[0]*(resultadoSir[0][-1][0][0]+1)\r\n width = (resultadoSir[0][-1][0][1]+1)\r\n return [height, width]\r\n\r\n\r\ndef readmapa(resultadoSir, mapa, magnitud):\r\n cont = 0\r\n for fila in range(0, height):\r\n for columna in range(0, width):\r\n punto = cont % resultadoSir.shape[1]\r\n veces = int(cont/resultadoSir.shape[1])\r\n if magnitud == 8 or magnitud == 9 or magnitud == 10 or magnitud == 11:\r\n mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud]\r\n else:\r\n mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud][index]\r\n cont += 1\r\n return mapa\r\n\r\n\r\ndef corrphi(mapa):\r\n mapa[mapa<0] = (mapa[mapa<0]+360) % 360; mapa[mapa>180] = (mapa[mapa>180]-180)\r\n\r\n\r\ndef do1map(logTau, magnitud):\r\n # ==============================================================================================\r\n # global index\r\n # global magnitud\r\n\r\n # ========================= INPUT\r\n invSir1 = 'MAPA1.npy'\r\n invSir2 = 'MAPA2.npy'\r\n\r\n # logTau = 0.0\r\n # magnitud = 2\r\n # hsv\r\n cmapArray = ['gray','gray','gray','bone','bone','seismic','Spectral_r',phimap,'bone','gray','gray','cubehelix']\r\n magTitle = [r'${\\rm log(\\tau)=}$',r'${\\rm T\\ [kK]}$','p',r'${\\rm v\\ [km/s]}$',r'${\\rm B\\ [kG]}$',r'${\\rm v\\ [km/s]}$',r'${\\rm \\gamma\\ [d]}$',r'${\\rm \\phi\\ [d]}$','vmacro','fillingf','difusa',r'${\\rm \\chi^2}$']\r\n magFile = 
['TAU','TEMP','PRESION','VMICRO','CAMPO','VLOS','GAMMA','PHI','VMACRO','FILLING','DIFUSA','CHI2']\r\n\r\n # ========================= MAP\r\n resultadoSir1 = np.load(invSir1)\r\n resultadoSir2 = np.load(invSir2)\r\n # height, width = dimMap(resultadoSir1)\r\n # print('height:',height,'width:',width)\r\n\r\n # mapa = np.zeros((height, width))\r\n index = np.where(resultadoSir1[0][0][1][0][0] == logTau)[0][0]\r\n print('logTau: '+str(logTau)+' -> index: '+str(index))\r\n # readmapa(resultadoSir1, mapa.T ,magnitud)\r\n\r\n from pySir import sirtools as st\r\n mapa1 = st.readSIRMap(resultadoSir1, magnitud, index)\r\n mapa2 = st.readSIRMap(resultadoSir2, magnitud, index)\r\n\r\n mapa = np.concatenate((mapa1, mapa2))\r\n\r\n from scipy import ndimage\r\n mapa = ndimage.median_filter(np.flipud(mapa), 3)\r\n\r\n # Limites en la escala de color\r\n if magnitud == 7: corrphi(mapa)\r\n print('3sigma_map: {0:2.2f}'.format(3*np.std(mapa)))\r\n print('Mean_map: {0:2.2f}'.format(np.mean(mapa)))\r\n print('Min_map: {0:2.2f}'.format(np.min(mapa)))\r\n print('Max_map: {0:2.2f}'.format(np.max(mapa)))\r\n\r\n vmini = np.mean(mapa)-3*np.std(mapa)\r\n if np.min(mapa) >= 0.0 and magnitud != 1: vmini = 0.\r\n vmaxi = np.mean(mapa)+3*np.std(mapa)\r\n if magnitud == 1 or magnitud == 4: vmini = np.min(mapa); vmaxi = np.max(mapa)\r\n if magnitud == 6: vmaxi = 180.\r\n if magnitud == 7: vmaxi = 180.;vmini = 0.\r\n if magnitud == 11: vmaxi = np.max(mapa); vmini = 0.\r\n if magnitud == 5: vmini = np.mean(mapa)-4*np.std(mapa); vmaxi = -vmini\r\n\r\n from matplotlib.colors import LogNorm\r\n plt.imshow(mapa,cmap=cmapArray[magnitud],origin='lower',interpolation='None',vmin=vmini,vmax=vmaxi)#norm=LogNorm()\r\n plt.title('Map 17jun14.006 (1-2)')\r\n plt.xlabel('Slit Axis [pix]')\r\n plt.ylabel('Time Axis [pix]')\r\n cb = plt.colorbar(shrink=.46)#, ticks=[0.6, 0.8, 1., 1.2])\r\n #cb = plt.colorbar(shrink=.46, ticks=[0.3, 0.6, 0.9, 1.2, 1.5])\r\n # cb.set_label(r'Intensity HeI ({0:4.1f}) /$I_{{qs}}$({1:4.1f})'.format(xLambda[341],xLambda[posicontinuo]), labelpad=5., y=0.5, fontsize=12.)\r\n loglabel = r'${\\rm log(\\tau)=}$'\r\n cb.set_label(r\"\"+magTitle[magnitud]+r\", \"+loglabel+\"{0}\".format(logTau), labelpad=8., y=0.5, fontsize=12.)\r\n\r\n # plt.show()\r\n plt.savefig(magFile[magnitud]+'_log{0:02d}.pdf'.format(int(logTau)), bbox_inches='tight')\r\n print(magFile[magnitud]+'_log{0:02d}.pdf SAVE'.format(int(logTau)))\r\n print('-----------------------'+str(magnitud))\r\n plt.clf()\r\n\r\n\r\nfor magnitud in range(12):\r\n do1map(0.0, magnitud)\r\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382648,"cells":{"repo_name":{"kind":"string","value":"timberhill/blablaplot"},"path":{"kind":"string","value":"blablaplot.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6659"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nfrom numpy import loadtxt, asarray\nfrom numpy.random import normal as gaussian_noise\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport warnings\n\n\"\"\"\nHere you register new characters in format:\n'' : (, , ''),\n\"\"\"\ncharlist = {\n\t'a' : (0.7, 1.0, 'a'),\n\t'b' : (0.7, 1.0, 'b'),\n\t'c' : (0.7, 1.0, 'c'),\n\t'd' : (0.7, 1.0, 'd'),\n\t'e' : (0.7, 1.0, 'e'),\n\t'f' : (0.7, 1.0, 'f'),\n\t'g' : (0.7, 1.0, 'g'),\n\t'h' : (0.7, 1.0, 'h'),\n\t'i' : (0.4, 1.0, 'i'),\n\t'j' : (0.4, 1.0, 'j'),\n\t'k' : (0.7, 1.0, 'k'),\n\t'l' : (0.7, 1.0, 'l'),\n\t'm' : (0.7, 1.0, 'm'),\n\t'n' : (0.7, 1.0, 'n'),\n\t'o' : (0.7, 1.0, 'o'),\n\t'p' : (0.7, 
1.0, 'p'),\n\t'q' : (0.7, 1.0, 'q'),\n\t'r' : (0.7, 1.0, 'r'),\n\t's' : (0.7, 1.0, 's'),\n\t't' : (0.7, 1.0, 't'),\n\t'u' : (0.7, 1.0, 'u'),\n\t'v' : (0.7, 1.0, 'v'),\n\t'w' : (0.7, 1.0, 'w'),\n\t'x' : (0.7, 1.0, 'x'),\n\t'y' : (0.7, 1.0, 'y'),\n\t'z' : (0.7, 1.0, 'z'),\n\n\t'0' : (0.7, 1.0, '0'),\n\t'1' : (0.5, 1.0, '1'),\n\t'2' : (0.7, 1.0, '2'),\n\t'3' : (0.7, 1.0, '3'),\n\t'4' : (0.7, 1.0, '4'),\n\t'5' : (0.7, 1.0, '5'),\n\t'6' : (0.7, 1.0, '6'),\n\t'7' : (0.7, 1.0, '7'),\n\t'8' : (0.7, 1.0, '8'),\n\t'9' : (0.7, 1.0, '9'),\n\n\t' ' : (0.7, 0.0, 'space'),\n\t'?' : (0.7, 1.0, 'questionmark'),\n\t'!' : (0.2, 1.0, 'exclamationmark'),\n\t',' : (0.1, 0.1, 'comma'),\n\t'.' : (0.2, 0.1, 'fullstop'),\n\t'&' : (0.6, 1.0, 'ampersand'),\n\t'$' : (0.5, 1.0, 'dollar'),\n\t'@' : (0.7, 1.0, 'at'),\n\t'(' : (0.3, 1.0, 'brackets_open'),\n\t')' : (0.3, 1.0, 'brackets_close'),\n\t'#' : (0.7, 1.0, 'hash'),\n\t'%' : (0.7, 1.0, 'percent'),\n}\n\n\nclass Character(object):\n\t\"\"\"\n\tARGUMENTS\n\tchar\t- single character (first one is chosen)\n\tsize\t- size of the letter (width, height)\n\n\tself.xs, self.ys - arrays with letter points\n\t\"\"\"\n\tdef __init__(self, char, filename='', size=(1.0, 1.0), jitter=0.0):\n\t\tif len(char) < 1:\n\t\t\traise Exception('Empty string is passed to Character() constructor.')\n\n\t\tself.char = char[0]\n\t\tif len(filename) > 0:\n\t\t\tself.filename = filename\n\t\telse:\n\t\t\t'chars/' + self.char + '.dat'\n\n\t\tself._getPoints()\n\t\tself.resize(size=size)\n\n\tdef _getPoints(self):\n\t\txs, ys = loadtxt('chars/' + self.filename + '.dat', unpack=True)\n\t\tself.xs = asarray(xs)\n\t\tself.ys = asarray(ys)\n\n\t\tself._sort()\n\n\tdef _sort(self):\n\t\tpoints = zip(self.xs, self.ys)\n\t\tsorted_points = sorted(points)\n\n\t\tself.xs = asarray([point[0] for point in sorted_points])\n\t\tself.ys = asarray([point[1] for point in sorted_points])\n\n\tdef resize(self, size=(1.0, 1.0)):\n\t\tself.size = size\n\n\t\tif len(self.xs) < 1:\n\t\t\tself._getPoints()\n\n\t\txmin = min(self.xs)\n\t\txmax = max(self.xs)\n\t\tymin = min(self.ys)\n\t\tymax = max(self.ys)\n\n\t\tfor i in range(0, len(self.xs)):\n\t\t\tself.xs[i] = self.size[0] * (self.xs[i] - xmin) / (xmax - xmin)\n\t\t\tself.ys[i] = self.size[1] * (self.ys[i] - ymin) / (ymax - ymin)\n\n\n\nclass TextyPloty(object):\n\t\"\"\"\n\tARGUMENTS\n\tjitter\t- to randomize points locations, represents sigma for gaussian noise\n\tspacing\t- distance between letters\n\toffset\t- offset from zero point if format (x, y)\n\tscale\t- scale/size of the letters\n\tfunc\t- function to add text to \n\t\"\"\"\n\tdef __init__(self, jitter=0.0, spacing=0.1, offset=(0.0, 0.0), scale=(1.0, 1.0), func=None):\n\t\tself.jitter = jitter\n\t\tself.spacing = spacing\n\t\tself.offset = offset\n\t\tself.scale = scale\n\t\tself.func = func\n\n\t\tself.charlist = charlist\n\n\t\"\"\"\n\tARGUMENTS\n\ttext\t- string to plot\n\n\tRETURNS\n\txs, ys\t- points coordinates\n\t\"\"\"\n\tdef get(self, text):\n\t\txs, ys = [], []\n\n\t\txoffset = self.offset[0]\n\t\tfor char in text:\n\t\t\tif char == ' ':\n\t\t\t\txoffset += self.charlist[char][0] * self.scale[0]\n\t\t\telif char == '\\t':\n\t\t\t\txoffset += self.charlist[char][0] * self.scale[0] * 4\n\t\t\telif char in self.charlist:\n\t\t\t\tcharobj = Character(char=char, filename=self.charlist[char][2], size=self.charlist[char])\n\t\t\t\txs.extend(self.scale[0] * charobj.xs + xoffset)\n\t\t\t\tys.extend(self.scale[1] * charobj.ys + self.offset[1])\n\t\t\t\txoffset += self.charlist[char][0] * 
self.scale[0]\n\t\t\telse:\n\t\t\t\twarnings.warn('Could not find file with \"' + char + '\" character. Skipping...', Warning)\n\n\t\t\txoffset += self.spacing * self.scale[0]\n\n\t\tif self.func != None:\n\t\t\tfor i in range(0,len(xs)):\n\t\t\t\tys[i] += self.func(xs[i])\n\n\t\tif self.jitter > 0:\t\t\t\n\t\t\tnoise = gaussian_noise(0.0, self.jitter*self.scale[1], (len(ys)))\n\t\t\tys = [x+y for x, y in zip(ys, noise)]\n\n\t\treturn asarray(xs), asarray(ys)\n\n\nclass ResidualsPlot(object):\n\t\"\"\"\n\n\t\"\"\"\n\tdef __init__(self, data=([],[]), datastyle='k.', xs_fit=[], func=None, fitstyle='r-', \\\n\t\txlabel='', ylabel='', reslabel='', ratio=[4, 1], figsize=(10,6), axis=None, res_axis=None, \\\n\t\tfitlabel='fit', datalabel='points'):\n\t\tself.plt_instance = plt\n\t\tself.xs = data[0]\n\t\tself.ys = data[1]\n\t\tself.datastyle = datastyle\n\t\tself.xs_fit = xs_fit\n\t\tself.func = func\n\t\tself.ys_fit = self.func(self.xs_fit)\n\t\tself.fitstyle = fitstyle\n\t\tself.xlabel = xlabel\n\t\tself.ylabel = ylabel\n\t\tself.reslabel = reslabel\n\t\tself.ratio = ratio\n\t\tself.figsize = figsize\n\t\tself.axis = axis\n\t\tself.res_axis = res_axis\n\n\t\tself.fitlabel = fitlabel\n\t\tself.datalabel = datalabel\n\n\tdef draw(self):\n\t\tself.redraw()\n\n\tdef redraw(self):\n\t\tself.plt_instance = plt\n\n\t\tself.plt_instance.figure(figsize=self.figsize)\n\t\tself.gridspec_instance = gridspec.GridSpec(2, 1, height_ratios=self.ratio)\n\t\tself.gridspec_instance.update(hspace=0.00)\n\t\tself.ax0 = self.plt_instance.subplot(self.gridspec_instance[0])\n\t\tself.ax1 = self.plt_instance.subplot(self.gridspec_instance[1])\n\t\tself.ys_res = self.ys - self.func(self.xs)\n\n\t\t# set axis ranges\n\t\tif self.axis == None:\n\t\t\tself.ax0.axis([min(self.xs_fit) * 1.1, max(self.xs_fit)*1.1, min(self.ys_fit) * 1.1, max(self.ys_fit) * 1.1])\n\t\telif len(self.axis) != 4:\n\t\t\traise Exception('ResidualsPlot: axis should contain 4 numbers: (x1, x2, y1, y2)')\n\t\telse:\n\t\t\tself.ax0.axis(self.axis)\n\n\t\tif self.res_axis == None:\n\t\t\tself.ax1.axis([min(self.xs_fit) * 1.1, max(self.xs_fit)*1.1, min(self.ys_res) * 1.1, max(self.ys_res)*1.1])\n\t\telif len(self.res_axis) != 4:\n\t\t\traise Exception('ResidualsPlot: res_axis should contain 4 numbers: (x1, x2, y1, y2)')\n\t\telse:\n\t\t\tself.ax1.axis(self.res_axis)\n\n\t\t# set axis labels\n\t\tself.ax0.set_ylabel(self.ylabel)\n\t\tself.ax1.set_ylabel(self.reslabel)\n\t\tself.ax1.set_xlabel(self.xlabel)\n\n\t\t# first subplot: datapoints and fit\n\t\tself.ax0.plot(self.xs_fit, self.ys_fit, self.fitstyle, label=self.fitlabel)\n\t\tself.ax0.plot(self.xs, self.ys, self.datastyle, label=self.datalabel)\n\n\t\t# second subplot: residuals\n\t\tself.ax1.plot([min(self.xs), max(self.xs)], [0,0], self.fitstyle)\n\t\tself.ax1.plot(self.xs, self.ys_res, self.datastyle)\n\n\t\tself.ax0.legend(loc=\"upper right\")\n\n\n\tdef show(self):\n\t\tself.plt_instance.show()\n\n\tdef savefig(self, name='plot.pdf'):\n\t\tself.plt_instance.savefig(name)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382649,"cells":{"repo_name":{"kind":"string","value":"tiankanl/2014_fall_ASTR599"},"path":{"kind":"string","value":"notebooks/fig_code/helpers.py"},"copies":{"kind":"string","value":"74"},"size":{"kind":"string","value":"2301"},"content":{"kind":"string","value":"\"\"\"\nSmall helpers for code that is not shown in the notebooks\n\"\"\"\n\nfrom sklearn import neighbors, datasets, linear_model\nimport pylab as pl\nimport numpy as np\nfrom matplotlib.colors import 
ListedColormap\n\n# Create color maps for 3-class classification problem, as with iris\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\ndef plot_iris_knn():\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features. We could\n # avoid this ugly slicing by using a two-dim dataset\n y = iris.target\n\n knn = neighbors.KNeighborsClassifier(n_neighbors=3)\n knn.fit(X, y)\n\n x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1\n y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),\n np.linspace(y_min, y_max, 100))\n Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n pl.figure()\n pl.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n # Plot also the training points\n pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\n pl.xlabel('sepal length (cm)')\n pl.ylabel('sepal width (cm)')\n pl.axis('tight')\n\n\ndef plot_polynomial_regression():\n rng = np.random.RandomState(0)\n x = 2*rng.rand(100) - 1\n\n f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9\n y = f(x) + .4 * rng.normal(size=100)\n\n x_test = np.linspace(-1, 1, 100)\n\n pl.figure()\n pl.scatter(x, y, s=4)\n\n X = np.array([x**i for i in range(5)]).T\n X_test = np.array([x_test**i for i in range(5)]).T\n regr = linear_model.LinearRegression()\n regr.fit(X, y)\n pl.plot(x_test, regr.predict(X_test), label='4th order')\n\n X = np.array([x**i for i in range(10)]).T\n X_test = np.array([x_test**i for i in range(10)]).T\n regr = linear_model.LinearRegression()\n regr.fit(X, y)\n pl.plot(x_test, regr.predict(X_test), label='9th order')\n\n pl.legend(loc='best')\n pl.axis('tight')\n pl.title('Fitting a 4th and a 9th order polynomial')\n\n pl.figure()\n pl.scatter(x, y, s=4)\n pl.plot(x_test, f(x_test), label=\"truth\")\n pl.axis('tight')\n pl.title('Ground truth (9th order polynomial)')\n\n\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382650,"cells":{"repo_name":{"kind":"string","value":"mwv/scikit-learn"},"path":{"kind":"string","value":"examples/datasets/plot_random_dataset.py"},"copies":{"kind":"string","value":"348"},"size":{"kind":"string","value":"2254"},"content":{"kind":"string","value":"\"\"\"\n==============================================\nPlot randomly generated classification dataset\n==============================================\n\nPlot several randomly generated 2D classification datasets.\nThis example illustrates the :func:`datasets.make_classification`\n:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`\nfunctions.\n\nFor ``make_classification``, three binary and two multi-class classification\ndatasets are generated, with different numbers of informative features and\nclusters per class. 
\"\"\"\n\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import make_blobs\nfrom sklearn.datasets import make_gaussian_quantiles\n\nplt.figure(figsize=(8, 8))\nplt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)\n\nplt.subplot(321)\nplt.title(\"One informative feature, one cluster per class\", fontsize='small')\nX1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,\n n_clusters_per_class=1)\nplt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)\n\nplt.subplot(322)\nplt.title(\"Two informative features, one cluster per class\", fontsize='small')\nX1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,\n n_clusters_per_class=1)\nplt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)\n\nplt.subplot(323)\nplt.title(\"Two informative features, two clusters per class\", fontsize='small')\nX2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)\nplt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)\n\n\nplt.subplot(324)\nplt.title(\"Multi-class, two informative features, one cluster\",\n fontsize='small')\nX1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,\n n_clusters_per_class=1, n_classes=3)\nplt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)\n\nplt.subplot(325)\nplt.title(\"Three blobs\", fontsize='small')\nX1, Y1 = make_blobs(n_features=2, centers=3)\nplt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)\n\nplt.subplot(326)\nplt.title(\"Gaussian divided into three quantiles\", fontsize='small')\nX1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)\nplt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)\n\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382651,"cells":{"repo_name":{"kind":"string","value":"r-mart/scikit-learn"},"path":{"kind":"string","value":"examples/mixture/plot_gmm_selection.py"},"copies":{"kind":"string","value":"248"},"size":{"kind":"string","value":"3223"},"content":{"kind":"string","value":"\"\"\"\n=================================\nGaussian Mixture Model Selection\n=================================\n\nThis example shows that model selection can be performed with\nGaussian Mixture Models using information-theoretic criteria (BIC).\nModel selection concerns both the covariance type\nand the number of components in the model.\nIn that case, AIC also provides the right result (not shown to save time),\nbut BIC is better suited if the problem is to identify the right model.\nUnlike Bayesian procedures, such inferences are prior-free.\n\nIn that case, the model with 2 components and full covariance\n(which corresponds to the true generative model) is selected.\n\"\"\"\nprint(__doc__)\n\nimport itertools\n\nimport numpy as np\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nfrom sklearn import mixture\n\n# Number of samples per component\nn_samples = 500\n\n# Generate random sample, two components\nnp.random.seed(0)\nC = np.array([[0., -0.1], [1.7, .4]])\nX = np.r_[np.dot(np.random.randn(n_samples, 2), C),\n .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]\n\nlowest_bic = np.infty\nbic = []\nn_components_range = range(1, 7)\ncv_types = ['spherical', 'tied', 'diag', 'full']\nfor cv_type in cv_types:\n for n_components in n_components_range:\n # Fit a mixture of Gaussians with EM\n gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)\n gmm.fit(X)\n bic.append(gmm.bic(X))\n if bic[-1] < lowest_bic:\n lowest_bic = bic[-1]\n best_gmm = 
gmm\n\nbic = np.array(bic)\ncolor_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])\nclf = best_gmm\nbars = []\n\n# Plot the BIC scores\nspl = plt.subplot(2, 1, 1)\nfor i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):\n xpos = np.array(n_components_range) + .2 * (i - 2)\n bars.append(plt.bar(xpos, bic[i * len(n_components_range):\n (i + 1) * len(n_components_range)],\n width=.2, color=color))\nplt.xticks(n_components_range)\nplt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])\nplt.title('BIC score per model')\nxpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\\\n .2 * np.floor(bic.argmin() / len(n_components_range))\nplt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)\nspl.set_xlabel('Number of components')\nspl.legend([b[0] for b in bars], cv_types)\n\n# Plot the winner\nsplot = plt.subplot(2, 1, 2)\nY_ = clf.predict(X)\nfor i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,\n color_iter)):\n v, w = linalg.eigh(covar)\n if not np.any(Y_ == i):\n continue\n plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan2(w[0][1], w[0][0])\n angle = 180 * angle / np.pi # convert to degrees\n v *= 4\n ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)\n ell.set_clip_box(splot.bbox)\n ell.set_alpha(.5)\n splot.add_artist(ell)\n\nplt.xlim(-10, 10)\nplt.ylim(-3, 6)\nplt.xticks(())\nplt.yticks(())\nplt.title('Selected GMM: full model, 2 components')\nplt.subplots_adjust(hspace=.35, bottom=.02)\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382652,"cells":{"repo_name":{"kind":"string","value":"MusicVisualizationUMass/TeamNameGenerator"},"path":{"kind":"string","value":"src/musicvisualizer/pipeline/models/tests/losc_manual_tests.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1565"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\n'''A set of manual tests to make sure bits and pieces are working. 
Dataset preview rows (row index, repository, file path, copies, size in bytes, license), each followed by a summary of the stored file:

- (row cut off at the start; index, repository and path fields missing), license mit. Content: tail of a standalone driver script for musicvisualizer's LinearOscillatorModel. Its docstring notes it is not part of the general testing framework but is kept in case the data is useful later; the __main__ block defines empty and single-pulse data generators, builds the model with 1024 points, a visual sample rate of 24, a data rate of 96 FPS, hook 11.0 and damping 0.9999, then iterates over frames and scatter-plots the point positions with matplotlib in interactive mode (plt.ion, plt.pause, plt.cla).

- Row 382653: Ziqi-Li/bknqgis, path pandas/pandas/core/dtypes/inference.py, copies 8, size 8381, license gpl-2.0. Content: pandas' "basic inference routines" module. It re-exports low-level checks from pandas._libs.lib (is_bool, is_integer, is_float, is_complex, is_scalar, is_decimal, is_interval) and defines docstring-documented predicates: is_number, is_string_like, _iterable_not_string, is_iterator, is_file_like, is_re, is_re_compilable, is_list_like, is_nested_list_like, is_dict_like, is_named_tuple, is_hashable and is_sequence, with Python 2/3 compatibility handled through pandas.compat.
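Most of these predicates are one-line structural checks. As a rough illustration (a Python 3 simplification written for this summary, not the pandas source itself), the two most commonly used ones behave like this:

```python
# Minimal standalone sketch of list-like / dict-like checks in the spirit of
# the inference helpers above; not the pandas implementation itself.

def is_list_like(obj):
    # Iterable, but strings and bytes are deliberately excluded.
    return hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes))

def is_dict_like(obj):
    # Mapping-style access: supports item lookup and exposes keys().
    return hasattr(obj, "__getitem__") and hasattr(obj, "keys")

if __name__ == "__main__":
    print(is_list_like([1, 2, 3]))   # True
    print(is_list_like("foo"))       # False
    print(is_dict_like({1: 2}))      # True
    print(is_dict_like([1, 2, 3]))   # False
```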
- Row 382654: yassersouri/omgh, path src/scripts/grid_search_c.py, copies 1, size 2365, license mit. Content: experiment script for the CUB-200-2011 birds dataset. It loads cached CNN_Features_CAFFE_REFERENCE features from several datastores (raw, cropped, part-head and part-body crops), concatenates the train/test feature matrices, grid-searches the C parameter of sklearn's svm.LinearSVC with GridSearchCV over C in {100, 10, 1, 0.1, 0.01, 0.001, 0.0001}, then refits with the best C and prints fit/predict timings, accuracy and mean per-class accuracy.

- Row 382655: louispotok/pandas, path pandas/tests/indexes/datetimes/test_partial_slicing.py, copies 1, size 15706, license bsd-3-clause. Content: pandas test module for partial slicing of Series/DataFrame with a DatetimeIndex. TestSlicing covers DatetimeIndex slicing and frequency handling, negative and zero slice steps, slice bounds on empty indexes, duplicate and non-monotonic indexes, partial string slices by year, quarter and month and at daily, hourly, minutely and second precision, string resolution versus index resolution for both Series and DataFrame, partial slicing with a MultiIndex, .loc on a single-day range, and comparisons of datetime64[ns] columns against datetime-like scalars.
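The feature that test module exercises, partial string indexing on a DatetimeIndex, can be tried directly. A minimal sketch, assuming a reasonably recent pandas (exact corner-case behaviour has shifted between versions):

```python
import numpy as np
import pandas as pd

# Daily series spanning 2005: a DatetimeIndex enables partial string indexing.
idx = pd.date_range("2005-01-01", periods=500, freq="D")
s = pd.Series(np.arange(len(idx)), index=idx)

print(len(s["2005-11"]))            # 30 values: all of November 2005
print(len(s["2005-05":"2006-02"]))  # slice from 2005-05-01 through 2006-02-28
print(s.loc["2005-01-03"])          # full-resolution string resolves to a scalar
```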
- Row 382656: dorvaljulien/StarFiddle, path density_scatter_plot.py, copies 1, size 4635, license mit. Content: defines DensityScatter and DensityScatter3D. Both normalise the input coordinates, use the repository's kdtree.Tree to find each point's NNb nearest neighbours, estimate a local density from the distance to the last neighbour (NNb / (pi d^2) in 2-D, NNb / (4/3 pi d^3) in 3-D), split the densities into logarithmic bins, and plot each bin with its own colour and transparency from the rainbow colormap so that structure inside dense regions stays visible; the 2-D version also supports log-scaled axes and passes extra keyword arguments through to plt.plot.
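The same plotting idea can be sketched with SciPy's k-d tree instead of the repository's own Tree class. The snippet below is an illustrative stand-in with made-up Gaussian data, not the StarFiddle implementation:

```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
# Two overlapping Gaussian blobs as stand-in data.
xy = np.vstack([rng.normal(0.0, 1.0, (2000, 2)), rng.normal(1.5, 0.3, (2000, 2))])

k = 15                                    # neighbours used for the density estimate
tree = cKDTree(xy)
dist, _ = tree.query(xy, k=k + 1)         # k+1 because the nearest hit is the point itself
density = k / (np.pi * dist[:, -1] ** 2)  # points per unit area inside the k-NN radius

order = np.argsort(density)               # draw dense points last so they stay visible
plt.scatter(xy[order, 0], xy[order, 1], c=density[order], s=8, cmap="rainbow")
plt.colorbar(label="local density")
plt.show()
```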
- Row 382657: Huyuwei/tvm, path docs/conf.py, copies 2, size 8618, license apache-2.0. Content: Sphinx configuration for the TVM documentation. It carries the Apache-2.0 license header, inserts the tvm, topi, nnvm and vta Python packages onto sys.path, wires up recommonmark for Markdown sources, enables autodoc, autosummary, intersphinx, napoleon, mathjax and sphinx-gallery, points breathe at the doxygen XML output and copies the doxygen HTML during the build, selects the Read the Docs theme with the TVM logo, sets an intersphinx mapping for Python, NumPy, SciPy and matplotlib, and configures sphinx_gallery_conf with explicitly ordered tutorial subsections and a TVM_TUTORIAL_EXEC_PATTERN override controlling which examples are executed.
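Stripped of the TVM-specific wiring, the core of such a configuration is small. The sketch below uses placeholder project and directory names and keeps only the sphinx-gallery keys that also appear in the file summarised above:

```python
# Minimal Sphinx conf.py sketch with a sphinx-gallery section; "myproject" and
# the directory names are placeholders, not TVM's actual values.
import os
import sys

sys.path.insert(0, os.path.abspath("../python"))   # make the package importable for autodoc

project = "myproject"
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.napoleon",
    "sphinx_gallery.gen_gallery",
]

sphinx_gallery_conf = {
    "examples_dirs": ["../tutorials"],   # where the executable example scripts live
    "gallery_dirs": ["tutorials"],       # where the rendered gallery pages are written
    "filename_pattern": r"\.py",         # which examples actually get executed
}

html_theme = "sphinx_rtd_theme"
```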
- Row 382658: dpshelio/sunpy, path examples/parse_time.py, copies 2, size 3839, license bsd-2-clause. Content: gallery example for sunpy.time.parse_time. It parses time strings (including a leap second and a TAI-scale value), tuples, time.struct_time, datetime and date objects, pandas Timestamp, Series and DatetimeIndex values, and numpy datetime64 scalars and arrays into astropy.time.Time objects, and ends by showing the format= keyword with values such as 'jd' and 'byear_str'.

- Row 382659: ekadhanda/bin, path python/coda-cont.py, copies 1, size 7175, license mit. Content: command-line script (Vasaant S/O Krishnan, 2017) that plots a multivariate distribution with contours from CODA output. It prints usage when run without arguments, identifies which of the supplied .txt files is CODAindex and which is CODAchain by matching the first line with regular expressions, harvests each variable's [start, stop] range and the chain values, checks the harvested length against the expected one, optionally subsamples the chain (samp=), and draws a seaborn jointplot KDE of two selected variables.
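The index-file harvesting in that script reduces to one regular expression per line. A condensed sketch of just that step (the file name is illustrative):

```python
import re

# Each CODAindex.txt line looks like: "<variable> <start> <stop>".
index_codes = {}
line_re = re.compile(r"^(\S+)\s+([+-]?\d+)\s+([+-]?\d+)$")

with open("CODAindex.txt") as fh:
    for line in fh:
        m = line_re.search(line.strip())
        if m:
            name, start, stop = m.group(1), int(m.group(2)), int(m.group(3))
            index_codes[name] = [start, stop]   # 1-based [start, stop] into CODAchain.txt

print(index_codes)
```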
- Row 382660: alephu5/Soundbyte, path environment/lib/python3.3/site-packages/pandas/sparse/tests/test_libsparse.py, copies 1, size 11260, license gpl-3.0. Content: a vendored copy of pandas' sparse-index test module. It builds BlockIndex/IntIndex cases (plain, deleted, split and skipped blocks, no intersection), tests make_union, lookup and intersect, checks BlockIndex integrity and round-trips to IntIndex and back, and generates TestSparseOperators methods that compare the sparse nan-ops and fill-value ops (add, sub, mul, truediv, floordiv) from pandas._sparse against the equivalent dense Series arithmetic.
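The BlockIndex-to-IntIndex conversion those tests check is easy to restate without pandas internals. A pure-Python sketch using the same figures as test_to_int_index:

```python
# Blocks are (location, length) pairs over a sparse vector of a given length;
# converting to an integer index just expands each block into explicit positions.

def blocks_to_indices(locs, lengths):
    """Expand block starts and lengths into the explicit integer positions."""
    indices = []
    for loc, length in zip(locs, lengths):
        indices.extend(range(loc, loc + length))
    return indices

# Same figures as the test: blocks at 0 (length 4) and 10 (length 6) in a length-20 vector.
print(blocks_to_indices([0, 10], [4, 6]))
# -> [0, 1, 2, 3, 10, 11, 12, 13, 14, 15]
```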
done.'"},"license":{"kind":"string","value":"cc0-1.0"}}},{"rowIdx":382662,"cells":{"repo_name":{"kind":"string","value":"jmetzen/scikit-learn"},"path":{"kind":"string","value":"examples/tree/plot_tree_regression_multioutput.py"},"copies":{"kind":"string","value":"22"},"size":{"kind":"string","value":"1848"},"content":{"kind":"string","value":"\"\"\"\n===================================================================\nMulti-output Decision Tree Regression\n===================================================================\n\nAn example to illustrate multi-output regression with decision tree.\n\nThe :ref:`decision trees `\nis used to predict simultaneously the noisy x and y observations of a circle\ngiven a single underlying feature. As a result, it learns local linear\nregressions approximating the circle.\n\nWe can see that if the maximum depth of the tree (controlled by the\n`max_depth` parameter) is set too high, the decision trees learn too fine\ndetails of the training data and learn from the noise, i.e. they overfit.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\n\n# Create a random dataset\nrng = np.random.RandomState(1)\nX = np.sort(200 * rng.rand(100, 1) - 100, axis=0)\ny = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T\ny[::5, :] += (0.5 - rng.rand(20, 2))\n\n# Fit regression model\nregr_1 = DecisionTreeRegressor(max_depth=2)\nregr_2 = DecisionTreeRegressor(max_depth=5)\nregr_3 = DecisionTreeRegressor(max_depth=8)\nregr_1.fit(X, y)\nregr_2.fit(X, y)\nregr_3.fit(X, y)\n\n# Predict\nX_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]\ny_1 = regr_1.predict(X_test)\ny_2 = regr_2.predict(X_test)\ny_3 = regr_3.predict(X_test)\n\n# Plot the results\nplt.figure()\ns = 50\nplt.scatter(y[:, 0], y[:, 1], c=\"navy\", s=s, label=\"data\")\nplt.scatter(y_1[:, 0], y_1[:, 1], c=\"cornflowerblue\", s=s, label=\"max_depth=2\")\nplt.scatter(y_2[:, 0], y_2[:, 1], c=\"c\", s=s, label=\"max_depth=5\")\nplt.scatter(y_3[:, 0], y_3[:, 1], c=\"orange\", s=s, label=\"max_depth=8\")\nplt.xlim([-6, 6])\nplt.ylim([-6, 6])\nplt.xlabel(\"data\")\nplt.ylabel(\"target\")\nplt.title(\"Multi-output Decision Tree Regression\")\nplt.legend()\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382663,"cells":{"repo_name":{"kind":"string","value":"wzbozon/scikit-learn"},"path":{"kind":"string","value":"benchmarks/bench_glm.py"},"copies":{"kind":"string","value":"297"},"size":{"kind":"string","value":"1493"},"content":{"kind":"string","value":"\"\"\"\nA comparison of different methods in GLM\n\nData comes from a random square matrix.\n\n\"\"\"\nfrom datetime import datetime\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.utils.bench import total_seconds\n\n\nif __name__ == '__main__':\n\n import pylab as pl\n\n n_iter = 40\n\n time_ridge = np.empty(n_iter)\n time_ols = np.empty(n_iter)\n time_lasso = np.empty(n_iter)\n\n dimensions = 500 * np.arange(1, n_iter + 1)\n\n for i in range(n_iter):\n\n print('Iteration %s of %s' % (i, n_iter))\n\n n_samples, n_features = 10 * i + 3, 10 * i + 3\n\n X = np.random.randn(n_samples, n_features)\n Y = np.random.randn(n_samples)\n\n start = datetime.now()\n ridge = linear_model.Ridge(alpha=1.)\n ridge.fit(X, Y)\n time_ridge[i] = total_seconds(datetime.now() - start)\n\n start = datetime.now()\n ols = linear_model.LinearRegression()\n ols.fit(X, Y)\n time_ols[i] = total_seconds(datetime.now() - start)\n\n start = datetime.now()\n 
lasso = linear_model.LassoLars()\n lasso.fit(X, Y)\n time_lasso[i] = total_seconds(datetime.now() - start)\n\n pl.figure('scikit-learn GLM benchmark results')\n pl.xlabel('Dimensions')\n pl.ylabel('Time (s)')\n pl.plot(dimensions, time_ridge, color='r')\n pl.plot(dimensions, time_ols, color='g')\n pl.plot(dimensions, time_lasso, color='b')\n\n pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')\n pl.axis('tight')\n pl.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382664,"cells":{"repo_name":{"kind":"string","value":"hainm/scikit-learn"},"path":{"kind":"string","value":"sklearn/utils/extmath.py"},"copies":{"kind":"string","value":"142"},"size":{"kind":"string","value":"21102"},"content":{"kind":"string","value":"\"\"\"\nExtended math utilities.\n\"\"\"\n# Authors: Gael Varoquaux\n# Alexandre Gramfort\n# Alexandre T. Passos\n# Olivier Grisel\n# Lars Buitinck\n# Stefan van der Walt\n# Kyle Kastner\n# License: BSD 3 clause\n\nfrom __future__ import division\nfrom functools import partial\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.sparse import issparse\n\nfrom . import check_random_state\nfrom .fixes import np_version\nfrom ._logistic_sigmoid import _log_logistic_sigmoid\nfrom ..externals.six.moves import xrange\nfrom .sparsefuncs_fast import csr_row_norms\nfrom .validation import check_array, NonBLASDotWarning\n\n\ndef norm(x):\n \"\"\"Compute the Euclidean or Frobenius norm of x.\n\n Returns the Euclidean norm when x is a vector, the Frobenius norm when x\n is a matrix (2-d array). More precise than sqrt(squared_norm(x)).\n \"\"\"\n x = np.asarray(x)\n nrm2, = linalg.get_blas_funcs(['nrm2'], [x])\n return nrm2(x)\n\n\n# Newer NumPy has a ravel that needs less copying.\nif np_version < (1, 7, 1):\n _ravel = np.ravel\nelse:\n _ravel = partial(np.ravel, order='K')\n\n\ndef squared_norm(x):\n \"\"\"Squared Euclidean or Frobenius norm of x.\n\n Returns the Euclidean norm when x is a vector, the Frobenius norm when x\n is a matrix (2-d array). Faster than norm(x) ** 2.\n \"\"\"\n x = _ravel(x)\n return np.dot(x, x)\n\n\ndef row_norms(X, squared=False):\n \"\"\"Row-wise (squared) Euclidean norm of X.\n\n Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse\n matrices and does not create an X.shape-sized temporary.\n\n Performs no input validation.\n \"\"\"\n if issparse(X):\n norms = csr_row_norms(X)\n else:\n norms = np.einsum('ij,ij->i', X, X)\n\n if not squared:\n np.sqrt(norms, norms)\n return norms\n\n\ndef fast_logdet(A):\n \"\"\"Compute log(det(A)) for A symmetric\n\n Equivalent to : np.log(nl.det(A)) but more robust.\n It returns -Inf if det(A) is non positive or is not defined.\n \"\"\"\n sign, ld = np.linalg.slogdet(A)\n if not sign > 0:\n return -np.inf\n return ld\n\n\ndef _impose_f_order(X):\n \"\"\"Helper Function\"\"\"\n # important to access flags instead of calling np.isfortran,\n # this catches corner cases.\n if X.flags.c_contiguous:\n return check_array(X.T, copy=False, order='F'), True\n else:\n return check_array(X, copy=False, order='F'), False\n\n\ndef _fast_dot(A, B):\n if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'\n raise ValueError\n\n if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)\n for x in [A, B]):\n warnings.warn('Data must be of same type. Supported types '\n 'are 32 and 64 bit float. 
'\n 'Falling back to np.dot.', NonBLASDotWarning)\n raise ValueError\n\n if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:\n raise ValueError\n\n # scipy 0.9 compliant API\n dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]\n A, trans_a = _impose_f_order(A)\n B, trans_b = _impose_f_order(B)\n return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)\n\n\ndef _have_blas_gemm():\n try:\n linalg.get_blas_funcs(['gemm'])\n return True\n except (AttributeError, ValueError):\n warnings.warn('Could not import BLAS, falling back to np.dot')\n return False\n\n\n# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.\nif np_version < (1, 7, 2) and _have_blas_gemm():\n def fast_dot(A, B):\n \"\"\"Compute fast dot products directly calling BLAS.\n\n This function calls BLAS directly while warranting Fortran contiguity.\n This helps avoiding extra copies `np.dot` would have created.\n For details see section `Linear Algebra on large Arrays`:\n http://wiki.scipy.org/PerformanceTips\n\n Parameters\n ----------\n A, B: instance of np.ndarray\n Input arrays. Arrays are supposed to be of the same dtype and to\n have exactly 2 dimensions. Currently only floats are supported.\n In case these requirements aren't met np.dot(A, B) is returned\n instead. To activate the related warning issued in this case\n execute the following lines of code:\n\n >> import warnings\n >> from sklearn.utils.validation import NonBLASDotWarning\n >> warnings.simplefilter('always', NonBLASDotWarning)\n \"\"\"\n try:\n return _fast_dot(A, B)\n except ValueError:\n # Maltyped or malformed data.\n return np.dot(A, B)\nelse:\n fast_dot = np.dot\n\n\ndef density(w, **kwargs):\n \"\"\"Compute density of a sparse vector\n\n Return a value between 0 and 1\n \"\"\"\n if hasattr(w, \"toarray\"):\n d = float(w.nnz) / (w.shape[0] * w.shape[1])\n else:\n d = 0 if w is None else float((w != 0).sum()) / w.size\n return d\n\n\ndef safe_sparse_dot(a, b, dense_output=False):\n \"\"\"Dot product that handle the sparse matrix case correctly\n\n Uses BLAS GEMM as replacement for numpy.dot where possible\n to avoid unnecessary copies.\n \"\"\"\n if issparse(a) or issparse(b):\n ret = a * b\n if dense_output and hasattr(ret, \"toarray\"):\n ret = ret.toarray()\n return ret\n else:\n return fast_dot(a, b)\n\n\ndef randomized_range_finder(A, size, n_iter, random_state=None):\n \"\"\"Computes an orthonormal matrix whose range approximates the range of A.\n\n Parameters\n ----------\n A: 2D array\n The input data matrix\n size: integer\n Size of the return array\n n_iter: integer\n Number of power iterations used to stabilize the result\n random_state: RandomState or an int seed (0 by default)\n A random number generator instance\n\n Returns\n -------\n Q: 2D array\n A (size x size) projection matrix, the range of which\n approximates well the range of the input matrix A.\n\n Notes\n -----\n\n Follows Algorithm 4.3 of\n Finding structure with randomness: Stochastic algorithms for constructing\n approximate matrix decompositions\n Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061\n \"\"\"\n random_state = check_random_state(random_state)\n\n # generating random gaussian vectors r with shape: (A.shape[1], size)\n R = random_state.normal(size=(A.shape[1], size))\n\n # sampling the range of A using by linear projection of r\n Y = safe_sparse_dot(A, R)\n del R\n\n # perform power iterations with Y to further 'imprint' the top\n # singular vectors of A in Y\n for i in xrange(n_iter):\n Y = 
safe_sparse_dot(A, safe_sparse_dot(A.T, Y))\n\n # extracting an orthonormal basis of the A range samples\n Q, R = linalg.qr(Y, mode='economic')\n return Q\n\n\ndef randomized_svd(M, n_components, n_oversamples=10, n_iter=0,\n transpose='auto', flip_sign=True, random_state=0):\n \"\"\"Computes a truncated randomized SVD\n\n Parameters\n ----------\n M: ndarray or sparse matrix\n Matrix to decompose\n\n n_components: int\n Number of singular values and vectors to extract.\n\n n_oversamples: int (default is 10)\n Additional number of random vectors to sample the range of M so as\n to ensure proper conditioning. The total number of random vectors\n used to find the range of M is n_components + n_oversamples.\n\n n_iter: int (default is 0)\n Number of power iterations (can be used to deal with very noisy\n problems).\n\n transpose: True, False or 'auto' (default)\n Whether the algorithm should be applied to M.T instead of M. The\n result should approximately be the same. The 'auto' mode will\n trigger the transposition if M.shape[1] > M.shape[0] since this\n implementation of randomized SVD tend to be a little faster in that\n case).\n\n flip_sign: boolean, (True by default)\n The output of a singular value decomposition is only unique up to a\n permutation of the signs of the singular vectors. If `flip_sign` is\n set to `True`, the sign ambiguity is resolved by making the largest\n loadings for each component in the left singular vectors positive.\n\n random_state: RandomState or an int seed (0 by default)\n A random number generator instance to make behavior\n\n Notes\n -----\n This algorithm finds a (usually very good) approximate truncated\n singular value decomposition using randomization to speed up the\n computations. It is particularly fast on large matrices on which\n you wish to extract only a small number of components.\n\n References\n ----------\n * Finding structure with randomness: Stochastic algorithms for constructing\n approximate matrix decompositions\n Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061\n\n * A randomized algorithm for the decomposition of matrices\n Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert\n \"\"\"\n random_state = check_random_state(random_state)\n n_random = n_components + n_oversamples\n n_samples, n_features = M.shape\n\n if transpose == 'auto' and n_samples > n_features:\n transpose = True\n if transpose:\n # this implementation is a bit faster with smaller shape[1]\n M = M.T\n\n Q = randomized_range_finder(M, n_random, n_iter, random_state)\n\n # project M to the (k + p) dimensional space using the basis vectors\n B = safe_sparse_dot(Q.T, M)\n\n # compute the SVD on the thin matrix: (k + p) wide\n Uhat, s, V = linalg.svd(B, full_matrices=False)\n del B\n U = np.dot(Q, Uhat)\n\n if flip_sign:\n U, V = svd_flip(U, V)\n\n if transpose:\n # transpose back the results according to the input convention\n return V[:n_components, :].T, s[:n_components], U[:, :n_components].T\n else:\n return U[:, :n_components], s[:n_components], V[:n_components, :]\n\n\ndef logsumexp(arr, axis=0):\n \"\"\"Computes the sum of arr assuming arr is in the log domain.\n\n Returns log(sum(exp(arr))) while minimizing the possibility of\n over/underflow.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from sklearn.utils.extmath import logsumexp\n >>> a = np.arange(10)\n >>> np.log(np.sum(np.exp(a)))\n 9.4586297444267107\n >>> logsumexp(a)\n 9.4586297444267107\n \"\"\"\n arr = np.rollaxis(arr, axis)\n # Use the max to normalize, as with the log this is 
what accumulates\n # the less errors\n vmax = arr.max(axis=0)\n out = np.log(np.sum(np.exp(arr - vmax), axis=0))\n out += vmax\n return out\n\n\ndef weighted_mode(a, w, axis=0):\n \"\"\"Returns an array of the weighted modal (most common) value in a\n\n If there is more than one such value, only the first is returned.\n The bin-count for the modal bins is also returned.\n\n This is an extension of the algorithm in scipy.stats.mode.\n\n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n w : array_like\n n-dimensional array of weights for each value\n axis : int, optional\n Axis along which to operate. Default is 0, i.e. the first axis.\n\n Returns\n -------\n vals : ndarray\n Array of modal values.\n score : ndarray\n Array of weighted counts for each mode.\n\n Examples\n --------\n >>> from sklearn.utils.extmath import weighted_mode\n >>> x = [4, 1, 4, 2, 4, 2]\n >>> weights = [1, 1, 1, 1, 1, 1]\n >>> weighted_mode(x, weights)\n (array([ 4.]), array([ 3.]))\n\n The value 4 appears three times: with uniform weights, the result is\n simply the mode of the distribution.\n\n >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's\n >>> weighted_mode(x, weights)\n (array([ 2.]), array([ 3.5]))\n\n The value 2 has the highest score: it appears twice with weights of\n 1.5 and 2: the sum of these is 3.\n\n See Also\n --------\n scipy.stats.mode\n \"\"\"\n if axis is None:\n a = np.ravel(a)\n w = np.ravel(w)\n axis = 0\n else:\n a = np.asarray(a)\n w = np.asarray(w)\n axis = axis\n\n if a.shape != w.shape:\n w = np.zeros(a.shape, dtype=w.dtype) + w\n\n scores = np.unique(np.ravel(a)) # get ALL unique values\n testshape = list(a.shape)\n testshape[axis] = 1\n oldmostfreq = np.zeros(testshape)\n oldcounts = np.zeros(testshape)\n for score in scores:\n template = np.zeros(a.shape)\n ind = (a == score)\n template[ind] = w[ind]\n counts = np.expand_dims(np.sum(template, axis), axis)\n mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)\n oldcounts = np.maximum(counts, oldcounts)\n oldmostfreq = mostfrequent\n return mostfrequent, oldcounts\n\n\ndef pinvh(a, cond=None, rcond=None, lower=True):\n \"\"\"Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.\n\n Calculate a generalized inverse of a symmetric matrix using its\n eigenvalue decomposition and including all 'large' eigenvalues.\n\n Parameters\n ----------\n a : array, shape (N, N)\n Real symmetric or complex hermetian matrix to be pseudo-inverted\n\n cond : float or None, default None\n Cutoff for 'small' eigenvalues.\n Singular values smaller than rcond * largest_eigenvalue are considered\n zero.\n\n If None or -1, suitable machine precision is used.\n\n rcond : float or None, default None (deprecated)\n Cutoff for 'small' eigenvalues.\n Singular values smaller than rcond * largest_eigenvalue are considered\n zero.\n\n If None or -1, suitable machine precision is used.\n\n lower : boolean\n Whether the pertinent array data is taken from the lower or upper\n triangle of a. 
(Default: lower)\n\n Returns\n -------\n B : array, shape (N, N)\n\n Raises\n ------\n LinAlgError\n If eigenvalue does not converge\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.random.randn(9, 6)\n >>> a = np.dot(a, a.T)\n >>> B = pinvh(a)\n >>> np.allclose(a, np.dot(a, np.dot(B, a)))\n True\n >>> np.allclose(B, np.dot(B, np.dot(a, B)))\n True\n\n \"\"\"\n a = np.asarray_chkfinite(a)\n s, u = linalg.eigh(a, lower=lower)\n\n if rcond is not None:\n cond = rcond\n if cond in [None, -1]:\n t = u.dtype.char.lower()\n factor = {'f': 1E3, 'd': 1E6}\n cond = factor[t] * np.finfo(t).eps\n\n # unlike svd case, eigh can lead to negative eigenvalues\n above_cutoff = (abs(s) > cond * np.max(abs(s)))\n psigma_diag = np.zeros_like(s)\n psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]\n\n return np.dot(u * psigma_diag, np.conjugate(u).T)\n\n\ndef cartesian(arrays, out=None):\n \"\"\"Generate a cartesian product of input arrays.\n\n Parameters\n ----------\n arrays : list of array-like\n 1-D arrays to form the cartesian product of.\n out : ndarray\n Array to place the cartesian product in.\n\n Returns\n -------\n out : ndarray\n 2-D array of shape (M, len(arrays)) containing cartesian products\n formed of input arrays.\n\n Examples\n --------\n >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))\n array([[1, 4, 6],\n [1, 4, 7],\n [1, 5, 6],\n [1, 5, 7],\n [2, 4, 6],\n [2, 4, 7],\n [2, 5, 6],\n [2, 5, 7],\n [3, 4, 6],\n [3, 4, 7],\n [3, 5, 6],\n [3, 5, 7]])\n\n \"\"\"\n arrays = [np.asarray(x) for x in arrays]\n shape = (len(x) for x in arrays)\n dtype = arrays[0].dtype\n\n ix = np.indices(shape)\n ix = ix.reshape(len(arrays), -1).T\n\n if out is None:\n out = np.empty_like(ix, dtype=dtype)\n\n for n, arr in enumerate(arrays):\n out[:, n] = arrays[n][ix[:, n]]\n\n return out\n\n\ndef svd_flip(u, v, u_based_decision=True):\n \"\"\"Sign correction to ensure deterministic output from SVD.\n\n Adjusts the columns of u and the rows of v such that the loadings in the\n columns in u that are largest in absolute value are always positive.\n\n Parameters\n ----------\n u, v : ndarray\n u and v are the output of `linalg.svd` or\n `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions\n so one can compute `np.dot(u * s, v)`.\n\n u_based_decision : boolean, (default=True)\n If True, use the columns of u as the basis for sign flipping. Otherwise,\n use the rows of v. 
The choice of which variable to base the decision on\n is generally algorithm dependent.\n\n\n Returns\n -------\n u_adjusted, v_adjusted : arrays with the same dimensions as the input.\n\n \"\"\"\n if u_based_decision:\n # columns of u, rows of v\n max_abs_cols = np.argmax(np.abs(u), axis=0)\n signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])\n u *= signs\n v *= signs[:, np.newaxis]\n else:\n # rows of v, columns of u\n max_abs_rows = np.argmax(np.abs(v), axis=1)\n signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])\n u *= signs\n v *= signs[:, np.newaxis]\n return u, v\n\n\ndef log_logistic(X, out=None):\n \"\"\"Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.\n\n This implementation is numerically stable because it splits positive and\n negative values::\n\n -log(1 + exp(-x_i)) if x_i > 0\n x_i - log(1 + exp(x_i)) if x_i <= 0\n\n For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.\n\n Parameters\n ----------\n X: array-like, shape (M, N)\n Argument to the logistic function\n\n out: array-like, shape: (M, N), optional:\n Preallocated output array.\n\n Returns\n -------\n out: array, shape (M, N)\n Log of the logistic function evaluated at every point in x\n\n Notes\n -----\n See the blog post describing this implementation:\n http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/\n \"\"\"\n is_1d = X.ndim == 1\n X = check_array(X, dtype=np.float)\n\n n_samples, n_features = X.shape\n\n if out is None:\n out = np.empty_like(X)\n\n _log_logistic_sigmoid(n_samples, n_features, X, out)\n\n if is_1d:\n return np.squeeze(out)\n return out\n\n\ndef safe_min(X):\n \"\"\"Returns the minimum value of a dense or a CSR/CSC matrix.\n\n Adapated from http://stackoverflow.com/q/13426580\n\n \"\"\"\n if issparse(X):\n if len(X.data) == 0:\n return 0\n m = X.data.min()\n return m if X.getnnz() == X.size else min(m, 0)\n else:\n return X.min()\n\n\ndef make_nonnegative(X, min_value=0):\n \"\"\"Ensure `X.min()` >= `min_value`.\"\"\"\n min_ = safe_min(X)\n if min_ < min_value:\n if issparse(X):\n raise ValueError(\"Cannot make the data matrix\"\n \" nonnegative because it is sparse.\"\n \" Adding a value to every entry would\"\n \" make it no longer sparse.\")\n X = X + (min_value - min_)\n return X\n\n\ndef _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):\n \"\"\"Calculate an average mean update and a Youngs and Cramer variance update.\n\n From the paper \"Algorithms for computing the sample variance: analysis and\n recommendations\", by Chan, Golub, and LeVeque.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Data to use for variance update\n\n old_mean : array-like, shape: (n_features,)\n\n old_variance : array-like, shape: (n_features,)\n\n old_sample_count : int\n\n Returns\n -------\n updated_mean : array, shape (n_features,)\n\n updated_variance : array, shape (n_features,)\n\n updated_sample_count : int\n\n References\n ----------\n T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:\n recommendations, The American Statistician, Vol. 37, No. 3, pp. 
242-247\n\n \"\"\"\n new_sum = X.sum(axis=0)\n new_variance = X.var(axis=0) * X.shape[0]\n old_sum = old_mean * old_sample_count\n n_samples = X.shape[0]\n updated_sample_count = old_sample_count + n_samples\n partial_variance = old_sample_count / (n_samples * updated_sample_count) * (\n n_samples / old_sample_count * old_sum - new_sum) ** 2\n unnormalized_variance = old_variance * old_sample_count + new_variance + \\\n partial_variance\n return ((old_sum + new_sum) / updated_sample_count,\n unnormalized_variance / updated_sample_count,\n updated_sample_count)\n\n\ndef _deterministic_vector_sign_flip(u):\n \"\"\"Modify the sign of vectors for reproducibility\n\n Flips the sign of elements of all the vectors (rows of u) such that\n the absolute maximum element of each vector is positive.\n\n Parameters\n ----------\n u : ndarray\n Array with vectors as its rows.\n\n Returns\n -------\n u_flipped : ndarray with same shape as u\n Array with the sign flipped vectors as its rows.\n \"\"\"\n max_abs_rows = np.argmax(np.abs(u), axis=1)\n signs = np.sign(u[range(u.shape[0]), max_abs_rows])\n u *= signs[:, np.newaxis]\n return u\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382665,"cells":{"repo_name":{"kind":"string","value":"RMKD/networkx"},"path":{"kind":"string","value":"examples/drawing/unix_email.py"},"copies":{"kind":"string","value":"62"},"size":{"kind":"string","value":"2683"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\nCreate a directed graph, allowing multiple edges and self loops, from\na unix mailbox. The nodes are email addresses with links\nthat point from the sender to the recievers. The edge data\nis a Python email.Message object which contains all of\nthe email message data. \n\nThis example shows the power of XDiGraph to hold edge data\nof arbitrary Python objects (in this case a list of email messages).\n\nBy default, load the sample unix email mailbox called \"unix_email.mbox\".\nYou can load your own mailbox by naming it on the command line, eg\n\npython unixemail.py /var/spool/mail/username\n\n\"\"\"\n__author__ = \"\"\"Aric Hagberg (hagberg@lanl.gov)\"\"\"\n# Copyright (C) 2005 by \n# Aric Hagberg \n# Dan Schult \n# Pieter Swart \n# All rights reserved.\n# BSD license.\n\nimport email\nfrom email.utils import getaddresses,parseaddr\nimport mailbox\nimport sys\n\n# unix mailbox recipe\n# see http://www.python.org/doc/current/lib/module-mailbox.html\ndef msgfactory(fp):\n try:\n return email.message_from_file(fp)\n except email.Errors.MessageParseError:\n # Don't return None since that will stop the mailbox iterator\n return ''\n\n\n\nif __name__ == '__main__':\n\n import networkx as nx\n try: \n import matplotlib.pyplot as plt\n except:\n pass\n\n if len(sys.argv)==1:\n filePath = \"unix_email.mbox\"\n else:\n filePath = sys.argv[1]\n\n mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox\n\n G=nx.MultiDiGraph() # create empty graph\n\n # parse each messages and build graph \n for msg in mbox: # msg is python email.Message.Message object\n (source_name,source_addr) = parseaddr(msg['From']) # sender\n # get all recipients\n # see http://www.python.org/doc/current/lib/module-email.Utils.html\n tos = msg.get_all('to', [])\n ccs = msg.get_all('cc', [])\n resent_tos = msg.get_all('resent-to', [])\n resent_ccs = msg.get_all('resent-cc', [])\n all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)\n # now add the edges for this mail message\n for (target_name,target_addr) in all_recipients:\n 
G.add_edge(source_addr,target_addr,message=msg) \n\n # print edges with message subject\n for (u,v,d) in G.edges_iter(data=True):\n print(\"From: %s To: %s Subject: %s\"%(u,v,d['message'][\"Subject\"]))\n \n\n try: # draw\n pos=nx.spring_layout(G,iterations=10)\n nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)\n plt.savefig(\"unix_email.png\")\n plt.show()\n except: # matplotlib not available\n pass\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382666,"cells":{"repo_name":{"kind":"string","value":"kdebrab/pandas"},"path":{"kind":"string","value":"pandas/tests/frame/test_convert_to.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"12494"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\nimport pytest\nimport pytz\nimport collections\nfrom collections import OrderedDict, defaultdict\nimport numpy as np\n\nfrom pandas import compat\nfrom pandas.compat import long\nfrom pandas import (DataFrame, Series, MultiIndex, Timestamp,\n date_range)\n\nimport pandas.util.testing as tm\nfrom pandas.tests.frame.common import TestData\n\n\nclass TestDataFrameConvertTo(TestData):\n\n def test_to_dict_timestamp(self):\n\n # GH11247\n # split/records producing np.datetime64 rather than Timestamps\n # on datetime64[ns] dtypes only\n\n tsmp = Timestamp('20130101')\n test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})\n test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})\n\n expected_records = [{'A': tsmp, 'B': tsmp},\n {'A': tsmp, 'B': tsmp}]\n expected_records_mixed = [{'A': tsmp, 'B': 1},\n {'A': tsmp, 'B': 2}]\n\n assert (test_data.to_dict(orient='records') ==\n expected_records)\n assert (test_data_mixed.to_dict(orient='records') ==\n expected_records_mixed)\n\n expected_series = {\n 'A': Series([tsmp, tsmp], name='A'),\n 'B': Series([tsmp, tsmp], name='B'),\n }\n expected_series_mixed = {\n 'A': Series([tsmp, tsmp], name='A'),\n 'B': Series([1, 2], name='B'),\n }\n\n tm.assert_dict_equal(test_data.to_dict(orient='series'),\n expected_series)\n tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),\n expected_series_mixed)\n\n expected_split = {\n 'index': [0, 1],\n 'data': [[tsmp, tsmp],\n [tsmp, tsmp]],\n 'columns': ['A', 'B']\n }\n expected_split_mixed = {\n 'index': [0, 1],\n 'data': [[tsmp, 1],\n [tsmp, 2]],\n 'columns': ['A', 'B']\n }\n\n tm.assert_dict_equal(test_data.to_dict(orient='split'),\n expected_split)\n tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),\n expected_split_mixed)\n\n def test_to_dict_invalid_orient(self):\n df = DataFrame({'A': [0, 1]})\n pytest.raises(ValueError, df.to_dict, orient='xinvalid')\n\n def test_to_records_dt64(self):\n df = DataFrame([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]],\n index=date_range(\"2012-01-01\", \"2012-01-02\"))\n\n # convert_datetime64 defaults to None\n expected = df.index.values[0]\n result = df.to_records()['index'][0]\n assert expected == result\n\n # check for FutureWarning if convert_datetime64=False is passed\n with tm.assert_produces_warning(FutureWarning):\n expected = df.index.values[0]\n result = df.to_records(convert_datetime64=False)['index'][0]\n assert expected == result\n\n # check for FutureWarning if convert_datetime64=True is passed\n with tm.assert_produces_warning(FutureWarning):\n expected = df.index[0]\n result = df.to_records(convert_datetime64=True)['index'][0]\n assert expected == result\n\n def test_to_records_with_multindex(self):\n # GH3189\n index = 
[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n data = np.zeros((8, 4))\n df = DataFrame(data, index=index)\n r = df.to_records(index=True)['level_0']\n assert 'bar' in r\n assert 'one' not in r\n\n def test_to_records_with_Mapping_type(self):\n import email\n from email.parser import Parser\n import collections\n\n collections.Mapping.register(email.message.Message)\n\n headers = Parser().parsestr('From: \\n'\n 'To: \\n'\n 'Subject: Test message\\n'\n '\\n'\n 'Body would go here\\n')\n\n frame = DataFrame.from_records([headers])\n all(x in frame for x in ['Type', 'Subject', 'From'])\n\n def test_to_records_floats(self):\n df = DataFrame(np.random.rand(10, 10))\n df.to_records()\n\n def test_to_records_index_name(self):\n df = DataFrame(np.random.randn(3, 3))\n df.index.name = 'X'\n rs = df.to_records()\n assert 'X' in rs.dtype.fields\n\n df = DataFrame(np.random.randn(3, 3))\n rs = df.to_records()\n assert 'index' in rs.dtype.fields\n\n df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])\n df.index.names = ['A', None]\n rs = df.to_records()\n assert 'level_0' in rs.dtype.fields\n\n def test_to_records_with_unicode_index(self):\n # GH13172\n # unicode_literals conflict with to_records\n result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\\\n .to_records()\n expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])\n tm.assert_almost_equal(result, expected)\n\n def test_to_records_with_unicode_column_names(self):\n # xref issue: https://github.com/numpy/numpy/issues/2407\n # Issue #11879. to_records used to raise an exception when used\n # with column names containing non-ascii characters in Python 2\n result = DataFrame(data={u\"accented_name_é\": [1.0]}).to_records()\n\n # Note that numpy allows for unicode field names but dtypes need\n # to be specified using dictionary instead of list of tuples.\n expected = np.rec.array(\n [(0, 1.0)],\n dtype={\"names\": [\"index\", u\"accented_name_é\"],\n \"formats\": ['=i8', '=f8']}\n )\n tm.assert_almost_equal(result, expected)\n\n def test_to_records_with_categorical(self):\n\n # GH8626\n\n # dict creation\n df = DataFrame({'A': list('abc')}, dtype='category')\n expected = Series(list('abc'), dtype='category', name='A')\n tm.assert_series_equal(df['A'], expected)\n\n # list-like creation\n df = DataFrame(list('abc'), dtype='category')\n expected = Series(list('abc'), dtype='category', name=0)\n tm.assert_series_equal(df[0], expected)\n\n # to record array\n # this coerces\n result = df.to_records()\n expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],\n dtype=[('index', '=i8'), ('0', 'O')])\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize('mapping', [\n dict,\n collections.defaultdict(list),\n collections.OrderedDict])\n def test_to_dict(self, mapping):\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n\n # GH16122\n recons_data = DataFrame(test_data).to_dict(into=mapping)\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n assert (v2 == recons_data[k][k2])\n\n recons_data = DataFrame(test_data).to_dict(\"l\", mapping)\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n assert (v2 == recons_data[k][int(k2) - 1])\n\n recons_data = DataFrame(test_data).to_dict(\"s\", mapping)\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n assert (v2 == recons_data[k][k2])\n\n recons_data = 
DataFrame(test_data).to_dict(\"sp\", mapping)\n expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],\n 'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}\n tm.assert_dict_equal(recons_data, expected_split)\n\n recons_data = DataFrame(test_data).to_dict(\"r\", mapping)\n expected_records = [{'A': 1.0, 'B': '1'},\n {'A': 2.0, 'B': '2'},\n {'A': np.nan, 'B': '3'}]\n assert isinstance(recons_data, list)\n assert (len(recons_data) == 3)\n for l, r in zip(recons_data, expected_records):\n tm.assert_dict_equal(l, r)\n\n # GH10844\n recons_data = DataFrame(test_data).to_dict(\"i\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n assert (v2 == recons_data[k2][k])\n\n df = DataFrame(test_data)\n df['duped'] = df[df.columns[0]]\n recons_data = df.to_dict(\"i\")\n comp_data = test_data.copy()\n comp_data['duped'] = comp_data[df.columns[0]]\n for k, v in compat.iteritems(comp_data):\n for k2, v2 in compat.iteritems(v):\n assert (v2 == recons_data[k2][k])\n\n @pytest.mark.parametrize('mapping', [\n list,\n collections.defaultdict,\n []])\n def test_to_dict_errors(self, mapping):\n # GH16122\n df = DataFrame(np.random.randn(3, 3))\n with pytest.raises(TypeError):\n df.to_dict(into=mapping)\n\n def test_to_dict_not_unique_warning(self):\n # GH16927: When converting to a dict, if a column has a non-unique name\n # it will be dropped, throwing a warning.\n df = DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])\n with tm.assert_produces_warning(UserWarning):\n df.to_dict()\n\n @pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])\n def test_to_records_datetimeindex_with_tz(self, tz):\n # GH13937\n dr = date_range('2016-01-01', periods=10,\n freq='S', tz=tz)\n\n df = DataFrame({'datetime': dr}, index=dr)\n\n expected = df.to_records()\n result = df.tz_convert(\"UTC\").to_records()\n\n # both converted to UTC, so they are equal\n tm.assert_numpy_array_equal(result, expected)\n\n def test_to_dict_box_scalars(self):\n # 14216\n # make sure that we are boxing properly\n d = {'a': [1], 'b': ['b']}\n\n result = DataFrame(d).to_dict()\n assert isinstance(list(result['a'])[0], (int, long))\n assert isinstance(list(result['b'])[0], (int, long))\n\n result = DataFrame(d).to_dict(orient='records')\n assert isinstance(result[0]['a'], (int, long))\n\n def test_frame_to_dict_tz(self):\n # GH18372 When converting to dict with orient='records' columns of\n # datetime that are tz-aware were not converted to required arrays\n data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),\n (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)]\n df = DataFrame(list(data), columns=[\"d\", ])\n\n result = df.to_dict(orient='records')\n expected = [\n {'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)},\n {'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)},\n ]\n tm.assert_dict_equal(result[0], expected[0])\n tm.assert_dict_equal(result[1], expected[1])\n\n @pytest.mark.parametrize('into, expected', [\n (dict, {0: {'int_col': 1, 'float_col': 1.0},\n 1: {'int_col': 2, 'float_col': 2.0},\n 2: {'int_col': 3, 'float_col': 3.0}}),\n (OrderedDict, OrderedDict([(0, {'int_col': 1, 'float_col': 1.0}),\n (1, {'int_col': 2, 'float_col': 2.0}),\n (2, {'int_col': 3, 'float_col': 3.0})])),\n (defaultdict(list), defaultdict(list,\n {0: {'int_col': 1, 'float_col': 1.0},\n 1: {'int_col': 2, 'float_col': 2.0},\n 2: {'int_col': 3, 'float_col': 3.0}}))\n ])\n def test_to_dict_index_dtypes(self, into, expected):\n # GH 18580\n # When using 
to_dict(orient='index') on a dataframe with int\n # and float columns only the int columns were cast to float\n\n df = DataFrame({'int_col': [1, 2, 3],\n 'float_col': [1.0, 2.0, 3.0]})\n\n result = df.to_dict(orient='index', into=into)\n cols = ['int_col', 'float_col']\n result = DataFrame.from_dict(result, orient='index')[cols]\n expected = DataFrame.from_dict(expected, orient='index')[cols]\n tm.assert_frame_equal(result, expected)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382667,"cells":{"repo_name":{"kind":"string","value":"rrohan/scikit-learn"},"path":{"kind":"string","value":"examples/model_selection/plot_validation_curve.py"},"copies":{"kind":"string","value":"229"},"size":{"kind":"string","value":"1823"},"content":{"kind":"string","value":"\"\"\"\n==========================\nPlotting Validation Curves\n==========================\n\nIn this plot you can see the training scores and validation scores of an SVM\nfor different values of the kernel parameter gamma. For very low values of\ngamma, you can see that both the training score and the validation score are\nlow. This is called underfitting. Medium values of gamma will result in high\nvalues for both scores, i.e. the classifier is performing fairly well. If gamma\nis too high, the classifier will overfit, which means that the training score\nis good but the validation score is poor.\n\"\"\"\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.datasets import load_digits\nfrom sklearn.svm import SVC\nfrom sklearn.learning_curve import validation_curve\n\ndigits = load_digits()\nX, y = digits.data, digits.target\n\nparam_range = np.logspace(-6, -1, 5)\ntrain_scores, test_scores = validation_curve(\n SVC(), X, y, param_name=\"gamma\", param_range=param_range,\n cv=10, scoring=\"accuracy\", n_jobs=1)\ntrain_scores_mean = np.mean(train_scores, axis=1)\ntrain_scores_std = np.std(train_scores, axis=1)\ntest_scores_mean = np.mean(test_scores, axis=1)\ntest_scores_std = np.std(test_scores, axis=1)\n\nplt.title(\"Validation Curve with SVM\")\nplt.xlabel(\"$\\gamma$\")\nplt.ylabel(\"Score\")\nplt.ylim(0.0, 1.1)\nplt.semilogx(param_range, train_scores_mean, label=\"Training score\", color=\"r\")\nplt.fill_between(param_range, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.2, color=\"r\")\nplt.semilogx(param_range, test_scores_mean, label=\"Cross-validation score\",\n color=\"g\")\nplt.fill_between(param_range, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.2, color=\"g\")\nplt.legend(loc=\"best\")\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382668,"cells":{"repo_name":{"kind":"string","value":"slipguru/palladio"},"path":{"kind":"string","value":"palladio/config_templates/default_config.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2485"},"content":{"kind":"string","value":"# Configuration file example for PALLADIO\n# version: 2.0\n\nimport numpy as np\n\nfrom sklearn.feature_selection import RFE\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import GridSearchCV\n\nfrom palladio import datasets\n\nimport os\n\n#####################\n# DATASET PATHS ###\n#####################\n\n# * All the path are w.r.t. 
config file path\n\n# The list of all files required for the experiments\n\ndata_path = 'data/gedm.csv'\ntarget_path = 'data/labels.csv'\n\n# pandas.read_csv options\ndata_loading_options = {\n 'delimiter': ',',\n 'header': 0,\n 'index_col': 0\n}\ntarget_loading_options = data_loading_options\n\ndataset = datasets.load_csv(os.path.join(os.path.dirname(__file__),data_path),\n os.path.join(os.path.dirname(__file__),target_path),\n data_loading_options=data_loading_options,\n target_loading_options=target_loading_options,\n samples_on='col')\n\ndata, labels = dataset.data, dataset.target\nfeature_names = dataset.feature_names\n\n#######################\n# SESSION OPTIONS ###\n#######################\n\nsession_folder = 'palladio_test_session'\n\n# The learning task, if None palladio tries to guess it\n# [see sklearn.utils.multiclass.type_of_target]\nlearning_task = None\n\n# The number of repetitions of 'regular' experiments\nn_splits_regular = 50\n\n# The number of repetitions of 'permutation' experiments\nn_splits_permutation = 50\n\n#######################\n# LEARNER OPTIONS ###\n#######################\n\nmodel = RFE(LinearSVC(loss='hinge'), step=0.3)\n\n# Set the estimator to be a GridSearchCV\nparam_grid = {\n 'n_features_to_select': [10, 20, 50],\n 'estimator__C': np.logspace(-4, 0, 5),\n}\n\nestimator = GridSearchCV(model, param_grid=param_grid, cv=3, scoring='accuracy', n_jobs=1)\n\n# Set options for ModelAssessment\nma_options = {\n 'test_size': 0.25,\n 'scoring': 'accuracy',\n 'n_jobs': -1,\n 'n_splits': n_splits_regular\n}\n\n# For the Pipeline object, indicate the name of the step from which to\n# retrieve the list of selected features\n# For a single estimator which has a `coef_` attributes (e.g., elastic net or\n# lasso) set to True\nvs_analysis = True\n\n# ~~ Signature Parameters\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfrequency_threshold = 0.75\n\n# ~~ Plotting Options\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nscore_surfaces_options = {\n 'logspace': ['estimator__C'],\n 'plot_errors': True\n}\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382669,"cells":{"repo_name":{"kind":"string","value":"DonBeo/scikit-learn"},"path":{"kind":"string","value":"sklearn/svm/tests/test_sparse.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"10550"},"content":{"kind":"string","value":"from nose.tools import assert_raises, assert_true, assert_false\n\nimport numpy as np\nfrom scipy import sparse\nfrom numpy.testing import (assert_array_almost_equal, assert_array_equal,\n assert_equal)\n\nfrom sklearn import datasets, svm, linear_model, base\nfrom sklearn.datasets import make_classification, load_digits\nfrom sklearn.svm.tests import test_svm\nfrom sklearn.utils import ConvergenceWarning\nfrom sklearn.utils.extmath import safe_sparse_dot\nfrom sklearn.utils.testing import assert_warns\n\n# test sample 1\nX = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])\nX_sp = sparse.lil_matrix(X)\nY = [1, 1, 1, 2, 2, 2]\nT = np.array([[-1, -1], [2, 2], [3, 2]])\ntrue_result = [1, 2, 2]\n\n# test sample 2\nX2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],\n [0, 0, 2], [3, 3, 3]])\nX2_sp = sparse.dok_matrix(X2)\nY2 = [1, 2, 2, 2, 3]\nT2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])\ntrue_result2 = [1, 2, 3]\n\n\niris = datasets.load_iris()\n# permute\nrng = np.random.RandomState(0)\nperm = rng.permutation(iris.target.size)\niris.data = iris.data[perm]\niris.target = iris.target[perm]\n# sparsify\niris.data = 
sparse.csr_matrix(iris.data)\n\n\ndef test_svc():\n # Check that sparse SVC gives the same result as SVC\n\n clf = svm.SVC(kernel='linear', probability=True, random_state=0)\n clf.fit(X, Y)\n sp_clf = svm.SVC(kernel='linear', probability=True, random_state=0)\n sp_clf.fit(X_sp, Y)\n\n assert_array_equal(sp_clf.predict(T), true_result)\n\n assert_true(sparse.issparse(sp_clf.support_vectors_))\n assert_array_almost_equal(clf.support_vectors_,\n sp_clf.support_vectors_.toarray())\n\n assert_true(sparse.issparse(sp_clf.dual_coef_))\n assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())\n\n assert_true(sparse.issparse(sp_clf.coef_))\n assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())\n assert_array_almost_equal(clf.support_, sp_clf.support_)\n assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))\n\n # refit with a different dataset\n clf.fit(X2, Y2)\n sp_clf.fit(X2_sp, Y2)\n assert_array_almost_equal(clf.support_vectors_,\n sp_clf.support_vectors_.toarray())\n assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())\n assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())\n assert_array_almost_equal(clf.support_, sp_clf.support_)\n assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))\n assert_array_almost_equal(clf.predict_proba(T2),\n sp_clf.predict_proba(T2), 4)\n\n\ndef test_unsorted_indices():\n # test that the result with sorted and unsorted indices in csr is the same\n # we use a subset of digits as iris, blobs or make_classification didn't\n # show the problem\n digits = load_digits()\n X, y = digits.data[:50], digits.target[:50]\n X_test = sparse.csr_matrix(digits.data[50:100])\n\n X_sparse = sparse.csr_matrix(X)\n coef_dense = svm.SVC(kernel='linear', probability=True,\n random_state=0).fit(X, y).coef_\n sparse_svc = svm.SVC(kernel='linear', probability=True,\n random_state=0).fit(X_sparse, y)\n coef_sorted = sparse_svc.coef_\n # make sure dense and sparse SVM give the same result\n assert_array_almost_equal(coef_dense, coef_sorted.toarray())\n\n X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]\n X_test_unsorted = X_test[np.arange(X_test.shape[0])]\n\n # make sure we scramble the indices\n assert_false(X_sparse_unsorted.has_sorted_indices)\n assert_false(X_test_unsorted.has_sorted_indices)\n\n unsorted_svc = svm.SVC(kernel='linear', probability=True,\n random_state=0).fit(X_sparse_unsorted, y)\n coef_unsorted = unsorted_svc.coef_\n # make sure unsorted indices give same result\n assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())\n assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),\n sparse_svc.predict_proba(X_test))\n\n\ndef test_svc_with_custom_kernel():\n kfunc = lambda x, y: safe_sparse_dot(x, y.T)\n clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)\n clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)\n assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))\n\n\ndef test_svc_iris():\n # Test the sparse SVC with the iris dataset\n for k in ('linear', 'poly', 'rbf'):\n sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)\n clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)\n\n assert_array_almost_equal(clf.support_vectors_,\n sp_clf.support_vectors_.toarray())\n assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())\n assert_array_almost_equal(\n clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))\n if k == 'linear':\n assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())\n\n\ndef test_error():\n # Test that it gives proper 
exception on deficient input\n # impossible value of C\n assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)\n\n # impossible value of nu\n clf = svm.NuSVC(nu=0.0)\n assert_raises(ValueError, clf.fit, X_sp, Y)\n\n Y2 = Y[:-1] # wrong dimensions for labels\n assert_raises(ValueError, clf.fit, X_sp, Y2)\n\n clf = svm.SVC()\n clf.fit(X_sp, Y)\n assert_array_equal(clf.predict(T), true_result)\n\n\ndef test_linearsvc():\n # Similar to test_SVC\n clf = svm.LinearSVC(random_state=0).fit(X, Y)\n sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)\n\n assert_true(sp_clf.fit_intercept)\n\n assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)\n assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)\n\n assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))\n\n clf.fit(X2, Y2)\n sp_clf.fit(X2_sp, Y2)\n\n assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)\n assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)\n\n\ndef test_linearsvc_iris():\n # Test the sparse LinearSVC with the iris dataset\n\n sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)\n clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)\n\n assert_equal(clf.fit_intercept, sp_clf.fit_intercept)\n\n assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)\n assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)\n assert_array_almost_equal(\n clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))\n\n # check decision_function\n pred = np.argmax(sp_clf.decision_function(iris.data), 1)\n assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))\n\n # sparsify the coefficients on both models and check that they still\n # produce the same results\n clf.sparsify()\n assert_array_equal(pred, clf.predict(iris.data))\n sp_clf.sparsify()\n assert_array_equal(pred, sp_clf.predict(iris.data))\n\n\ndef test_weight():\n # Test class weights\n X_, y_ = make_classification(n_samples=200, n_features=100,\n weights=[0.833, 0.167], random_state=0)\n\n X_ = sparse.csr_matrix(X_)\n for clf in (linear_model.LogisticRegression(),\n svm.LinearSVC(random_state=0),\n svm.SVC()):\n clf.set_params(class_weight={0: 5})\n clf.fit(X_[:180], y_[:180])\n y_pred = clf.predict(X_[180:])\n assert_true(np.sum(y_pred == y_[180:]) >= 11)\n\n\ndef test_sample_weights():\n # Test weights on individual samples\n clf = svm.SVC()\n clf.fit(X_sp, Y)\n assert_array_equal(clf.predict(X[2]), [1.])\n\n sample_weight = [.1] * 3 + [10] * 3\n clf.fit(X_sp, Y, sample_weight=sample_weight)\n assert_array_equal(clf.predict(X[2]), [2.])\n\n\ndef test_sparse_liblinear_intercept_handling():\n # Test that sparse liblinear honours intercept_scaling param\n test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)\n\n\ndef test_sparse_realdata():\n # Test on a subset from the 20newsgroups dataset.\n # This catchs some bugs if input is not correctly converted into\n # sparse format or weights are not correctly initialized.\n\n data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])\n indices = np.array([6, 5, 35, 31])\n indptr = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])\n X = sparse.csr_matrix((data, indices, indptr))\n y = np.array(\n [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,\n 0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 
2.,\n 0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,\n 3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,\n 0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,\n 3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,\n 1., 3.])\n\n clf = svm.SVC(kernel='linear').fit(X.toarray(), y)\n sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)\n\n assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())\n assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())\n\n\ndef test_sparse_svc_clone_with_callable_kernel():\n # Test that the \"dense_fit\" is called even though we use sparse input\n # meaning that everything works fine.\n a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,\n random_state=0)\n b = base.clone(a)\n\n b.fit(X_sp, Y)\n pred = b.predict(X_sp)\n b.predict_proba(X_sp)\n\n dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),\n probability=True, random_state=0)\n pred_dense = dense_svm.fit(X, Y).predict(X)\n assert_array_equal(pred_dense, pred)\n # b.decision_function(X_sp) # XXX : should be supported\n\n\ndef test_timeout():\n sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,\n random_state=0, max_iter=1)\n\n assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)\n\n\ndef test_consistent_proba():\n a = svm.SVC(probability=True, max_iter=1, random_state=0)\n proba_1 = a.fit(X, Y).predict_proba(X)\n a = svm.SVC(probability=True, max_iter=1, random_state=0)\n proba_2 = a.fit(X, Y).predict_proba(X)\n assert_array_almost_equal(proba_1, proba_2)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382670,"cells":{"repo_name":{"kind":"string","value":"Kongsea/tensorflow"},"path":{"kind":"string","value":"tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py"},"copies":{"kind":"string","value":"10"},"size":{"kind":"string","value":"12872"},"content":{"kind":"string","value":"# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Estimator input.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport tempfile\n\nimport numpy as np\n\nfrom tensorflow.python.training import training_util\nfrom tensorflow.contrib.layers.python.layers import optimizers\nfrom tensorflow.contrib.learn.python.learn import metric_spec\nfrom tensorflow.contrib.learn.python.learn import models\nfrom tensorflow.contrib.learn.python.learn.datasets import base\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn\nfrom tensorflow.contrib.metrics.python.ops import metric_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import input as input_lib\nfrom tensorflow.python.training import queue_runner_impl\n\n\n_BOSTON_INPUT_DIM = 13\n_IRIS_INPUT_DIM = 4\n\n\ndef boston_input_fn(num_epochs=None):\n boston = base.load_boston()\n features = input_lib.limit_epochs(\n array_ops.reshape(\n constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),\n num_epochs=num_epochs)\n labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])\n return features, labels\n\n\ndef boston_input_fn_with_queue(num_epochs=None):\n features, labels = boston_input_fn(num_epochs=num_epochs)\n\n # Create a minimal queue runner.\n fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)\n queue_runner = queue_runner_impl.QueueRunner(fake_queue,\n [constant_op.constant(0)])\n queue_runner_impl.add_queue_runner(queue_runner)\n\n return features, labels\n\n\ndef iris_input_fn():\n iris = base.load_iris()\n features = array_ops.reshape(\n constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])\n labels = array_ops.reshape(constant_op.constant(iris.target), [-1])\n return features, labels\n\n\ndef iris_input_fn_labels_dict():\n iris = base.load_iris()\n features = array_ops.reshape(\n constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])\n labels = {\n 'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])\n }\n return features, labels\n\n\ndef boston_eval_fn():\n boston = base.load_boston()\n n_examples = len(boston.target)\n features = array_ops.reshape(\n constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])\n labels = array_ops.reshape(\n constant_op.constant(boston.target), [n_examples, 1])\n return array_ops.concat([features, features], 0), array_ops.concat(\n [labels, labels], 0)\n\n\ndef extract(data, key):\n if isinstance(data, dict):\n assert key in 
data\n return data[key]\n else:\n return data\n\n\ndef linear_model_params_fn(features, labels, mode, params):\n features = extract(features, 'input')\n labels = extract(labels, 'labels')\n\n assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,\n model_fn.ModeKeys.INFER)\n prediction, loss = (models.linear_regression_zero_init(features, labels))\n train_op = optimizers.optimize_loss(\n loss,\n training_util.get_global_step(),\n optimizer='Adagrad',\n learning_rate=params['learning_rate'])\n return prediction, loss, train_op\n\n\ndef linear_model_fn(features, labels, mode):\n features = extract(features, 'input')\n labels = extract(labels, 'labels')\n assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,\n model_fn.ModeKeys.INFER)\n if isinstance(features, dict):\n (_, features), = features.items()\n prediction, loss = (models.linear_regression_zero_init(features, labels))\n train_op = optimizers.optimize_loss(\n loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)\n return prediction, loss, train_op\n\n\ndef linear_model_fn_with_model_fn_ops(features, labels, mode):\n \"\"\"Same as linear_model_fn, but returns `ModelFnOps`.\"\"\"\n assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,\n model_fn.ModeKeys.INFER)\n prediction, loss = (models.linear_regression_zero_init(features, labels))\n train_op = optimizers.optimize_loss(\n loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)\n return model_fn.ModelFnOps(\n mode=mode, predictions=prediction, loss=loss, train_op=train_op)\n\n\ndef logistic_model_no_mode_fn(features, labels):\n features = extract(features, 'input')\n labels = extract(labels, 'labels')\n labels = array_ops.one_hot(labels, 3, 1, 0)\n prediction, loss = (models.logistic_regression_zero_init(features, labels))\n train_op = optimizers.optimize_loss(\n loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)\n return {\n 'class': math_ops.argmax(prediction, 1),\n 'prob': prediction\n }, loss, train_op\n\n\nVOCAB_FILE_CONTENT = 'emerson\\nlake\\npalmer\\n'\nEXTRA_FILE_CONTENT = 'kermit\\npiggy\\nralph\\n'\n\n\nclass EstimatorInputTest(test.TestCase):\n\n def testContinueTrainingDictionaryInput(self):\n boston = base.load_boston()\n output_dir = tempfile.mkdtemp()\n est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)\n boston_input = {'input': boston.data}\n float64_target = {'labels': boston.target.astype(np.float64)}\n est.fit(x=boston_input, y=float64_target, steps=50)\n scores = est.evaluate(\n x=boston_input,\n y=float64_target,\n metrics={'MSE': metric_ops.streaming_mean_squared_error})\n del est\n # Create another estimator object with the same output dir.\n est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)\n\n # Check we can evaluate and predict.\n scores2 = est2.evaluate(\n x=boston_input,\n y=float64_target,\n metrics={'MSE': metric_ops.streaming_mean_squared_error})\n self.assertAllClose(scores2['MSE'], scores['MSE'])\n predictions = np.array(list(est2.predict(x=boston_input)))\n other_score = _sklearn.mean_squared_error(predictions,\n float64_target['labels'])\n self.assertAllClose(other_score, scores['MSE'])\n\n def testBostonAll(self):\n boston = base.load_boston()\n est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))\n float64_labels = boston.target.astype(np.float64)\n est.fit(x=boston.data, y=float64_labels, steps=100)\n scores = est.score(\n x=boston.data,\n y=float64_labels,\n 
metrics={'MSE': metric_ops.streaming_mean_squared_error})\n predictions = np.array(list(est.predict(x=boston.data)))\n other_score = _sklearn.mean_squared_error(predictions, boston.target)\n self.assertAllClose(scores['MSE'], other_score)\n self.assertTrue('global_step' in scores)\n self.assertEqual(100, scores['global_step'])\n\n def testBostonAllDictionaryInput(self):\n boston = base.load_boston()\n est = estimator.Estimator(model_fn=linear_model_fn)\n boston_input = {'input': boston.data}\n float64_target = {'labels': boston.target.astype(np.float64)}\n est.fit(x=boston_input, y=float64_target, steps=100)\n scores = est.evaluate(\n x=boston_input,\n y=float64_target,\n metrics={'MSE': metric_ops.streaming_mean_squared_error})\n predictions = np.array(list(est.predict(x=boston_input)))\n other_score = _sklearn.mean_squared_error(predictions, boston.target)\n self.assertAllClose(other_score, scores['MSE'])\n self.assertTrue('global_step' in scores)\n self.assertEqual(scores['global_step'], 100)\n\n def testIrisAll(self):\n iris = base.load_iris()\n est = estimator.SKCompat(\n estimator.Estimator(model_fn=logistic_model_no_mode_fn))\n est.fit(iris.data, iris.target, steps=100)\n scores = est.score(\n x=iris.data,\n y=iris.target,\n metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})\n predictions = est.predict(x=iris.data)\n predictions_class = est.predict(x=iris.data, outputs=['class'])['class']\n self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])\n self.assertAllClose(predictions['class'], predictions_class)\n self.assertAllClose(\n predictions['class'], np.argmax(\n predictions['prob'], axis=1))\n other_score = _sklearn.accuracy_score(iris.target, predictions['class'])\n self.assertAllClose(scores['accuracy'], other_score)\n self.assertTrue('global_step' in scores)\n self.assertEqual(100, scores['global_step'])\n\n def testIrisAllDictionaryInput(self):\n iris = base.load_iris()\n est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)\n iris_data = {'input': iris.data}\n iris_target = {'labels': iris.target}\n est.fit(iris_data, iris_target, steps=100)\n scores = est.evaluate(\n x=iris_data,\n y=iris_target,\n metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})\n predictions = list(est.predict(x=iris_data))\n predictions_class = list(est.predict(x=iris_data, outputs=['class']))\n self.assertEqual(len(predictions), iris.target.shape[0])\n classes_batch = np.array([p['class'] for p in predictions])\n self.assertAllClose(classes_batch,\n np.array([p['class'] for p in predictions_class]))\n self.assertAllClose(\n classes_batch,\n np.argmax(\n np.array([p['prob'] for p in predictions]), axis=1))\n other_score = _sklearn.accuracy_score(iris.target, classes_batch)\n self.assertAllClose(other_score, scores['accuracy'])\n self.assertTrue('global_step' in scores)\n self.assertEqual(scores['global_step'], 100)\n\n def testIrisInputFn(self):\n iris = base.load_iris()\n est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)\n est.fit(input_fn=iris_input_fn, steps=100)\n _ = est.evaluate(input_fn=iris_input_fn, steps=1)\n predictions = list(est.predict(x=iris.data))\n self.assertEqual(len(predictions), iris.target.shape[0])\n\n def testIrisInputFnLabelsDict(self):\n iris = base.load_iris()\n est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)\n est.fit(input_fn=iris_input_fn_labels_dict, steps=100)\n _ = est.evaluate(\n input_fn=iris_input_fn_labels_dict,\n steps=1,\n metrics={\n 'accuracy':\n metric_spec.MetricSpec(\n 
metric_fn=metric_ops.streaming_accuracy,\n prediction_key='class',\n label_key='labels')\n })\n predictions = list(est.predict(x=iris.data))\n self.assertEqual(len(predictions), iris.target.shape[0])\n\n def testTrainInputFn(self):\n est = estimator.Estimator(model_fn=linear_model_fn)\n est.fit(input_fn=boston_input_fn, steps=1)\n _ = est.evaluate(input_fn=boston_eval_fn, steps=1)\n\n def testPredictInputFn(self):\n est = estimator.Estimator(model_fn=linear_model_fn)\n boston = base.load_boston()\n est.fit(input_fn=boston_input_fn, steps=1)\n input_fn = functools.partial(boston_input_fn, num_epochs=1)\n output = list(est.predict(input_fn=input_fn))\n self.assertEqual(len(output), boston.target.shape[0])\n\n def testPredictInputFnWithQueue(self):\n est = estimator.Estimator(model_fn=linear_model_fn)\n boston = base.load_boston()\n est.fit(input_fn=boston_input_fn, steps=1)\n input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)\n output = list(est.predict(input_fn=input_fn))\n self.assertEqual(len(output), boston.target.shape[0] * 2)\n\n def testPredictConstInputFn(self):\n est = estimator.Estimator(model_fn=linear_model_fn)\n boston = base.load_boston()\n est.fit(input_fn=boston_input_fn, steps=1)\n\n def input_fn():\n features = array_ops.reshape(\n constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])\n labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])\n return features, labels\n\n output = list(est.predict(input_fn=input_fn))\n self.assertEqual(len(output), boston.target.shape[0])\n\n\nif __name__ == '__main__':\n test.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382671,"cells":{"repo_name":{"kind":"string","value":"arabenjamin/scikit-learn"},"path":{"kind":"string","value":"doc/datasets/mldata_fixture.py"},"copies":{"kind":"string","value":"367"},"size":{"kind":"string","value":"1183"},"content":{"kind":"string","value":"\"\"\"Fixture module to skip the datasets loading when offline\n\nMock urllib2 access to mldata.org and create a temporary data folder.\n\"\"\"\n\nfrom os import makedirs\nfrom os.path import join\nimport numpy as np\nimport tempfile\nimport shutil\n\nfrom sklearn import datasets\nfrom sklearn.utils.testing import install_mldata_mock\nfrom sklearn.utils.testing import uninstall_mldata_mock\n\n\ndef globs(globs):\n # Create a temporary folder for the data fetcher\n global custom_data_home\n custom_data_home = tempfile.mkdtemp()\n makedirs(join(custom_data_home, 'mldata'))\n globs['custom_data_home'] = custom_data_home\n return globs\n\n\ndef setup_module():\n # setup mock urllib2 module to avoid downloading from mldata.org\n install_mldata_mock({\n 'mnist-original': {\n 'data': np.empty((70000, 784)),\n 'label': np.repeat(np.arange(10, dtype='d'), 7000),\n },\n 'iris': {\n 'data': np.empty((150, 4)),\n },\n 'datasets-uci-iris': {\n 'double0': np.empty((150, 4)),\n 'class': np.empty((150,)),\n },\n })\n\n\ndef teardown_module():\n uninstall_mldata_mock()\n shutil.rmtree(custom_data_home)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382672,"cells":{"repo_name":{"kind":"string","value":"mo-g/iris"},"path":{"kind":"string","value":"lib/iris/quickplot.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"8992"},"content":{"kind":"string","value":"# (C) British Crown Copyright 2010 - 2015, Met Office\n#\n# This file is part of Iris.\n#\n# Iris is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License 
as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Iris is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with Iris. If not, see .\n\"\"\"\nHigh-level plotting extensions to :mod:`iris.plot`.\n\nThese routines work much like their :mod:`iris.plot` counterparts, but they\nautomatically add a plot title, axis titles, and a colour bar when appropriate.\n\nSee also: :ref:`matplotlib `.\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\nfrom six.moves import (filter, input, map, range, zip) # noqa\n\nimport cf_units\nimport matplotlib.pyplot as plt\n\nimport iris.config\nimport iris.coords\nimport iris.plot as iplt\n\n\ndef _use_symbol(units):\n # For non-time units use the shortest unit representation.\n # E.g. prefer 'K' over 'kelvin', but not '0.0174532925199433 rad'\n # over 'degrees'\n return (not units.is_time() and\n not units.is_time_reference() and\n len(units.symbol) < len(str(units)))\n\n\ndef _title(cube_or_coord, with_units):\n if cube_or_coord is None:\n title = ''\n else:\n title = cube_or_coord.name().replace('_', ' ').capitalize()\n units = cube_or_coord.units\n if with_units and not (units.is_unknown() or\n units.is_no_unit() or\n units == cf_units.Unit('1')):\n\n if _use_symbol(units):\n units = units.symbol\n title += ' / {}'.format(units)\n\n return title\n\n\ndef _label(cube, mode, result=None, ndims=2, coords=None):\n \"\"\"Puts labels on the current plot using the given cube.\"\"\"\n\n plt.title(_title(cube, with_units=False))\n\n if result is not None:\n draw_edges = mode == iris.coords.POINT_MODE\n bar = plt.colorbar(result, orientation='horizontal',\n drawedges=draw_edges)\n has_known_units = not (cube.units.is_unknown() or\n cube.units.is_no_unit())\n if has_known_units and cube.units != cf_units.Unit('1'):\n # Use shortest unit representation for anything other than time\n if _use_symbol(cube.units):\n bar.set_label(cube.units.symbol)\n else:\n bar.set_label(cube.units)\n # Remove the tick which is put on the colorbar by default.\n bar.ax.tick_params(length=0)\n\n if coords is None:\n plot_defn = iplt._get_plot_defn(cube, mode, ndims)\n else:\n plot_defn = iplt._get_plot_defn_custom_coords_picked(\n cube, coords, mode, ndims=ndims)\n\n if ndims == 2:\n if not iplt._can_draw_map(plot_defn.coords):\n plt.ylabel(_title(plot_defn.coords[0], with_units=True))\n plt.xlabel(_title(plot_defn.coords[1], with_units=True))\n elif ndims == 1:\n plt.xlabel(_title(plot_defn.coords[0], with_units=True))\n plt.ylabel(_title(cube, with_units=True))\n else:\n msg = 'Unexpected number of dimensions (%s) given to _label.' 
% ndims\n raise ValueError(msg)\n\n\ndef _label_with_bounds(cube, result=None, ndims=2, coords=None):\n _label(cube, iris.coords.BOUND_MODE, result, ndims, coords)\n\n\ndef _label_with_points(cube, result=None, ndims=2, coords=None):\n _label(cube, iris.coords.POINT_MODE, result, ndims, coords)\n\n\ndef _get_titles(u_object, v_object):\n if u_object is None:\n u_object = iplt._u_object_from_v_object(v_object)\n xunits = u_object is not None and not u_object.units.is_time_reference()\n yunits = not v_object.units.is_time_reference()\n xlabel = _title(u_object, with_units=xunits)\n ylabel = _title(v_object, with_units=yunits)\n title = ''\n if u_object is None:\n title = _title(v_object, with_units=False)\n elif isinstance(u_object, iris.cube.Cube) and \\\n not isinstance(v_object, iris.cube.Cube):\n title = _title(u_object, with_units=False)\n elif isinstance(v_object, iris.cube.Cube) and \\\n not isinstance(u_object, iris.cube.Cube):\n title = _title(v_object, with_units=False)\n return xlabel, ylabel, title\n\n\ndef _label_1d_plot(*args):\n if len(args) > 1 and isinstance(args[1],\n (iris.cube.Cube, iris.coords.Coord)):\n xlabel, ylabel, title = _get_titles(*args[:2])\n else:\n xlabel, ylabel, title = _get_titles(None, args[0])\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n\ndef contour(cube, *args, **kwargs):\n \"\"\"\n Draws contour lines on a labelled plot based on the given Cube.\n\n With the basic call signature, contour \"level\" values are chosen\n automatically::\n\n contour(cube)\n\n Supply a number to use *N* automatically chosen levels::\n\n contour(cube, N)\n\n Supply a sequence *V* to use explicitly defined levels::\n\n contour(cube, V)\n\n See :func:`iris.plot.contour` for details of valid keyword arguments.\n\n \"\"\"\n coords = kwargs.get('coords')\n result = iplt.contour(cube, *args, **kwargs)\n _label_with_points(cube, coords=coords)\n return result\n\n\ndef contourf(cube, *args, **kwargs):\n \"\"\"\n Draws filled contours on a labelled plot based on the given Cube.\n\n With the basic call signature, contour \"level\" values are chosen\n automatically::\n\n contour(cube)\n\n Supply a number to use *N* automatically chosen levels::\n\n contour(cube, N)\n\n Supply a sequence *V* to use explicitly defined levels::\n\n contour(cube, V)\n\n See :func:`iris.plot.contourf` for details of valid keyword arguments.\n\n \"\"\"\n coords = kwargs.get('coords')\n result = iplt.contourf(cube, *args, **kwargs)\n _label_with_points(cube, result, coords=coords)\n return result\n\n\ndef outline(cube, coords=None, color='k', linewidth=None):\n \"\"\"\n Draws cell outlines on a labelled plot based on the given Cube.\n\n Kwargs:\n\n * coords: list of :class:`~iris.coords.Coord` objects or coordinate names\n Use the given coordinates as the axes for the plot. The order of the\n given coordinates indicates which axis to use for each, where the first\n element is the horizontal axis of the plot and the second element is\n the vertical axis of the plot.\n\n * color: None or mpl color\n The color of the cell outlines. If None, the matplotlibrc setting\n patch.edgecolor is used by default.\n\n * linewidth: None or number\n The width of the lines showing the cell outlines. 
If None, the default\n width in patch.linewidth in matplotlibrc is used.\n\n \"\"\"\n result = iplt.outline(cube, color=color, linewidth=linewidth,\n coords=coords)\n\n _label_with_bounds(cube, coords=coords)\n return result\n\n\ndef pcolor(cube, *args, **kwargs):\n \"\"\"\n Draws a labelled pseudocolor plot based on the given Cube.\n\n See :func:`iris.plot.pcolor` for details of valid keyword arguments.\n\n \"\"\"\n coords = kwargs.get('coords')\n result = iplt.pcolor(cube, *args, **kwargs)\n _label_with_bounds(cube, result, coords=coords)\n return result\n\n\ndef pcolormesh(cube, *args, **kwargs):\n \"\"\"\n Draws a labelled pseudocolour plot based on the given Cube.\n\n See :func:`iris.plot.pcolormesh` for details of valid keyword arguments.\n\n \"\"\"\n coords = kwargs.get('coords')\n result = iplt.pcolormesh(cube, *args, **kwargs)\n _label_with_bounds(cube, result, coords=coords)\n return result\n\n\ndef points(cube, *args, **kwargs):\n \"\"\"\n Draws sample point positions on a labelled plot based on the given Cube.\n\n See :func:`iris.plot.points` for details of valid keyword arguments.\n\n \"\"\"\n coords = kwargs.get('coords')\n result = iplt.points(cube, *args, **kwargs)\n _label_with_points(cube, coords=coords)\n return result\n\n\ndef plot(*args, **kwargs):\n \"\"\"\n Draws a labelled line plot based on the given cube(s) or\n coordinate(s).\n\n See :func:`iris.plot.plot` for details of valid arguments and\n keyword arguments.\n\n \"\"\"\n result = iplt.plot(*args, **kwargs)\n _label_1d_plot(*args)\n return result\n\n\ndef scatter(x, y, *args, **kwargs):\n \"\"\"\n Draws a labelled scatter plot based on the given cubes or\n coordinates.\n\n See :func:`iris.plot.scatter` for details of valid arguments and\n keyword arguments.\n\n \"\"\"\n result = iplt.scatter(x, y, *args, **kwargs)\n _label_1d_plot(x, y)\n return result\n\n\n# Provide a convenience show method from pyplot.\nshow = plt.show\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382673,"cells":{"repo_name":{"kind":"string","value":"liyinwei/pandas"},"path":{"kind":"string","value":"quickstart/12_getting_data_in_out.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1191"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: liyinwei\n@E-mail: coridc@foxmail.com\n@Time: 2016/11/21 11:35\n@Description:\n 1.文件读写,包括:\n a)csv\n Writing to a csv file: http://pandas.pydata.org/pandas-docs/stable/io.html#io-store-in-csv\n Reading from a csv file: http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table\n b)HDF5: http://pandas.pydata.org/pandas-docs/stable/io.html#io-hdf5\n c)Excel: http://pandas.pydata.org/pandas-docs/stable/io.html#io-excel\n\"\"\"\n\n\"\"\"\nID: 12_01\nDesc: Writing to a csv file\n\"\"\"\n# df.to_csv('foo.csv')\n\n\"\"\"\nID: 12_02\nDesc: Reading from a csv file\n\"\"\"\n# pd.read_csv('foo.csv')\n\n\"\"\"\nID: 12_03\nDesc: Writing to a HDF5 Store\n\"\"\"\n# df.to_hdf('foo.h5','df')\n\n\"\"\"\nID: 12_04\nDesc: Reading from a HDF5 Store\n\"\"\"\n# pd.read_hdf('foo.h5','df')\n\n\"\"\"\nID: 12_05\nDesc: Writing to an excel file\n\"\"\"\n# df.to_excel('foo.xlsx', sheet_name='Sheet1')\n\n\"\"\"\nID: 12_06\nDesc: Reading from an excel file\n\"\"\"\n# pd.read_excel('foo.xlsx', 'Sheet1', index_col=None, na_values=['NA'])\n\n\nif __name__ == '__main__':\n 
pass\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382674,"cells":{"repo_name":{"kind":"string","value":"ryandougherty/mwa-capstone"},"path":{"kind":"string","value":"MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/legend_auto.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"2267"},"content":{"kind":"string","value":"\"\"\"\nThis file was written to test matplotlib's autolegend placement\nalgorithm, but shows lots of different ways to create legends so is\nuseful as a general examples\n\nThanks to John Gill and Phil ?? for help at the matplotlib sprint at\npycon 2005 where the auto-legend support was written.\n\"\"\"\nfrom pylab import *\nimport sys\n\nrcParams['legend.loc'] = 'best'\n\nN = 100\nx = arange(N)\n\ndef fig_1():\n figure(1)\n t = arange(0, 40.0 * pi, 0.1)\n l, = plot(t, 100*sin(t), 'r', label='sine')\n legend()\n\ndef fig_2():\n figure(2)\n plot(x, 'o', label='x=y')\n legend()\n\ndef fig_3():\n figure(3)\n plot(x, -x, 'o', label='x= -y')\n legend()\n\ndef fig_4():\n figure(4)\n plot(x, ones(len(x)), 'o', label='y=1')\n plot(x, -ones(len(x)), 'o', label='y=-1')\n legend()\n\ndef fig_5():\n figure(5)\n n, bins, patches = hist(randn(1000), 40, normed=1)\n l, = plot(bins, normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3)\n legend([l, patches[0]], ['fit', 'hist'])\n\ndef fig_6():\n figure(6)\n plot(x, 50-x, 'o', label='y=1')\n plot(x, x-50, 'o', label='y=-1')\n legend()\n\ndef fig_7():\n figure(7)\n xx = x - (N/2.0)\n plot(xx, (xx*xx)-1225, 'bo', label='$y=x^2$')\n plot(xx, 25*xx, 'go', label='$y=25x$')\n plot(xx, -25*xx, 'mo', label='$y=-25x$')\n legend()\n\ndef fig_8():\n figure(8)\n b1 = bar(x, x, color='m')\n b2 = bar(x, x[::-1], color='g')\n legend([b1[0], b2[0]], ['up', 'down'])\n\ndef fig_9():\n figure(9)\n b1 = bar(x, -x)\n b2 = bar(x, -x[::-1], color='r')\n legend([b1[0], b2[0]], ['down', 'up'])\n\ndef fig_10():\n figure(10)\n b1 = bar(x, x, bottom=-100, color='m')\n b2 = bar(x, x[::-1], bottom=-100, color='g')\n b3 = bar(x, -x, bottom=100)\n b4 = bar(x, -x[::-1], bottom=100, color='r')\n legend([b1[0], b2[0], b3[0], b4[0]], ['bottom right', 'bottom left',\n 'top left', 'top right'])\n\nif __name__ == '__main__':\n nfigs = 10\n figures = []\n for f in sys.argv[1:]:\n try:\n figures.append(int(f))\n except ValueError:\n pass\n if len(figures) == 0:\n figures = range(1, nfigs+1)\n\n for fig in figures:\n fn_name = \"fig_%d\" % fig\n fn = globals()[fn_name]\n fn()\n\n show()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":382675,"cells":{"repo_name":{"kind":"string","value":"paulrbrenner/GOS"},"path":{"kind":"string","value":"examples/migration/visualization/plotlyviz.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1188"},"content":{"kind":"string","value":"import plotly\nimport pandas as pd\n\ndef map(dataframe, title = \"Map\", colorbarName = None):\n\n\t#---The next line and the line at bottom are for the Jupyter Notebook---\n\t#plotly.offline.init_notebook_mode(connected=True)\n\n\tdata = [ dict(\n\t\ttype = 'choropleth',\n\t\tlocations = dataframe['country'],\n\t\tz = dataframe['value'],\n\t# text = dataframe['name'],\n\t\tcolorscale = [[0,\"rgb(215,25,28)\"],[0.25,\"rgb(253,174,97)\"],[0.5,\"rgb(255,255,191)\"],[0.75,\"rgb(166,217,106)\"],[1,\"rgb(26,150,65)\"]],\n\t\tautocolorscale = False,\n\t\treversescale = False,\n\t\tmarker = dict(\n\t\t line = dict (\n\t\t color = 'rgb(180,180,180)',\n\t\t width = 0.5\n\t\t ) ),\n\t\tcolorbar = dict(\n\t\t autotick = 
False,\n\t\t # tickprefix = 'V',\n\t\t title = colorbarName),\n\t ) ]\n\n\tlayout = dict(\n\t title = title,\n\t titlefont = dict(\n\t\tsize = 60\n\t\t),\n\t geo = dict(\n\t\tshowframe = False,\n\t\tshowcoastlines = False,\n\t\tshowcountries = True,\n\t\tcountrycolor = \"#f0f0f0\",\n\t\tprojection = dict(\n\t\t type = 'Mercator'\n\t\t)\n\t )\n\t)\n\n\tfig = dict( data=data, layout=layout )\n\n\t#plotly.offline.iplot(fig, validate=False, filename='plotly-map' )\n\tplotly.offline.plot(fig, validate=False, filename='plotly-map' )\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382676,"cells":{"repo_name":{"kind":"string","value":"awni/tensorflow"},"path":{"kind":"string","value":"tensorflow/examples/skflow/multiple_gpu.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1664"},"content":{"kind":"string","value":"# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom sklearn import datasets, metrics, cross_validation\nimport tensorflow as tf\nfrom tensorflow.contrib import skflow\n\niris = datasets.load_iris()\nX_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,\n test_size=0.2, random_state=42)\n\ndef my_model(X, y):\n \"\"\"\n This is DNN with 10, 20, 10 hidden layers, and dropout of 0.5 probability.\n\n Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and \n CUDNN 6.5 V2 from NVIDIA need to be installed beforehand. 
\n \"\"\"\n with tf.device('/gpu:1'):\n \tlayers = skflow.ops.dnn(X, [10, 20, 10], keep_prob=0.5)\n with tf.device('/gpu:2'):\n \treturn skflow.models.logistic_regression(layers, y)\n\nclassifier = skflow.TensorFlowEstimator(model_fn=my_model, n_classes=3)\nclassifier.fit(X_train, y_train)\nscore = metrics.accuracy_score(y_test, classifier.predict(X_test))\nprint('Accuracy: {0:f}'.format(score))\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382677,"cells":{"repo_name":{"kind":"string","value":"skudriashev/incubator-airflow"},"path":{"kind":"string","value":"airflow/hooks/base_hook.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"2950"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport random\n\nfrom airflow import settings\nfrom airflow.models import Connection\nfrom airflow.exceptions import AirflowException\nfrom airflow.utils.log.logging_mixin import LoggingMixin\n\nCONN_ENV_PREFIX = 'AIRFLOW_CONN_'\n\n\nclass BaseHook(LoggingMixin):\n \"\"\"\n Abstract base class for hooks, hooks are meant as an interface to\n interact with external systems. 
MySqlHook, HiveHook, PigHook return\n object that can handle the connection and interaction to specific\n instances of these systems, and expose consistent methods to interact\n with them.\n \"\"\"\n def __init__(self, source):\n pass\n\n\n @classmethod\n def _get_connections_from_db(cls, conn_id):\n session = settings.Session()\n db = (\n session.query(Connection)\n .filter(Connection.conn_id == conn_id)\n .all()\n )\n session.expunge_all()\n session.close()\n if not db:\n raise AirflowException(\n \"The conn_id `{0}` isn't defined\".format(conn_id))\n return db\n\n @classmethod\n def _get_connection_from_env(cls, conn_id):\n environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())\n conn = None\n if environment_uri:\n conn = Connection(conn_id=conn_id, uri=environment_uri)\n return conn\n\n @classmethod\n def get_connections(cls, conn_id):\n conn = cls._get_connection_from_env(conn_id)\n if conn:\n conns = [conn]\n else:\n conns = cls._get_connections_from_db(conn_id)\n return conns\n\n @classmethod\n def get_connection(cls, conn_id):\n conn = random.choice(cls.get_connections(conn_id))\n if conn.host:\n log = LoggingMixin().log\n log.info(\"Using connection to: %s\", conn.host)\n return conn\n\n @classmethod\n def get_hook(cls, conn_id):\n connection = cls.get_connection(conn_id)\n return connection.get_hook()\n\n def get_conn(self):\n raise NotImplementedError()\n\n def get_records(self, sql):\n raise NotImplementedError()\n\n def get_pandas_df(self, sql):\n raise NotImplementedError()\n\n def run(self, sql):\n raise NotImplementedError()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":382678,"cells":{"repo_name":{"kind":"string","value":"tinkerinestudio/Tinkerine-Suite"},"path":{"kind":"string","value":"TinkerineSuite/python/Lib/numpy/core/function_base.py"},"copies":{"kind":"string","value":"82"},"size":{"kind":"string","value":"5474"},"content":{"kind":"string","value":"__all__ = ['logspace', 'linspace']\n\nimport numeric as _nx\nfrom numeric import array\n\ndef linspace(start, stop, num=50, endpoint=True, retstep=False):\n \"\"\"\n Return evenly spaced numbers over a specified interval.\n\n Returns `num` evenly spaced samples, calculated over the\n interval [`start`, `stop` ].\n\n The endpoint of the interval can optionally be excluded.\n\n Parameters\n ----------\n start : scalar\n The starting value of the sequence.\n stop : scalar\n The end value of the sequence, unless `endpoint` is set to False.\n In that case, the sequence consists of all but the last of ``num + 1``\n evenly spaced samples, so that `stop` is excluded. Note that the step\n size changes when `endpoint` is False.\n num : int, optional\n Number of samples to generate. Default is 50.\n endpoint : bool, optional\n If True, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (`samples`, `step`), where `step` is the spacing\n between samples.\n\n Returns\n -------\n samples : ndarray\n There are `num` equally spaced samples in the closed interval\n ``[start, stop]`` or the half-open interval ``[start, stop)``\n (depending on whether `endpoint` is True or False).\n step : float (only if `retstep` is True)\n Size of spacing between samples.\n\n\n See Also\n --------\n arange : Similiar to `linspace`, but uses a step size (instead of the\n number of samples).\n logspace : Samples uniformly distributed in log space.\n\n Examples\n --------\n >>> np.linspace(2.0, 3.0, num=5)\n array([ 2. , 2.25, 2.5 , 2.75, 3. 
])\n >>> np.linspace(2.0, 3.0, num=5, endpoint=False)\n array([ 2. , 2.2, 2.4, 2.6, 2.8])\n >>> np.linspace(2.0, 3.0, num=5, retstep=True)\n (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 8\n >>> y = np.zeros(N)\n >>> x1 = np.linspace(0, 10, N, endpoint=True)\n >>> x2 = np.linspace(0, 10, N, endpoint=False)\n >>> plt.plot(x1, y, 'o')\n []\n >>> plt.plot(x2, y + 0.5, 'o')\n []\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n \"\"\"\n num = int(num)\n if num <= 0:\n return array([], float)\n if endpoint:\n if num == 1:\n return array([float(start)])\n step = (stop-start)/float((num-1))\n y = _nx.arange(0, num) * step + start\n y[-1] = stop\n else:\n step = (stop-start)/float(num)\n y = _nx.arange(0, num) * step + start\n if retstep:\n return y, step\n else:\n return y\n\ndef logspace(start,stop,num=50,endpoint=True,base=10.0):\n \"\"\"\n Return numbers spaced evenly on a log scale.\n\n In linear space, the sequence starts at ``base ** start``\n (`base` to the power of `start`) and ends with ``base ** stop``\n (see `endpoint` below).\n\n Parameters\n ----------\n start : float\n ``base ** start`` is the starting value of the sequence.\n stop : float\n ``base ** stop`` is the final value of the sequence, unless `endpoint`\n is False. In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length ``num``) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n base : float, optional\n The base of the log space. The step size between the elements in\n ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.\n Default is 10.0.\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n arange : Similiar to linspace, with the step size specified instead of the\n number of samples. Note that, when used with a float endpoint, the\n endpoint may or may not be included.\n linspace : Similar to logspace, but with the samples uniformly distributed\n in linear space, instead of log space.\n\n Notes\n -----\n Logspace is equivalent to the code\n\n >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)\n ... # doctest: +SKIP\n >>> power(base, y)\n ... # doctest: +SKIP\n\n Examples\n --------\n >>> np.logspace(2.0, 3.0, num=4)\n array([ 100. , 215.443469 , 464.15888336, 1000. ])\n >>> np.logspace(2.0, 3.0, num=4, endpoint=False)\n array([ 100. , 177.827941 , 316.22776602, 562.34132519])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0)\n array([ 4. , 5.0396842 , 6.34960421, 8. 
])\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 10\n >>> x1 = np.logspace(0.1, 1, N, endpoint=True)\n >>> x2 = np.logspace(0.1, 1, N, endpoint=False)\n >>> y = np.zeros(N)\n >>> plt.plot(x1, y, 'o')\n []\n >>> plt.plot(x2, y + 0.5, 'o')\n []\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n \"\"\"\n y = linspace(start,stop,num=num,endpoint=endpoint)\n return _nx.power(base,y)\n\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":382679,"cells":{"repo_name":{"kind":"string","value":"kwentz10/Photosynthesis_Optimization_Modeling"},"path":{"kind":"string","value":"Traits_Physical_Factorial_CummulativeGS.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9910"},"content":{"kind":"string","value":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 25 10:10:28 2017\n\n@author: Katherine\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 23 16:04:21 2017\n\n@author: Katherine\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nPhotosynthesis and Stomatal Conductance Model \nCreated 9/27/2016\nKatherine Wentz\n\nThis is a program that runs photosynthesis and\nstomatal conductance models given changes in leaf-\nlevel traits. \n\nThe end product is graphs of NUE vs. WUE.\n\n\nUpdate: I am going to run the model for plants with \ntraits that are distinctive of the meadow moisture \ngradient in the alpine tundra.\n\nFix: correct for atmospheric pressure differences in co2, o2, and vapor pressure\n\nFix: vcmax temp dependence (pg 63 in plant physiological ecology book)\n\nFix: NEW VARIBALE TRAIT-->make the fraction of leaf N in rubisco go down with increasing SLA,\nchlorophyll content, and decreasing light (wet meadow)--more N is allocated\nto thylakoids. The only way for chl/m2 to increase even when g N/m2 goes down\nor is constant is for the leaf to allocate more of leaf N to chl...also, note\nthat there is more organic N designated to photo in leaf when SLA goes up\nbecause less N is used in structure. see \"Photosynthesis or persistence: N allocation\nin leaves of evergreen and deciduous... by Takashima et al. 2004. Also see Photosynthetic\nnitrogen-use efficiency of species...by Poorter and Evans 1998\n\nNote to self: NUE and WUE relationship flipflops with change in air temperature;\nNUE makes sense because C:N decreases from dry to wet meadows; WUE increasing\nin snowbed does not necessarilly make sense--look in the literature for this\n\nherbs have a higher NUE\n\n\"\"\"\n\n#---------------Import Modules---------------#\n\nimport itertools as it\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n#Import combinations of variable parameters \nfrom uncertain_params import monte_carlo\n\n#Import photosynthesis model\nfrom Photosynthesis_Model import photo_bound_meso_eqstom as photo\n\n#Import functions to switch between Pa and umol/mol at sea level\nfrom photo_functions import pa_con_atmfrac\n\n\n#import timeseries of vwc and temp\nfrom time_dep_params import surtemp_dm, surtemp_wm, vwc_dm, vwc_wm, na_dm_min_inter,na_wm_min_inter,na_dm_max_inter,na_wm_max_inter\n\n\n#---------------Determine if I Want to Keep Any of the Variable Parameters Constant---------------#\n\nconst_params=[]\nfor xxx in it.combinations(['ht'],0): #keep ht and t constant for constant vpd\n const_params+=[xxx]\n\n#do this when I do not put any of the variable parameters as constant. 
instead I \n#vary each parameter one at a time while keeping the other parameters constant.\nif const_params==[()]:\n const_params=[[-999999]] \n\n#---------------Begin Looping Through Photosynthesis Model---------------#\n\n#each loop is for a constant value, or combinatin of constant values, of variable parameter as determined above\nfor ii in range(len(const_params)):\n\n\n\n\n #---------------Run through time series---------------#\n \n days=np.linspace(1,365,365)\n \n #dry meadow\n tot_nue_dm_avg=[]\n tot_wue_dm_avg=[]\n tot_nue_dm_min=[]\n tot_wue_dm_min=[]\n tot_nue_dm_max=[]\n tot_wue_dm_max=[]\n tot_A_dm_avg=[]\n tot_gs_dm_avg=[]\n \n #moist meadow\n tot_nue_mm_avg=[]\n tot_wue_mm_avg=[]\n tot_nue_mm_min=[]\n tot_wue_mm_min=[]\n tot_nue_mm_max=[]\n tot_wue_mm_max=[] \n tot_A_mm_avg=[]\n tot_gs_mm_avg=[]\n\n #wet meadow\n tot_nue_wm_avg=[]\n tot_wue_wm_avg=[]\n tot_nue_wm_min=[]\n tot_wue_wm_min=[]\n tot_nue_wm_max=[]\n tot_wue_wm_max=[]\n tot_A_wm_avg=[]\n tot_gs_wm_avg=[]\n\n\n \n\n \n \n\n\n\n #---------------Photosynthesis + Stomatal Conductance Model---------------#\n\n \n ##---Constant Parameter Arrays for Model---##\n\n #----Params Used in Model Currently----#\n \n tk_25=298.16; #absolute temperature at 25 C\n ekc=80500.0 #Activation energy for K of CO2 (J mol-1)\n eko=14500.0 #Activation energy for K of O2 (J mol-1)\n etau=-29000.0 #Activation energy for tau (???) (J mol-1)\n ev=55000.0 #Activation energy for carboxylation (J mol-1)\n ej=55000.0 #Activation energy for electron transport (J mol-1)\n toptv=303.0 #Optimum temperature for maximum carboxylation (K)\n toptj=303.0 #Optimum temperature for maximum electron transport (K)\n ra=np.zeros(shape=1)+20.7 #specific rubisco activity (umol CO2/g Rub s)\n flnr=np.zeros(shape=1)+0.1 #fraction of leaf nitrogen in rubisco (g N Rub/g N leaf)\n frnr=np.zeros(shape=1)+6.25 #weight fraction of nitrogen in rubisco molecule (g Rub/g N Rub) \n rh=np.zeros(shape=1)+0.5 #relative humidity (kPa/kPa)\n ca=np.zeros(shape=1)+405 #ambient carbon dioxide (umol CO2/mol air)\n ko25=np.zeros(shape=1)+30000 #Michaelis-Menten kinetic coefficient for oxygen at 25 C(Pa) \n kc25=np.zeros(shape=1)+30 #Michaelis-Menten kinetic coefficient for carbon dioxide at 25 C (Pa)\n o=np.zeros(shape=1)+210000 #concentration of ambient oxygen (umol/mol)\n g0=np.zeros(shape=1)+0.002 #Ball-Berry stomatal conductance intercept parameter (mol H2O/m2s)\n a=np.zeros(shape=1)+1.6 #Conversion Coefficient between stomatal conductance to water and carbon dioxide (unitless)\n ij=np.zeros(shape=1)+1.0 #leaf angle index--downregulates jmax\n m=np.zeros(shape=1)+9.0 #ball-berry parameter (unitless)\n b=1.37 #Conversion Coefficient between boundary layer conductance to water and carbon dioxide \n u=5.0 #windspeed (m/s)\n qeff=0.32 #leaf quantum yield, electrons\n PAR=2000 #photosynthetic active radiation (umol/m2s)\n jm=2.68 #slope coefficient \n vwc_min=0.08 #minimum soil water content for photosynthesis to occur (permanent wilting point) (cm3/cm3) \n vwc_max=0.68 #maximum soil water content where increases in soil water do not affect photosynthesis (field capacity?) (cm3/cm3)\n q=0.2 #parameter for soil water affect on photosynthesis (unitless)\n \n \n #------constant variable params for sensitivty analysis-----#\n \n chl_c=np.zeros(shape=1)+(np.mean([396,465,476])) #Chlorophyll Content of the Leaf (umol chl/m2)\n ht_c=np.zeros(shape=1)+(np.mean([9.2,19.5,20.0])) #Temperature of the Leaf (K)\n dia_c=np.zeros(shape=1)+(np.mean([1.4,2.3,2.6])/100.) 
#Mean diameter or size of leaf (m)\n na_c=np.zeros(shape=1)+(np.mean([2.5,5.6,6.3])) #leaf nitrogen (g N/ m2)\n t_c=np.zeros(shape=1)+15.0 #temp (C)\n\n\n #-----which timeseries should I use--based on factorial meadow type---#\n \n na_min=[na_dm_min_inter, na_dm_min_inter, na_wm_min_inter, na_wm_min_inter]\n na_max=[na_dm_max_inter, na_dm_max_inter, na_wm_max_inter, na_wm_max_inter]\n\n vwc_type=[vwc_dm,vwc_dm,vwc_wm,vwc_wm]\n temp_type=[surtemp_dm,surtemp_dm,surtemp_wm,surtemp_wm]\n\n \n A_tot_all=[]\n \n chl_mean=[[395.7132],[475.8913],[395.7132],[475.8913]]\n chl_sd=[[24.410199999999975],[29.185099999999977],[24.410199999999975],[29.185099999999977]]\n dia_mean=[[1.6/100.],[3.0/100.],[1.6/100.],[3.0/100.]]\n dia_sd=[[0.9/100.0],[1.2/100.0],[0.9/100.0],[1.2/100.0]]\n ht_mean=[[9.183549],[19.98519],[9.183549],[19.98519]]\n ht_sd=[[1.5],[3.1],[1.5],[3.1]]\n \n depth=[0.2,0.2,0.2,0.4,0.4,0.4]\n \n\n#---------------Import Variable Parameter Arrays from Leaf Parameter File---------------#\n for iii in range(len(chl_mean)):\n A_tot=0\n for time in range(129):\n params=monte_carlo(chl_mean[iii], chl_sd[iii], dia_mean[iii], dia_sd[iii], [na_min[iii][time]], [na_max[iii][time]], ht_mean[iii], ht_sd[iii]) \n A_day=[]\n for xx in range(len(params)):\n for yy in range(len(params[xx])):\n for key,val in params[xx][yy].items():\n exec(key + '=val')\n \n #set variable parameters constant if I specify this above\n if 'na' in const_params[ii]:\n na=na_c\n if 'dia' in const_params[ii]:\n dia=dia_c\n if 'chl' in const_params[ii]:\n chl=chl_c\n if 'ht' in const_params[ii]:\n ht=ht_c\n \n \n #------calculate vapor pressure-----#\n pa_v=611*np.exp((17.27*temp_type[iii][time])/(temp_type[iii][time]+237.3)) #saturation vapor pressure of air (Pa)\n ea_str=pa_con_atmfrac(pa_v,3528) #saturation vapor pressure of air (Pa-->umol h20/mol air)\n ea=rh*ea_str #vapor pressure (umol h2O/mol air) \n \n \n #correct for leaf temperatures using leaf height\n \n t_diff=18-0.4*ht\n \n tl=temp_type[iii][time]+t_diff \n \n z=depth[iii]\n \n #---------------Photosynthesis Function---------------#\n \n #alter this line of code for when implementing different photosynthesis functions\n wue, nue, A, E, cs, ci, gsw, gs, gbw, gb, gm, cc,dd =photo(tk_25,ekc,eko,etau,ev,ej,toptv,toptj,na, qeff, PAR,tl,ea,chl,ij,kc25,ko25,o,ca,rh,m,a,frnr,flnr,ra,jm,g0,b,dia,u,q,vwc_min,vwc_max,vwc_type[iii][time],z)\n \n \n #test to make sure wue and nue are positive at not 'nan'\n if wue[0]==-999 and nue[0]==-999:\n \n continue \n \n if np.isnan(A[0]):\n A[0]=0.0\n \n A_day+=[(A[0]*3600*6)/1000000.*44.]\n \n\n A_tot+=np.mean(A_day)\n\n \n \n A_tot_all+=[A_tot]\n print A_tot_all\n\n \n \nprint A_tot_all \n \n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382680,"cells":{"repo_name":{"kind":"string","value":"tu-rbo/omip"},"path":{"kind":"string","value":"shape_reconstruction/src/plot_statistics.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1935"},"content":{"kind":"string","value":"#!/usr/bin/python\n##############\n# View the statistics for a single experiment (bag file)\n#\n# Statistics are plotted separately, i.e. 
one window per object per variant,\n# showing precision and recall.\n\n\nimport numpy as np\nimport matplotlib.pylab as plt\nimport matplotlib\nmatplotlib.rcParams['ps.useafm'] = True\nmatplotlib.rcParams['pdf.use14corefonts'] = True\nmatplotlib.rcParams['text.usetex'] = True\n\nimport sys\nimport os\nimport os.path\n\n\ndef plot_statistics(folder):\n for f in os.listdir(folder):\n if f[-3:] != \"txt\":\n continue\n filename = os.path.join(folder, f)\n \n data = np.genfromtxt(filename, dtype=float, delimiter=' ', names=True)\n\n precision = data['tp'] / (data['tp']+data['fp'])\n recall = data['tp'] / (data['tp']+data['fn'])\n seg_acc = data['tp'] / (data['tp']+data['fp']+data['fn'])\n \n time = data['time'] - data['time'][0]\n \n # clean up\n precision[np.where(np.isnan(precision))] = 0.\n recall[np.where(np.isnan(recall))] = 0.\n seg_acc[np.where(np.isnan(seg_acc))] = 0.\n\n plt.figure(figsize=(7.5,5))\n for y in np.arange(0.0, 1.1, 0.2): \n plt.plot(time, [y] * len(time), \"--\", lw=0.5, color=\"black\", alpha=0.3) \n \n plt.plot(time, seg_acc, \"k\", label=\"Segmentation Accuracy\", lw=3.0)\n plt.plot(time, precision, \"b\", label=\"Precision\", lw=2.0, ls=\"--\")\n plt.plot(time, recall, \"r\", label=\"Recall\", lw=2.0, ls=\"--\")\n \n plt.xlim(0, data['time'][-1] - data['time'][0])\n plt.ylim(-0.1, 1.1)\n plt.xlabel(\"time\")\n \n plt.title(os.path.basename(filename[:-3]))\n plt.legend(loc=4)\n \n img_path = os.path.join(folder, f[:-3]+\"pdf\")\n print (\"Saving image at %s\" % img_path)\n plt.savefig(img_path, bbox_inches=\"tight\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print \"Usage: plot_statistics.py \"\n sys.exit()\n\n print (\"Starting plot statistics\")\n plot_statistics(sys.argv[1])\n\n plt.show()\n\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382681,"cells":{"repo_name":{"kind":"string","value":"nrhine1/scikit-learn"},"path":{"kind":"string","value":"examples/decomposition/plot_kernel_pca.py"},"copies":{"kind":"string","value":"353"},"size":{"kind":"string","value":"2011"},"content":{"kind":"string","value":"\"\"\"\n==========\nKernel PCA\n==========\n\nThis example shows that Kernel PCA is able to find a projection of the data\nthat makes data linearly separable.\n\"\"\"\nprint(__doc__)\n\n# Authors: Mathieu Blondel\n# Andreas Mueller\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.decomposition import PCA, KernelPCA\nfrom sklearn.datasets import make_circles\n\nnp.random.seed(0)\n\nX, y = make_circles(n_samples=400, factor=.3, noise=.05)\n\nkpca = KernelPCA(kernel=\"rbf\", fit_inverse_transform=True, gamma=10)\nX_kpca = kpca.fit_transform(X)\nX_back = kpca.inverse_transform(X_kpca)\npca = PCA()\nX_pca = pca.fit_transform(X)\n\n# Plot results\n\nplt.figure()\nplt.subplot(2, 2, 1, aspect='equal')\nplt.title(\"Original space\")\nreds = y == 0\nblues = y == 1\n\nplt.plot(X[reds, 0], X[reds, 1], \"ro\")\nplt.plot(X[blues, 0], X[blues, 1], \"bo\")\nplt.xlabel(\"$x_1$\")\nplt.ylabel(\"$x_2$\")\n\nX1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))\nX_grid = np.array([np.ravel(X1), np.ravel(X2)]).T\n# projection on the first principal component (in the phi space)\nZ_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)\nplt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')\n\nplt.subplot(2, 2, 2, aspect='equal')\nplt.plot(X_pca[reds, 0], X_pca[reds, 1], \"ro\")\nplt.plot(X_pca[blues, 0], X_pca[blues, 1], \"bo\")\nplt.title(\"Projection by 
PCA\")\nplt.xlabel(\"1st principal component\")\nplt.ylabel(\"2nd component\")\n\nplt.subplot(2, 2, 3, aspect='equal')\nplt.plot(X_kpca[reds, 0], X_kpca[reds, 1], \"ro\")\nplt.plot(X_kpca[blues, 0], X_kpca[blues, 1], \"bo\")\nplt.title(\"Projection by KPCA\")\nplt.xlabel(\"1st principal component in space induced by $\\phi$\")\nplt.ylabel(\"2nd component\")\n\nplt.subplot(2, 2, 4, aspect='equal')\nplt.plot(X_back[reds, 0], X_back[reds, 1], \"ro\")\nplt.plot(X_back[blues, 0], X_back[blues, 1], \"bo\")\nplt.title(\"Original space after inverse transform\")\nplt.xlabel(\"$x_1$\")\nplt.ylabel(\"$x_2$\")\n\nplt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)\n\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382682,"cells":{"repo_name":{"kind":"string","value":"cjayb/mne-python"},"path":{"kind":"string","value":"mne/decoding/receptive_field.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"20189"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Authors: Chris Holdgraf \n# Eric Larson \n\n# License: BSD (3-clause)\n\nimport numbers\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .base import get_coef, BaseEstimator, _check_estimator\nfrom .time_delaying_ridge import TimeDelayingRidge\nfrom ..fixes import is_regressor\nfrom ..utils import _validate_type, verbose\n\n\nclass ReceptiveField(BaseEstimator):\n \"\"\"Fit a receptive field model.\n\n This allows you to fit an encoding model (stimulus to brain) or a decoding\n model (brain to stimulus) using time-lagged input features (for example, a\n spectro- or spatio-temporal receptive field, or STRF).\n\n Parameters\n ----------\n tmin : float\n The starting lag, in seconds (or samples if ``sfreq`` == 1).\n tmax : float\n The ending lag, in seconds (or samples if ``sfreq`` == 1).\n Must be >= tmin.\n sfreq : float\n The sampling frequency used to convert times into samples.\n feature_names : array, shape (n_features,) | None\n Names for input features to the model. If None, feature names will\n be auto-generated from the shape of input data after running `fit`.\n estimator : instance of sklearn.base.BaseEstimator | float | None\n The model used in fitting inputs and outputs. This can be any\n scikit-learn-style model that contains a fit and predict method. If a\n float is passed, it will be interpreted as the ``alpha`` parameter\n to be passed to a Ridge regression model. If `None`, then a Ridge\n regression model with an alpha of 0 will be used.\n fit_intercept : bool | None\n If True (default), the sample mean is removed before fitting.\n If ``estimator`` is a :class:`sklearn.base.BaseEstimator`,\n this must be None or match ``estimator.fit_intercept``.\n scoring : ['r2', 'corrcoef']\n Defines how predictions will be scored. Currently must be one of\n 'r2' (coefficient of determination) or 'corrcoef' (the correlation\n coefficient).\n patterns : bool\n If True, inverse coefficients will be computed upon fitting using the\n covariance matrix of the inputs, and the cross-covariance of the\n inputs/outputs, according to [5]_. Defaults to False.\n n_jobs : int | str\n Number of jobs to run in parallel. Can be 'cuda' if CuPy\n is installed properly and ``estimator is None``.\n\n .. 
versionadded:: 0.18\n edge_correction : bool\n If True (default), correct the autocorrelation coefficients for\n non-zero delays for the fact that fewer samples are available.\n Disabling this speeds up performance at the cost of accuracy\n depending on the relationship between epoch length and model\n duration. Only used if ``estimator`` is float or None.\n\n .. versionadded:: 0.18\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation `\n for more).\n\n Attributes\n ----------\n coef_ : array, shape ([n_outputs, ]n_features, n_delays)\n The coefficients from the model fit, reshaped for easy visualization.\n During :meth:`mne.decoding.ReceptiveField.fit`, if ``y`` has one\n dimension (time), the ``n_outputs`` dimension here is omitted.\n patterns_ : array, shape ([n_outputs, ]n_features, n_delays)\n If fit, the inverted coefficients from the model.\n delays_ : array, shape (n_delays,), dtype int\n The delays used to fit the model, in indices. To return the delays\n in seconds, use ``self.delays_ / self.sfreq``\n valid_samples_ : slice\n The rows to keep during model fitting after removing rows with\n missing values due to time delaying. This can be used to get an\n output equivalent to using :func:`numpy.convolve` or\n :func:`numpy.correlate` with ``mode='valid'``.\n\n See Also\n --------\n mne.decoding.TimeDelayingRidge\n\n Notes\n -----\n For a causal system, the encoding model will have significant\n non-zero values only at positive lags. In other words, lags point\n backward in time relative to the input, so positive lags correspond\n to previous input time samples, while negative lags correspond to\n future input time samples.\n\n References\n ----------\n .. [1] Theunissen, F. E. et al. Estimating spatio-temporal receptive\n fields of auditory and visual neurons from their responses to\n natural stimuli. Network 12, 289-316 (2001).\n\n .. [2] Willmore, B. & Smyth, D. Methods for first-order kernel\n estimation: simple-cell receptive fields from responses to\n natural scenes. Network 14, 553-77 (2003).\n\n .. [3] Crosse, M. J., Di Liberto, G. M., Bednar, A. & Lalor, E. C. (2016).\n The Multivariate Temporal Response Function (mTRF) Toolbox:\n A MATLAB Toolbox for Relating Neural Signals to Continuous Stimuli.\n Frontiers in Human Neuroscience 10, 604.\n doi:10.3389/fnhum.2016.00604\n\n .. [4] Holdgraf, C. R. et al. Rapid tuning shifts in human auditory cortex\n enhance speech intelligibility. Nature Communications,\n 7, 13654 (2016). doi:10.1038/ncomms13654\n\n .. [5] Haufe, S., Meinecke, F., Goergen, K., Daehne, S., Haynes, J.-D.,\n Blankertz, B., & Biessmann, F. (2014). On the interpretation of\n weight vectors of linear models in multivariate neuroimaging.\n NeuroImage, 87, 96-110. doi:10.1016/j.neuroimage.2013.10.067\n \"\"\"\n\n @verbose\n def __init__(self, tmin, tmax, sfreq, feature_names=None, estimator=None,\n fit_intercept=None, scoring='r2', patterns=False,\n n_jobs=1, edge_correction=True, verbose=None):\n self.feature_names = feature_names\n self.sfreq = float(sfreq)\n self.tmin = tmin\n self.tmax = tmax\n self.estimator = 0. 
if estimator is None else estimator\n self.fit_intercept = fit_intercept\n self.scoring = scoring\n self.patterns = patterns\n self.n_jobs = n_jobs\n self.edge_correction = edge_correction\n self.verbose = verbose\n\n def __repr__(self): # noqa: D105\n s = \"tmin, tmax : (%.3f, %.3f), \" % (self.tmin, self.tmax)\n estimator = self.estimator\n if not isinstance(estimator, str):\n estimator = type(self.estimator)\n s += \"estimator : %s, \" % (estimator,)\n if hasattr(self, 'coef_'):\n if self.feature_names is not None:\n feats = self.feature_names\n if len(feats) == 1:\n s += \"feature: %s, \" % feats[0]\n else:\n s += \"features : [%s, ..., %s], \" % (feats[0], feats[-1])\n s += \"fit: True\"\n else:\n s += \"fit: False\"\n if hasattr(self, 'scores_'):\n s += \"scored (%s)\" % self.scoring\n return \"\" % s\n\n def _delay_and_reshape(self, X, y=None):\n \"\"\"Delay and reshape the variables.\"\"\"\n if not isinstance(self.estimator_, TimeDelayingRidge):\n # X is now shape (n_times, n_epochs, n_feats, n_delays)\n X = _delay_time_series(X, self.tmin, self.tmax, self.sfreq,\n fill_mean=self.fit_intercept)\n X = _reshape_for_est(X)\n # Concat times + epochs\n if y is not None:\n y = y.reshape(-1, y.shape[-1], order='F')\n return X, y\n\n @verbose\n def fit(self, X, y):\n \"\"\"Fit a receptive field model.\n\n Parameters\n ----------\n X : array, shape (n_times[, n_epochs], n_features)\n The input features for the model.\n y : array, shape (n_times[, n_epochs][, n_outputs])\n The output features for the model.\n\n Returns\n -------\n self : instance\n The instance so you can chain operations.\n \"\"\"\n if self.scoring not in _SCORERS.keys():\n raise ValueError('scoring must be one of %s, got'\n '%s ' % (sorted(_SCORERS.keys()), self.scoring))\n from sklearn.base import clone\n X, y, _, self._y_dim = self._check_dimensions(X, y)\n\n if self.tmin > self.tmax:\n raise ValueError('tmin (%s) must be at most tmax (%s)'\n % (self.tmin, self.tmax))\n # Initialize delays\n self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq)\n\n # Define the slice that we should use in the middle\n self.valid_samples_ = _delays_to_slice(self.delays_)\n\n if isinstance(self.estimator, numbers.Real):\n if self.fit_intercept is None:\n self.fit_intercept = True\n estimator = TimeDelayingRidge(\n self.tmin, self.tmax, self.sfreq, alpha=self.estimator,\n fit_intercept=self.fit_intercept, n_jobs=self.n_jobs,\n edge_correction=self.edge_correction)\n elif is_regressor(self.estimator):\n estimator = clone(self.estimator)\n if self.fit_intercept is not None and \\\n estimator.fit_intercept != self.fit_intercept:\n raise ValueError(\n 'Estimator fit_intercept (%s) != initialization '\n 'fit_intercept (%s), initialize ReceptiveField with the '\n 'same fit_intercept value or use fit_intercept=None'\n % (estimator.fit_intercept, self.fit_intercept))\n self.fit_intercept = estimator.fit_intercept\n else:\n raise ValueError('`estimator` must be a float or an instance'\n ' of `BaseEstimator`,'\n ' got type %s.' 
% type(self.estimator))\n self.estimator_ = estimator\n del estimator\n _check_estimator(self.estimator_)\n\n # Create input features\n n_times, n_epochs, n_feats = X.shape\n n_outputs = y.shape[-1]\n n_delays = len(self.delays_)\n\n # Update feature names if we have none\n if ((self.feature_names is not None) and\n (len(self.feature_names) != n_feats)):\n raise ValueError('n_features in X does not match feature names '\n '(%s != %s)' % (n_feats, len(self.feature_names)))\n\n # Create input features\n X, y = self._delay_and_reshape(X, y)\n\n self.estimator_.fit(X, y)\n coef = get_coef(self.estimator_, 'coef_') # (n_targets, n_features)\n shape = [n_feats, n_delays]\n if self._y_dim > 1:\n shape.insert(0, -1)\n self.coef_ = coef.reshape(shape)\n\n # Inverse-transform model weights\n if self.patterns:\n if isinstance(self.estimator_, TimeDelayingRidge):\n cov_ = self.estimator_.cov_ / float(n_times * n_epochs - 1)\n y = y.reshape(-1, y.shape[-1], order='F')\n else:\n X = X - X.mean(0, keepdims=True)\n cov_ = np.cov(X.T)\n del X\n\n # Inverse output covariance\n if y.ndim == 2 and y.shape[1] != 1:\n y = y - y.mean(0, keepdims=True)\n inv_Y = linalg.pinv(np.cov(y.T))\n else:\n inv_Y = 1. / float(n_times * n_epochs - 1)\n del y\n\n # Inverse coef according to Haufe's method\n # patterns has shape (n_feats * n_delays, n_outputs)\n coef = np.reshape(self.coef_, (n_feats * n_delays, n_outputs))\n patterns = cov_.dot(coef.dot(inv_Y))\n self.patterns_ = patterns.reshape(shape)\n\n return self\n\n def predict(self, X):\n \"\"\"Generate predictions with a receptive field.\n\n Parameters\n ----------\n X : array, shape (n_times[, n_epochs], n_channels)\n The input features for the model.\n\n Returns\n -------\n y_pred : array, shape (n_times[, n_epochs][, n_outputs])\n The output predictions. \"Note that valid samples (those\n unaffected by edge artifacts during the time delaying step) can\n be obtained using ``y_pred[rf.valid_samples_]``.\n \"\"\"\n if not hasattr(self, 'delays_'):\n raise ValueError('Estimator has not been fit yet.')\n X, _, X_dim = self._check_dimensions(X, None, predict=True)[:3]\n del _\n # convert to sklearn and back\n pred_shape = X.shape[:-1]\n if self._y_dim > 1:\n pred_shape = pred_shape + (self.coef_.shape[0],)\n X, _ = self._delay_and_reshape(X)\n y_pred = self.estimator_.predict(X)\n y_pred = y_pred.reshape(pred_shape, order='F')\n shape = list(y_pred.shape)\n if X_dim <= 2:\n shape.pop(1) # epochs\n extra = 0\n else:\n extra = 1\n shape = shape[:self._y_dim + extra]\n y_pred.shape = shape\n return y_pred\n\n def score(self, X, y):\n \"\"\"Score predictions generated with a receptive field.\n\n This calls ``self.predict``, then masks the output of this\n and ``y` with ``self.mask_prediction_``. Finally, it passes\n this to a :mod:`sklearn.metrics` scorer.\n\n Parameters\n ----------\n X : array, shape (n_times[, n_epochs], n_channels)\n The input features for the model.\n y : array, shape (n_times[, n_epochs][, n_outputs])\n Used for scikit-learn compatibility.\n\n Returns\n -------\n scores : list of float, shape (n_outputs,)\n The scores estimated by the model for each output (e.g. 
mean\n R2 of ``predict(X)``).\n \"\"\"\n # Create our scoring object\n scorer_ = _SCORERS[self.scoring]\n\n # Generate predictions, then reshape so we can mask time\n X, y = self._check_dimensions(X, y, predict=True)[:2]\n n_times, n_epochs, n_outputs = y.shape\n y_pred = self.predict(X)\n y_pred = y_pred[self.valid_samples_]\n y = y[self.valid_samples_]\n\n # Re-vectorize and call scorer\n y = y.reshape([-1, n_outputs], order='F')\n y_pred = y_pred.reshape([-1, n_outputs], order='F')\n assert y.shape == y_pred.shape\n scores = scorer_(y, y_pred, multioutput='raw_values')\n return scores\n\n def _check_dimensions(self, X, y, predict=False):\n X_dim = X.ndim\n y_dim = y.ndim if y is not None else 0\n if X_dim == 2:\n # Ensure we have a 3D input by adding singleton epochs dimension\n X = X[:, np.newaxis, :]\n if y is not None:\n if y_dim == 1:\n y = y[:, np.newaxis, np.newaxis] # epochs, outputs\n elif y_dim == 2:\n y = y[:, np.newaxis, :] # epochs\n else:\n raise ValueError('y must be shape (n_times[, n_epochs]'\n '[,n_outputs], got %s' % (y.shape,))\n elif X.ndim == 3:\n if y is not None:\n if y.ndim == 2:\n y = y[:, :, np.newaxis] # Add an outputs dim\n elif y.ndim != 3:\n raise ValueError('If X has 3 dimensions, '\n 'y must have 2 or 3 dimensions')\n else:\n raise ValueError('X must be shape (n_times[, n_epochs],'\n ' n_features), got %s' % (X.shape,))\n if y is not None:\n if X.shape[0] != y.shape[0]:\n raise ValueError('X and y do not have the same n_times\\n'\n '%s != %s' % (X.shape[0], y.shape[0]))\n if X.shape[1] != y.shape[1]:\n raise ValueError('X and y do not have the same n_epochs\\n'\n '%s != %s' % (X.shape[1], y.shape[1]))\n if predict and y.shape[-1] != len(self.estimator_.coef_):\n raise ValueError('Number of outputs does not match'\n ' estimator coefficients dimensions')\n return X, y, X_dim, y_dim\n\n\ndef _delay_time_series(X, tmin, tmax, sfreq, fill_mean=False):\n \"\"\"Return a time-lagged input time series.\n\n Parameters\n ----------\n X : array, shape (n_times[, n_epochs], n_features)\n The time series to delay. Must be 2D or 3D.\n tmin : int | float\n The starting lag.\n tmax : int | float\n The ending lag.\n Must be >= tmin.\n sfreq : int | float\n The sampling frequency of the series. Defaults to 1.0.\n fill_mean : bool\n If True, the fill value will be the mean along the time dimension\n of the feature, and each cropped and delayed segment of data\n will be shifted to have the same mean value (ensuring that mean\n subtraction works properly). If False, the fill value will be zero.\n\n Returns\n -------\n delayed : array, shape(n_times[, n_epochs][, n_features], n_delays)\n The delayed data. It has the same shape as X, with an extra dimension\n appended to the end.\n\n Examples\n --------\n >>> tmin, tmax = -0.1, 0.2\n >>> sfreq = 10.\n >>> x = np.arange(1, 6)\n >>> x_del = _delay_time_series(x, tmin, tmax, sfreq)\n >>> print(x_del) # doctest:+SKIP\n [[2. 1. 0. 0.]\n [3. 2. 1. 0.]\n [4. 3. 2. 1.]\n [5. 4. 3. 2.]\n [0. 5. 4. 
3.]]\n \"\"\"\n _check_delayer_params(tmin, tmax, sfreq)\n delays = _times_to_delays(tmin, tmax, sfreq)\n # Iterate through indices and append\n delayed = np.zeros(X.shape + (len(delays),))\n if fill_mean:\n mean_value = X.mean(axis=0)\n if X.ndim == 3:\n mean_value = np.mean(mean_value, axis=0)\n delayed[:] = mean_value[:, np.newaxis]\n for ii, ix_delay in enumerate(delays):\n # Create zeros to populate w/ delays\n if ix_delay < 0:\n out = delayed[:ix_delay, ..., ii]\n use_X = X[-ix_delay:]\n elif ix_delay > 0:\n out = delayed[ix_delay:, ..., ii]\n use_X = X[:-ix_delay]\n else: # == 0\n out = delayed[..., ii]\n use_X = X\n out[:] = use_X\n if fill_mean:\n out[:] += (mean_value - use_X.mean(axis=0))\n return delayed\n\n\ndef _times_to_delays(tmin, tmax, sfreq):\n \"\"\"Convert a tmin/tmax in seconds to delays.\"\"\"\n # Convert seconds to samples\n delays = np.arange(int(np.round(tmin * sfreq)),\n int(np.round(tmax * sfreq) + 1))\n return delays\n\n\ndef _delays_to_slice(delays):\n \"\"\"Find the slice to be taken in order to remove missing values.\"\"\"\n # Negative values == cut off rows at the end\n min_delay = None if delays[-1] <= 0 else delays[-1]\n # Positive values == cut off rows at the end\n max_delay = None if delays[0] >= 0 else delays[0]\n return slice(min_delay, max_delay)\n\n\ndef _check_delayer_params(tmin, tmax, sfreq):\n \"\"\"Check delayer input parameters. For future custom delay support.\"\"\"\n _validate_type(sfreq, 'numeric', '`sfreq`')\n\n for tlim in (tmin, tmax):\n _validate_type(tlim, 'numeric', 'tmin/tmax')\n if not tmin <= tmax:\n raise ValueError('tmin must be <= tmax')\n\n\ndef _reshape_for_est(X_del):\n \"\"\"Convert X_del to a sklearn-compatible shape.\"\"\"\n n_times, n_epochs, n_feats, n_delays = X_del.shape\n X_del = X_del.reshape(n_times, n_epochs, -1) # concatenate feats\n X_del = X_del.reshape(n_times * n_epochs, -1, order='F')\n return X_del\n\n\n# Create a correlation scikit-learn-style scorer\ndef _corr_score(y_true, y, multioutput=None):\n from scipy.stats import pearsonr\n assert multioutput == 'raw_values'\n for this_y in (y_true, y):\n if this_y.ndim != 2:\n raise ValueError('inputs must be shape (samples, outputs), got %s'\n % (this_y.shape,))\n return np.array([pearsonr(y_true[:, ii], y[:, ii])[0]\n for ii in range(y.shape[-1])])\n\n\ndef _r2_score(y_true, y, multioutput=None):\n from sklearn.metrics import r2_score\n return r2_score(y_true, y, multioutput=multioutput)\n\n\n_SCORERS = {'r2': _r2_score, 'corrcoef': _corr_score}\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382683,"cells":{"repo_name":{"kind":"string","value":"IntelLabs/hpat"},"path":{"kind":"string","value":"sdc/datatypes/hpat_pandas_stringmethods_functions.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"44761"},"content":{"kind":"string","value":"# *****************************************************************************\n# Copyright (c) 2020, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT 
HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\"\n\n| :class:`pandas.core.strings.StringMethods` functions and operators implementations in HPAT\n\n .. only:: developer\n\n This is autogenerated sources for all Unicode string functions supported by Numba.\n Currently tested 45 functions only. List of functions obtained automatically from\n `numba.types.misc.UnicodeType` class\n\n Example of the generated method (for method upper()):\n `hpat_pandas_stringmethods_upper_parallel_impl` is paralell version\n (required additional import mentioned in the body)\n\n @sdc_overload_method(StringMethodsType, 'upper')\n def hpat_pandas_stringmethods_upper(self):\n\n ty_checker = TypeChecker('Method stringmethods.upper().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_upper_parallel_impl(self):\n from numba.parfor import (init_prange, min_checker, internal_prange)\n\n init_prange()\n result = []\n item_count = len(self._data)\n min_checker(item_count)\n for i in internal_prange(item_count):\n item = self._data[i]\n item_method = item.upper()\n result.append(item_method)\n\n return pandas.Series(result)\n\n return hpat_pandas_stringmethods_upper_parallel_impl\n\n def hpat_pandas_stringmethods_upper_impl(self):\n result = []\n item_count = len(self._data)\n for i in range(item_count):\n item = self._data[i]\n item_method = item.upper()\n result.append(item_method)\n\n return pandas.Series(result)\n\n return hpat_pandas_stringmethods_upper_impl\n\n Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_split_filter\n\n\"\"\"\n\n\nimport numpy\nimport pandas\n\nimport numba\nfrom numba.types import (Boolean, Integer, NoneType,\n Omitted, StringLiteral, UnicodeType)\n\nfrom sdc.utilities.sdc_typing_utils import TypeChecker\nfrom sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType\nfrom sdc.utilities.utils import sdc_overload_method\nfrom sdc.hiframes.api import get_nan_mask\nfrom sdc.str_arr_ext import str_arr_set_na_by_mask, create_str_arr_from_list\n\n_hpat_pandas_stringmethods_autogen_global_dict = {\n 'pandas': pandas,\n 'numpy': numpy,\n 'numba': numba,\n 'StringMethodsType': StringMethodsType,\n 'TypeChecker': TypeChecker\n}\n\n_hpat_pandas_stringmethods_functions_params = {\n 'cat': ', others=None, sep=None, na_rep=None, join=\"left\"',\n 'center': ', width, fillchar=\" \"',\n 'contains': ', pat, case=True, flags=0, na=numpy.nan, regex=True',\n 'count': ', pat, flags=0',\n 'decode': ', encoding, errors=\"strict\"',\n 'encode': ', encoding, errors=\"strict\"',\n 'endswith': ', pat, na=numpy.nan',\n 'extractall': ', pat, flags=0',\n 'extract': ', pat, flags=0, expand=True',\n 'findall': ', pat, flags=0',\n 'find': ', sub, start=0, end=None',\n 'get': ', i',\n 
'get_dummies': ', sep=\"|\"',\n 'index': ', sub, start=0, end=None',\n 'join': ', sep',\n 'ljust': ', width, fillchar=\" \"',\n 'lstrip': ', to_strip=None',\n 'match': ', pat, case=True, flags=0, na=numpy.nan',\n 'normalize': ', form',\n 'pad': ', width, side=\"left\", fillchar=\" \"',\n 'partition': ', sep=\" \", expand=True',\n 'repeat': ', repeats',\n 'replace': ', pat, repl, n=-1, case=None, flags=0, regex=True',\n 'rfind': ', sub, start=0, end=None',\n 'rindex': ', sub, start=0, end=None',\n 'rjust': ', width, fillchar=\" \"',\n 'rpartition': ', sep=\" \", expand=True',\n 'rsplit': ', pat=None, n=-1, expand=False',\n 'rstrip': ', to_strip=None',\n 'slice_replace': ', start=None, stop=None, repl=None',\n 'slice': ', start=None, stop=None, step=None',\n 'split': ', pat=None, n=-1, expand=False',\n 'startswith': ', pat, na=numpy.nan',\n 'strip': ', to_strip=None',\n 'translate': ', table',\n 'wrap': ', width',\n 'zfill': ', width',\n}\n\n_hpat_pandas_stringmethods_functions_template = \"\"\"\n# @sdc_overload_method(StringMethodsType, '{methodname}')\ndef hpat_pandas_stringmethods_{methodname}(self{methodparams}):\n \\\"\\\"\\\"\n Pandas Series method :meth:`pandas.core.strings.StringMethods.{methodname}()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. only:: developer\n\n Test: python -m sdc.runtests sdc.tests.test_strings.TestStrings.test_str2str\n python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str2str\n python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_get\n python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_replace_noregex\n python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_split\n python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_contains_regex\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n other: {methodparams}\n input arguments decription in\n https://pandas.pydata.org/pandas-docs/version/0.25/reference/series.html#string-handling\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \\\"\\\"\\\"\n\n ty_checker = TypeChecker('Method {methodname}().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_{methodname}_impl(self{methodparams}):\n item_count = len(self._data)\n result = [''] * item_count\n # result = numba.typed.List.empty_list(numba.types.unicode_type)\n\n for it in range(item_count):\n item = self._data._data[it]\n if len(item) > 0:\n result[it] = item.{methodname}({methodparams_call})\n else:\n result[it] = item\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_{methodname}_impl\n\"\"\"\n\n\n@sdc_overload_method(StringMethodsType, 'center')\ndef hpat_pandas_stringmethods_center(self, width, fillchar=' '):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.center\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_center.py\n :language: python\n :lines: 27-\n :caption: Filling left and right side of strings in the Series with an additional character\n :name: ex_series_str_center\n\n .. command-output:: python ./series/str/series_str_center.py\n :cwd: ../../../examples\n\n .. 
todo:: Add support of 32-bit Unicode for `str.center()`\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.center()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. only:: developer\n\n Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_center\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n width: :obj:`int`\n Minimum width of resulting string\n fillchar: :obj:`str`\n Additional character for filling, default is whitespace\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method center().')\n ty_checker.check(self, StringMethodsType)\n\n if not isinstance(width, Integer):\n ty_checker.raise_exc(width, 'int', 'width')\n\n accepted_types = (Omitted, StringLiteral, UnicodeType)\n if not isinstance(fillchar, accepted_types) and fillchar != ' ':\n ty_checker.raise_exc(fillchar, 'str', 'fillchar')\n\n def hpat_pandas_stringmethods_center_impl(self, width, fillchar=' '):\n item_count = len(self._data)\n result = [''] * item_count\n for idx, item in enumerate(self._data._data):\n result[idx] = item.center(width, fillchar)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_center_impl\n\n\n@sdc_overload_method(StringMethodsType, 'endswith')\ndef hpat_pandas_stringmethods_endswith(self, pat, na=None):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.endswith\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_endswith.py\n :language: python\n :lines: 27-\n :caption: Test if the end of each string element matches a string\n :name: ex_series_str_endswith\n\n .. command-output:: python ./series/str/series_str_endswith.py\n :cwd: ../../../examples\n\n .. todo::\n - Add support of matching the end of each string by a pattern\n - Add support of parameter ``na``\n\n .. seealso::\n `str.endswith `_\n Python standard library string method.\n :ref:`Series.str.startswith `\n Same as endswith, but tests the start of string.\n :ref:`Series.str.contains `\n Tests if string element contains a pattern.\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.endswith()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. 
only:: developer\n\n Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_endswith\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n pat: :obj:`str`\n Character sequence\n na: :obj:`bool`\n Object shown if element tested is not a string\n *unsupported*\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method endswith().')\n ty_checker.check(self, StringMethodsType)\n\n if not isinstance(pat, (StringLiteral, UnicodeType)):\n ty_checker.raise_exc(pat, 'str', 'pat')\n\n if not isinstance(na, (Boolean, NoneType, Omitted)) and na is not None:\n ty_checker.raise_exc(na, 'bool', 'na')\n\n def hpat_pandas_stringmethods_endswith_impl(self, pat, na=None):\n if na is not None:\n msg = 'Method endswith(). The object na\\n expected: None'\n raise ValueError(msg)\n\n item_endswith = len(self._data)\n result = numpy.empty(item_endswith, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.endswith(pat)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_endswith_impl\n\n\n@sdc_overload_method(StringMethodsType, 'find')\ndef hpat_pandas_stringmethods_find(self, sub, start=0, end=None):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.find\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_find.py\n :language: python\n :lines: 27-\n :caption: Return lowest indexes in each strings in the Series\n :name: ex_series_str_find\n\n .. command-output:: python ./series/str/series_str_find.py\n :cwd: ../../../examples\n\n .. todo:: Add support of parameters ``start`` and ``end``\n\n .. seealso::\n :ref:`Series.str.rfind `\n Return highest indexes in each strings.\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.find()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. only:: developer\n\n Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_find\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n sub: :obj:`str`\n Substring being searched\n start: :obj:`int`\n Left edge index\n *unsupported*\n end: :obj:`int`\n Right edge index\n *unsupported*\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method find().')\n ty_checker.check(self, StringMethodsType)\n\n if not isinstance(sub, (StringLiteral, UnicodeType)):\n ty_checker.raise_exc(sub, 'str', 'sub')\n\n accepted_types = (Integer, NoneType, Omitted)\n if not isinstance(start, accepted_types) and start != 0:\n ty_checker.raise_exc(start, 'None, int', 'start')\n\n if not isinstance(end, accepted_types) and end is not None:\n ty_checker.raise_exc(end, 'None, int', 'end')\n\n def hpat_pandas_stringmethods_find_impl(self, sub, start=0, end=None):\n if start != 0:\n raise ValueError('Method find(). The object start\\n expected: 0')\n if end is not None:\n raise ValueError('Method find(). 
The object end\\n expected: None')\n\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.int64)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.find(sub)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_find_impl\n\n\n@sdc_overload_method(StringMethodsType, 'isupper')\ndef hpat_pandas_stringmethods_isupper(self):\n ty_checker = TypeChecker('Method isupper().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_isupper_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.isupper()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_isupper_impl\n\n\n@sdc_overload_method(StringMethodsType, 'len')\ndef hpat_pandas_stringmethods_len(self):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.len\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_len.py\n :language: python\n :lines: 27-\n :caption: Compute the length of each element in the Series\n :name: ex_series_str_len\n\n .. command-output:: python ./series/str/series_str_len.py\n :cwd: ../../../examples\n\n .. seealso::\n `str.len`\n Python built-in function returning the length of an object.\n :ref:`Series.size `\n Returns the length of the Series.\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.len()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. only:: developer\n\n Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str_len1\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method len().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_len_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.int64)\n for idx, item in enumerate(self._data._data):\n result[idx] = len(item)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_len_impl\n\n\n@sdc_overload_method(StringMethodsType, 'ljust')\ndef hpat_pandas_stringmethods_ljust(self, width, fillchar=' '):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.ljust\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_ljust.py\n :language: python\n :lines: 27-\n :caption: Filling right side of strings in the Series with an additional character\n :name: ex_series_str_ljust\n\n .. command-output:: python ./series/str/series_str_ljust.py\n :cwd: ../../../examples\n\n .. 
todo:: Add support of 32-bit Unicode for `str.ljust()`\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.ljust()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. only:: developer\n\n Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_ljust\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n width: :obj:`int`\n Minimum width of resulting string\n fillchar: :obj:`str`\n Additional character for filling, default is whitespace\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method ljust().')\n ty_checker.check(self, StringMethodsType)\n\n if not isinstance(width, Integer):\n ty_checker.raise_exc(width, 'int', 'width')\n\n accepted_types = (Omitted, StringLiteral, UnicodeType)\n if not isinstance(fillchar, accepted_types) and fillchar != ' ':\n ty_checker.raise_exc(fillchar, 'str', 'fillchar')\n\n def hpat_pandas_stringmethods_ljust_impl(self, width, fillchar=' '):\n item_count = len(self._data)\n result = [''] * item_count\n for idx, item in enumerate(self._data._data):\n result[idx] = item.ljust(width, fillchar)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_ljust_impl\n\n\n@sdc_overload_method(StringMethodsType, 'rjust')\ndef hpat_pandas_stringmethods_rjust(self, width, fillchar=' '):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.rjust\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_rjust.py\n :language: python\n :lines: 27-\n :caption: Filling left side of strings in the Series with an additional character\n :name: ex_series_str_rjust\n\n .. command-output:: python ./series/str/series_str_rjust.py\n :cwd: ../../../examples\n\n .. todo:: Add support of 32-bit Unicode for `str.rjust()`\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.rjust()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. 
only:: developer\n\n Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_rjust\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n width: :obj:`int`\n Minimum width of resulting string\n fillchar: :obj:`str`\n Additional character for filling, default is whitespace\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method rjust().')\n ty_checker.check(self, StringMethodsType)\n\n if not isinstance(width, Integer):\n ty_checker.raise_exc(width, 'int', 'width')\n\n accepted_types = (Omitted, StringLiteral, UnicodeType)\n if not isinstance(fillchar, accepted_types) and fillchar != ' ':\n ty_checker.raise_exc(fillchar, 'str', 'fillchar')\n\n def hpat_pandas_stringmethods_rjust_impl(self, width, fillchar=' '):\n item_count = len(self._data)\n result = [''] * item_count\n for idx, item in enumerate(self._data._data):\n result[idx] = item.rjust(width, fillchar)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_rjust_impl\n\n\n@sdc_overload_method(StringMethodsType, 'startswith')\ndef hpat_pandas_stringmethods_startswith(self, pat, na=None):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.startswith\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_startswith.py\n :language: python\n :lines: 27-\n :caption: Test if the start of each string element matches a string\n :name: ex_series_str_startswith\n\n .. command-output:: python ./series/str/series_str_startswith.py\n :cwd: ../../../examples\n\n .. todo::\n - Add support of matching the start of each string by a pattern\n - Add support of parameter ``na``\n\n .. seealso::\n `str.startswith `_\n Python standard library string method.\n :ref:`Series.str.endswith `\n Same as startswith, but tests the end of string.\n :ref:`Series.str.contains `\n Tests if string element contains a pattern.\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.startswith()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. only:: developer\n\n Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_startswith\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n pat: :obj:`str`\n Character sequence\n na: :obj:`bool`\n Object shown if element tested is not a string\n *unsupported*\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method startswith().')\n ty_checker.check(self, StringMethodsType)\n\n if not isinstance(pat, (StringLiteral, UnicodeType)):\n ty_checker.raise_exc(pat, 'str', 'pat')\n\n if not isinstance(na, (Boolean, NoneType, Omitted)) and na is not None:\n ty_checker.raise_exc(na, 'bool', 'na')\n\n def hpat_pandas_stringmethods_startswith_impl(self, pat, na=None):\n if na is not None:\n msg = 'Method startswith(). 
The object na\\n expected: None'\n raise ValueError(msg)\n\n item_startswith = len(self._data)\n result = numpy.empty(item_startswith, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.startswith(pat)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_startswith_impl\n\n\n@sdc_overload_method(StringMethodsType, 'zfill')\ndef hpat_pandas_stringmethods_zfill(self, width):\n \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.zfill\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_zfill.py\n :language: python\n :lines: 27-\n :caption: Pad strings in the Series by prepending '0' characters\n :name: ex_series_str_zfill\n\n .. command-output:: python ./series/str/series_str_zfill.py\n :cwd: ../../../examples\n\n .. todo:: Add support of 32-bit Unicode for `str.zfill()`\n\n .. seealso::\n :ref:`Series.str.rjust `\n Fills the left side of strings with an arbitrary character.\n :ref:`Series.str.ljust `\n Fills the right side of strings with an arbitrary character.\n :ref:`Series.str.pad `\n Fills the specified sides of strings with an arbitrary character.\n :ref:`Series.str.center `\n Fills boths sides of strings with an arbitrary character.\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.zfill()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. 
only:: developer\n\n Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_zfill\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n width: :obj:`int`\n Minimum width of resulting string\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n \"\"\"\n\n ty_checker = TypeChecker('Method zfill().')\n ty_checker.check(self, StringMethodsType)\n\n if not isinstance(width, Integer):\n ty_checker.raise_exc(width, 'int', 'width')\n\n def hpat_pandas_stringmethods_zfill_impl(self, width):\n item_count = len(self._data)\n result = [''] * item_count\n for idx, item in enumerate(self._data._data):\n result[idx] = item.zfill(width)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_zfill_impl\n\n\ndef _hpat_pandas_stringmethods_autogen(method_name):\n \"\"\"\"\n The function generates a function for 'method_name' from source text that is created on the fly.\n \"\"\"\n\n params = \"\"\n params_call = \"\"\n\n # get function parameters by name\n params_dict = _hpat_pandas_stringmethods_functions_params.get(method_name)\n if params_dict is not None:\n params = params_dict\n\n if len(params) > 0:\n \"\"\"\n Translate parameters string for method\n\n For example:\n parameters for split(): ', pat=None, n=-1, expand=False'\n translate into: 'pat, n, expand'\n \"\"\"\n\n params_call_splitted = params.split(',')\n params_call_list = []\n for item in params_call_splitted:\n params_call_list.append(item.split(\"=\")[0])\n params_call = \",\".join(params_call_list)\n if len(params_call) > 1:\n params_call = params_call[2:]\n\n sourcecode = _hpat_pandas_stringmethods_functions_template.format(methodname=method_name,\n methodparams=params,\n methodparams_call=params_call)\n exec(sourcecode, _hpat_pandas_stringmethods_autogen_global_dict)\n\n global_dict_name = 'hpat_pandas_stringmethods_{methodname}'.format(methodname=method_name)\n return _hpat_pandas_stringmethods_autogen_global_dict[global_dict_name]\n\n\nsdc_pandas_series_str_docstring_template = \"\"\"\n Intel Scalable Dataframe Compiler User Guide\n ********************************************\n Pandas API: pandas.Series.str.{method_name}\n\n Limitations\n -----------\n Series elements are expected to be Unicode strings. Elements cannot be NaN.\n\n Examples\n --------\n .. literalinclude:: ../../../examples/series/str/series_str_{method_name}.py\n :language: python\n :lines: 27-\n :caption: {caption}\n :name: ex_series_str_{method_name}\n\n .. command-output:: python ./series/str/series_str_{method_name}.py\n :cwd: ../../../examples\n\n .. seealso::\n {seealso}\n\n Intel Scalable Dataframe Compiler Developer Guide\n *************************************************\n\n Pandas Series method :meth:`pandas.core.strings.StringMethods.{method_name}()` implementation.\n\n Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements.\n\n .. 
only:: developer\n\n Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_{method_name}_str\n\n Parameters\n ----------\n self: :class:`pandas.core.strings.StringMethods`\n input arg\n\n Returns\n -------\n :obj:`pandas.Series`\n returns :obj:`pandas.Series` object\n\"\"\"\n\n\n@sdc_overload_method(StringMethodsType, 'istitle')\ndef hpat_pandas_stringmethods_istitle(self):\n\n ty_checker = TypeChecker('Method istitle().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_istitle_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.istitle()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_istitle_impl\n\n\n@sdc_overload_method(StringMethodsType, 'isspace')\ndef hpat_pandas_stringmethods_isspace(self):\n\n ty_checker = TypeChecker('Method isspace().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_isspace_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.isspace()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_isspace_impl\n\n\n@sdc_overload_method(StringMethodsType, 'isalpha')\ndef hpat_pandas_stringmethods_isalpha(self):\n\n ty_checker = TypeChecker('Method isalpha().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_isalpha_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.isalpha()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_isalpha_impl\n\n\n@sdc_overload_method(StringMethodsType, 'islower')\ndef hpat_pandas_stringmethods_islower(self):\n\n ty_checker = TypeChecker('Method islower().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_islower_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.islower()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_islower_impl\n\n\n@sdc_overload_method(StringMethodsType, 'isalnum')\ndef hpat_pandas_stringmethods_isalnum(self):\n\n ty_checker = TypeChecker('Method isalnum().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_isalnum_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.isalnum()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_isalnum_impl\n\n\n@sdc_overload_method(StringMethodsType, 'isnumeric')\ndef hpat_pandas_stringmethods_isnumeric(self):\n ty_checker = TypeChecker('Method isnumeric().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_isnumeric_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.isnumeric()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return 
hpat_pandas_stringmethods_isnumeric_impl\n\n\n@sdc_overload_method(StringMethodsType, 'isdigit')\ndef hpat_pandas_stringmethods_isdigit(self):\n ty_checker = TypeChecker('Method isdigit().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_isdigit_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.isdigit()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_isdigit_impl\n\n\n@sdc_overload_method(StringMethodsType, 'isdecimal')\ndef hpat_pandas_stringmethods_isdecimal(self):\n ty_checker = TypeChecker('Method isdecimal().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_isdecimal_impl(self):\n item_count = len(self._data)\n result = numpy.empty(item_count, numba.types.boolean)\n for idx, item in enumerate(self._data._data):\n result[idx] = item.isdecimal()\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_isdecimal_impl\n\n\n@sdc_overload_method(StringMethodsType, 'capitalize')\ndef hpat_pandas_stringmethods_capitalize(self):\n ty_checker = TypeChecker('Method capitalize().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_capitalize_impl(self):\n mask = get_nan_mask(self._data._data)\n item_count = len(self._data)\n res_list = [''] * item_count\n for idx in numba.prange(item_count):\n res_list[idx] = self._data._data[idx].capitalize()\n str_arr = create_str_arr_from_list(res_list)\n result = str_arr_set_na_by_mask(str_arr, mask)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_capitalize_impl\n\n\n@sdc_overload_method(StringMethodsType, 'title')\ndef hpat_pandas_stringmethods_title(self):\n ty_checker = TypeChecker('Method title().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_title_impl(self):\n mask = get_nan_mask(self._data._data)\n item_count = len(self._data)\n res_list = [''] * item_count\n for idx in numba.prange(item_count):\n res_list[idx] = self._data._data[idx].title()\n str_arr = create_str_arr_from_list(res_list)\n result = str_arr_set_na_by_mask(str_arr, mask)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_title_impl\n\n\n@sdc_overload_method(StringMethodsType, 'swapcase')\ndef hpat_pandas_stringmethods_swapcase(self):\n ty_checker = TypeChecker('Method swapcase().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_swapcase_impl(self):\n mask = get_nan_mask(self._data._data)\n item_count = len(self._data)\n res_list = [''] * item_count\n for idx in numba.prange(item_count):\n res_list[idx] = self._data._data[idx].swapcase()\n str_arr = create_str_arr_from_list(res_list)\n result = str_arr_set_na_by_mask(str_arr, mask)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_swapcase_impl\n\n\n@sdc_overload_method(StringMethodsType, 'casefold')\ndef hpat_pandas_stringmethods_casefold(self):\n ty_checker = TypeChecker('Method casefold().')\n ty_checker.check(self, StringMethodsType)\n\n def hpat_pandas_stringmethods_casefold_impl(self):\n mask = get_nan_mask(self._data._data)\n item_count = len(self._data)\n res_list = [''] * item_count\n for idx in numba.prange(item_count):\n res_list[idx] = 
self._data._data[idx].casefold()\n str_arr = create_str_arr_from_list(res_list)\n result = str_arr_set_na_by_mask(str_arr, mask)\n\n return pandas.Series(result, self._data._index, name=self._data._name)\n\n return hpat_pandas_stringmethods_casefold_impl\n\n\nseealso_check_methods = \"\"\"\n :ref:`Series.str.isalpha `\n Check whether all characters are alphabetic.\n :ref:`Series.str.isnumeric `\n Check whether all characters are numeric.\n :ref:`Series.str.isalnum `\n Check whether all characters are alphanumeric.\n :ref:`Series.str.isdigit `\n Check whether all characters are digits.\n :ref:`Series.str.isdecimal `\n Check whether all characters are decimal.\n :ref:`Series.str.isspace `\n Check whether all characters are whitespace.\n :ref:`Series.str.islower `\n Check whether all characters are lowercase.\n :ref:`Series.str.isupper `\n Check whether all characters are uppercase.\n :ref:`Series.str.istitle `\n Check whether all characters are titlecase.\n \"\"\"\n\n\nseealso_transform_methods = \"\"\"\n :ref:`Series.str.lower `\n Converts all characters to lowercase.\n :ref:`Series.str.upper `\n Converts all characters to uppercase.\n :ref:`Series.str.title `\n Converts first character of each word to uppercase and remaining to lowercase.\n :ref:`Series.str.capitalize `\n Converts first character to uppercase and remaining to lowercase.\n :ref:`Series.str.swapcase `\n Converts uppercase to lowercase and lowercase to uppercase.\n :ref:`Series.str.casefold `\n Removes all case distinctions in the string.\n \"\"\"\n\n\nstringmethods_funcs = {\n 'istitle': {'method': hpat_pandas_stringmethods_istitle,\n 'caption': 'Check if each word start with an upper case letter',\n 'seealso': seealso_check_methods},\n 'isspace': {'method': hpat_pandas_stringmethods_isspace,\n 'caption': 'Check if all the characters in the text are whitespaces',\n 'seealso': seealso_check_methods},\n 'isalpha': {'method': hpat_pandas_stringmethods_isalpha,\n 'caption': 'Check whether all characters in each string are alphabetic',\n 'seealso': seealso_check_methods},\n 'islower': {'method': hpat_pandas_stringmethods_islower,\n 'caption': 'Check if all the characters in the text are alphanumeric',\n 'seealso': seealso_check_methods},\n 'isalnum': {'method': hpat_pandas_stringmethods_isalnum,\n 'caption': 'Check if all the characters in the text are alphanumeric',\n 'seealso': seealso_check_methods},\n 'isnumeric': {'method': hpat_pandas_stringmethods_isnumeric,\n 'caption': 'Check whether all characters in each string are numeric.',\n 'seealso': seealso_check_methods},\n 'isdigit': {'method': hpat_pandas_stringmethods_isdigit,\n 'caption': 'Check whether all characters in each string in the Series/Index are digits.',\n 'seealso': seealso_check_methods},\n 'isdecimal': {'method': hpat_pandas_stringmethods_isdecimal,\n 'caption': 'Check whether all characters in each string are decimal.',\n 'seealso': seealso_check_methods},\n 'isupper': {'method': hpat_pandas_stringmethods_isupper,\n 'caption': 'Check whether all characters in each string are uppercase.',\n 'seealso': seealso_check_methods},\n 'capitalize': {'method': hpat_pandas_stringmethods_capitalize,\n 'caption': 'Convert strings in the Series/Index to be capitalized.',\n 'seealso': seealso_transform_methods},\n 'title': {'method': hpat_pandas_stringmethods_title,\n 'caption': 'Convert strings in the Series/Index to titlecase.',\n 'seealso': seealso_transform_methods},\n 'swapcase': {'method': hpat_pandas_stringmethods_swapcase,\n 'caption': 'Convert strings in the 
Series/Index to be swapcased.',\n 'seealso': seealso_transform_methods},\n 'casefold': {'method': hpat_pandas_stringmethods_casefold,\n 'caption': 'Convert strings in the Series/Index to be casefolded.',\n 'seealso': seealso_transform_methods},\n}\n\n\nfor name, data in stringmethods_funcs.items():\n data['method'].__doc__ = sdc_pandas_series_str_docstring_template.format(**{'method_name': name,\n 'caption': data['caption'],\n 'seealso': data['seealso']})\n\n\n# _hpat_pandas_stringmethods_autogen_methods = sorted(dir(numba.types.misc.UnicodeType.__getattribute__.__qualname__))\n_hpat_pandas_stringmethods_autogen_methods = ['upper', 'lower', 'lstrip', 'rstrip', 'strip']\n\"\"\"\n This is the list of function which are autogenerated to be used from Numba directly.\n\"\"\"\n\n_hpat_pandas_stringmethods_autogen_exceptions = ['split', 'get', 'replace']\n\nfor method_name in _hpat_pandas_stringmethods_autogen_methods:\n if not (method_name.startswith('__') or method_name in _hpat_pandas_stringmethods_autogen_exceptions):\n sdc_overload_method(StringMethodsType, method_name)(_hpat_pandas_stringmethods_autogen(method_name))\n"},"license":{"kind":"string","value":"bsd-2-clause"}}},{"rowIdx":382684,"cells":{"repo_name":{"kind":"string","value":"abimannans/scikit-learn"},"path":{"kind":"string","value":"examples/datasets/plot_iris_dataset.py"},"copies":{"kind":"string","value":"283"},"size":{"kind":"string","value":"1928"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n=========================================================\nThe Iris Dataset\n=========================================================\nThis data sets consists of 3 different types of irises'\n(Setosa, Versicolour, and Virginica) petal and sepal\nlength, stored in a 150x4 numpy.ndarray\n\nThe rows being the samples and the columns being:\nSepal Length, Sepal Width, Petal Length\tand Petal Width.\n\nThe below plot uses the first two features.\nSee `here `_ for more\ninformation on this dataset.\n\"\"\"\nprint(__doc__)\n\n\n# Code source: Gaël Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\n\n# import some data to play with\niris = datasets.load_iris()\nX = iris.data[:, :2] # we only take the first two features.\nY = iris.target\n\nx_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\ny_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n\nplt.figure(2, figsize=(8, 6))\nplt.clf()\n\n# Plot the training points\nplt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)\nplt.xlabel('Sepal length')\nplt.ylabel('Sepal width')\n\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\nplt.xticks(())\nplt.yticks(())\n\n# To getter a better understanding of interaction of the dimensions\n# plot the first three PCA dimensions\nfig = plt.figure(1, figsize=(8, 6))\nax = Axes3D(fig, elev=-150, azim=110)\nX_reduced = PCA(n_components=3).fit_transform(iris.data)\nax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,\n cmap=plt.cm.Paired)\nax.set_title(\"First three PCA directions\")\nax.set_xlabel(\"1st eigenvector\")\nax.w_xaxis.set_ticklabels([])\nax.set_ylabel(\"2nd eigenvector\")\nax.w_yaxis.set_ticklabels([])\nax.set_zlabel(\"3rd 
eigenvector\")\nax.w_zaxis.set_ticklabels([])\n\nplt.show()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382685,"cells":{"repo_name":{"kind":"string","value":"mdeff/ntds_2017"},"path":{"kind":"string","value":"projects/reports/arab_springs/lib/clustering.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1319"},"content":{"kind":"string","value":"import pickle\nimport pandas as pd\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import spatial, sparse\nimport scipy.sparse.linalg\nimport scipy\nfrom sklearn.cluster import KMeans\nfrom pygsp import graphs, filters, plotting\nimport operator\nimport io\nfrom lib import models, graph, coarsening, utils\nget_ipython().magic('matplotlib inline')\n\ndef take_eigenvectors(laplacian, K=5):\n\teigenvalues, eigenvectors = sparse.linalg.eigsh(laplacian, k=K, which = 'SA')\n\treturn eigenvalues, eigenvectors\n\ndef do_kmeans(eigenvectors, K=5):\n\t#kmeans to find clusters\n\tkmeans = KMeans(n_clusters=K, random_state=0).fit(eigenvectors)\n\treturn kmeans.labels_\n\ndef label_data(df, kmeans_labels, K=5, NUMBER = 40):\n\tcounts = [dict() for x in range(K)]\n\tfor i, label in enumerate(kmeans_labels):\n\t\twords = df.loc[i].Tokens\n\t\tfor w in words:\n\t\t\ttry: \n\t\t\t\tcounts[label][w]+=1\n\t\t\texcept: counts[label][w]=1 \n\ttotal = {}\n\tfor k in range(K):\n\t\tsorted_words = sorted(counts[k], key=operator.itemgetter(1), reverse=True)[:NUMBER]\n\t\tfor w in sorted_words:\n\t\t\ttry:\n\t\t\t\ttotal[w]+=1\n\t\t\texcept: total[w]=1\n\tlabels = [[] for i in range(K)]\n\tfor k in range(K):\n\t\tsorted_words = sorted(counts[k], key=operator.itemgetter(1), reverse=True)[:NUMBER]\n\t\tfor w in sorted_words: \n\t\t\tif total[w]==1: labels[k].append(w)\n\treturn labels"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382686,"cells":{"repo_name":{"kind":"string","value":"bubae/gazeAssistRecognize"},"path":{"kind":"string","value":"train_model.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5633"},"content":{"kind":"string","value":"import init_path\r\nimport rcnnModule\r\nfrom sklearn import svm\r\nimport numpy as np\r\nimport os, sys, cv2\r\nimport csv\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.svm import LinearSVC\r\nfrom utils.timer import Timer\r\nfrom sklearn.externals import joblib\r\n\r\nCLASSES = ('__background__',\r\n\t\t 'aeroplane', 'bicycle', 'bird', 'boat',\r\n\t\t 'bottle', 'bus', 'car', 'cat', 'chair',\r\n\t\t 'cow', 'diningtable', 'dog', 'horse',\r\n\t\t 'motorbike', 'person', 'pottedplant',\r\n\t\t 'sheep', 'sofa', 'train', 'tvmonitor')\r\n\r\nNETS = {'vgg_cnn_m_1024': ('VGG_CNN_M_1024', 'vgg_cnn_m_1024_fast_rcnn_iter_40000.caffemodel'),\r\n\t\t'caffenet': ('CaffeNet', 'caffenet_fast_rcnn_iter_40000.caffemodel'),\r\n\t\t'zf': ('ZF', 'ZF_faster_rcnn_final.caffemodel')}\r\n\r\n\r\ndef init_train():\r\n\tprint \"Init Train...\"\r\n\tsetting = {}\r\n\tsetting['NET'] = 'zf'\r\n\tsetting['ROOT_DIR'] = os.getcwd()\r\n\tsetting['DATA_DIR'] = os.path.join(setting['ROOT_DIR'], 'data')\r\n\tsetting['IMAGE_DIR'] = os.path.join(setting['DATA_DIR'], 'imageNet', 'images')\r\n\tsetting['TEST_DIR'] = os.path.join(setting['DATA_DIR'], 'Test')\r\n\tsetting['DST_DIR'] = os.path.join(setting['DATA_DIR'], 'result')\r\n\tsetting['DST_MODEL_DIR'] = os.path.join(setting['DST_DIR'], 'imageNet', setting['NET'])\r\n\tsetting['featureDstDir'] = os.path.join(setting['DST_MODEL_DIR'], \"FEATURE\")\r\n\r\n\tcategories = sorted([f for f 
in os.listdir(setting['IMAGE_DIR'])])\r\n\tcategoryDirPath = [os.path.join(setting['IMAGE_DIR'], f) for f in categories]\r\n\r\n\tcid2name = categories\r\n\tcid2path = categoryDirPath\r\n\tiid2path = np.array([])\r\n\tiid2name = np.array([])\r\n\tiid2cid = np.array([])\r\n\r\n\tcNum = len(cid2path)\r\n\tcid = 0\r\n\tfor dirPath in categoryDirPath:\r\n\t\t# dirPath = cid2path[i]\r\n\t\timList = np.array(sorted([f for f in os.listdir(dirPath)]))\r\n\t\timPath = np.array([os.path.join(dirPath, im) for im in imList])\r\n\t\tiid2name = np.append(iid2name, imList)\r\n\t\tiid2path = np.append(iid2path, imPath)\r\n\t\tiid2cid = np.append(iid2cid, np.ones(len(imPath))*cid)\r\n\t\tcid = cid + 1\r\n\r\n\tiid2cid = iid2cid.astype(int)\r\n\tcid2name = np.array(cid2name)\r\n\tcid2path = np.array(cid2path)\r\n\r\n\treturn setting, cid2name, cid2path, iid2path, iid2name, iid2cid\r\n\r\ndef train_SVM(setting, y):\r\n\tprint \"train SVM\"\r\n\t# SVM Training\r\n\r\n\t# SVM options\r\n\t# svm_kernel \t= 'rbf';\r\n\t# svm_C\t\t\t\t\t\t\t= 1.0;\r\n\t# svm_loss\t\t\t\t\t\t= 'squared_hinge'\r\n\t# svm_penalty\t\t\t\t\t= 'l2'\r\n\t# svm_multi_class\t\t\t\t= 'ovr'\r\n\t# svm_random_state\t\t\t\t= 0 \r\n\r\n\r\n\tfilePath = os.path.join(setting['DST_MODEL_DIR'], \"svm_trained.pkl\")\r\n\ttry:\r\n\t\tclf = joblib.load(filePath)\r\n\t\tprint \"using trained model\"\t\t\r\n\texcept:\r\n\t\tprint \"building svm model\"\r\n\t\tX = loadDesc(setting)\r\n\t\tX = X.astype('float')\r\n\t\ttimer = Timer()\t\r\n\r\n\t\ttimer.tic()\r\n\t\tclf = OneVsRestClassifier(LinearSVC(random_state=0)).fit(X, y)\r\n\t\ttimer.toc()\r\n\t\tprint timer.total_time\r\n\r\n\t\tjoblib.dump(clf, filePath)\r\n\r\n\t# TEST\r\n\t# print clf.decision_function(X[0])\r\n\t# print clf.predict(X[5000])\r\n\treturn clf\r\n\r\ndef loadDesc(setting):\r\n\tprint \"Load Desc...\"\r\n\ttimer = Timer()\t\r\n\r\n\tfeatureDstDir = setting['featureDstDir']\r\n\tsortedList = sorted([ f for f in os.listdir(featureDstDir)])\r\n\tdescPath = np.array([ os.path.join(featureDstDir, x) for x in sortedList])\r\n\r\n\tX = []\r\n\tcnt = 0\r\n\tsize = len(descPath)\r\n\ttimer.tic()\r\n\tfor path in descPath:\r\n\t\tfeature = readCSV(path)\r\n\t\tX.append(feature)\r\n\t\tprint \"%d / %d file loaded\" % (cnt, size)\r\n\t\tcnt = cnt + 1\r\n\r\n\ttimer.toc()\r\n\r\n\t# print timer.total_time\r\n\r\n\tX = np.array(X)\r\n\tX = np.reshape(X, X.shape[0:2])\r\n\treturn X\r\n\t\r\ndef readCSV(path):\r\n\trlist = []\r\n\twith open(path, 'rb') as f:\r\n\t\treader = csv.reader(f, delimiter=' ')\r\n\t\tfor row in reader:\r\n\t\t\trlist.append(row)\r\n\r\n\treturn np.array(rlist)\r\n\r\ndef writeCSV(data, path):\r\n\twith open(path, 'wb') as fout:\r\n\t writer = csv.writer(fout, delimiter=',')\r\n\t for d in data:\r\n\t \twriter.writerow([d])\r\n\r\ndef featureExtraction(setting, cid2name, cid2path, iid2path, iid2name, iid2cid, rcnnModel):\r\n\tprint \"Feature Extraction..\"\r\n\tfeatureDstDir = setting['featureDstDir']\r\n\r\n\tif not os.path.exists(featureDstDir):\r\n\t\tos.makedirs(featureDstDir)\r\n\r\n\tnumIm = len(iid2path)\r\n\r\n\tdescExist = np.zeros(numIm)\r\n\tfList = np.array([ int(x[0:-4]) for x in os.listdir(featureDstDir) ])\r\n\r\n\tfor i in fList:\r\n\t\tdescExist[i] = 1\r\n\r\n\tnonDescList = np.where(descExist == 0)[0]\r\n\tnumDesc = len(nonDescList)\r\n\r\n\tif numDesc==0:\r\n\t\tprint \"No image to desc.\"\r\n\r\n\tcnt = 0\r\n\tfor i in nonDescList:\r\n\t\tprint i, cid2name[iid2cid[i]], iid2name[i],\": %0.2f percent finished\" % (cnt*100.0/numDesc)\r\n\t\tim = 
cv2.imread(iid2path[i])\r\n\t\t[features, bbox] = rcnnModel.getFeatureIm(im)\r\n\r\n\t\tfeature = np.mean(features, axis=0)\r\n\r\n\t\tfileName = \"%06d.csv\" % i\r\n\t\tfilePath = os.path.join(featureDstDir, fileName)\r\n\t\twriteCSV(feature, filePath)\r\n\t\tcnt = cnt+1\r\n\r\ndef TestModel(setting, rcnnModel, clf):\r\n\tprint \"Test trained Model\"\r\n\ttestDir = setting['TEST_DIR']\r\n\tsortedList = sorted([ f for f in os.listdir(testDir)])\r\n\r\n\timPath = np.array([ os.path.join(testDir, x) for x in sortedList])\r\n\tfor path in imPath:\r\n\t\tim = cv2.imread(path)\r\n\t\t[features, bbox] = rcnnModel.getFeatureIm(im)\r\n\r\n\t\tfeature = np.mean(features, axis=0)\r\n\r\n\t\tpredict_result = clf.predict(features)\r\n\r\n\t\tprint clf.predict(feature)\r\n\t\tprint len(np.where(predict_result==0)[0])\r\n\t# print imPath\r\n\r\ndef main():\r\n\r\n\t[setting, cid2name, cid2path, iid2path, iid2name, iid2cid] = init_train();\r\n\r\n\tprint \"rcnnModel loading...\"\r\n\trcnnModel = rcnnModule.RcnnObject('zf', False);\r\n\r\n\tfeatureExtraction(setting, cid2name, cid2path, iid2path, iid2name, iid2cid, rcnnModel)\r\n\r\n\tclf = train_SVM(setting, iid2cid)\r\n\r\n\tTestModel(setting, rcnnModel, clf)\r\n\r\nif __name__ == '__main__':\r\n\tmain()"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382687,"cells":{"repo_name":{"kind":"string","value":"robbymeals/scikit-learn"},"path":{"kind":"string","value":"sklearn/ensemble/partial_dependence.py"},"copies":{"kind":"string","value":"251"},"size":{"kind":"string","value":"15097"},"content":{"kind":"string","value":"\"\"\"Partial dependence plots for tree ensembles. \"\"\"\n\n# Authors: Peter Prettenhofer\n# License: BSD 3 clause\n\nfrom itertools import count\nimport numbers\n\nimport numpy as np\nfrom scipy.stats.mstats import mquantiles\n\nfrom ..utils.extmath import cartesian\nfrom ..externals.joblib import Parallel, delayed\nfrom ..externals import six\nfrom ..externals.six.moves import map, range, zip\nfrom ..utils import check_array\nfrom ..tree._tree import DTYPE\n\nfrom ._gradient_boosting import _partial_dependence_tree\nfrom .gradient_boosting import BaseGradientBoosting\n\n\ndef _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):\n \"\"\"Generate a grid of points based on the ``percentiles of ``X``.\n\n The grid is generated by placing ``grid_resolution`` equally\n spaced points between the ``percentiles`` of each column\n of ``X``.\n\n Parameters\n ----------\n X : ndarray\n The data\n percentiles : tuple of floats\n The percentiles which are used to construct the extreme\n values of the grid axes.\n grid_resolution : int\n The number of equally spaced points that are placed\n on the grid.\n\n Returns\n -------\n grid : ndarray\n All data points on the grid; ``grid.shape[1] == X.shape[1]``\n and ``grid.shape[0] == grid_resolution * X.shape[1]``.\n axes : seq of ndarray\n The axes with which the grid has been created.\n \"\"\"\n if len(percentiles) != 2:\n raise ValueError('percentile must be tuple of len 2')\n if not all(0. <= x <= 1. 
for x in percentiles):\n raise ValueError('percentile values must be in [0, 1]')\n\n axes = []\n for col in range(X.shape[1]):\n uniques = np.unique(X[:, col])\n if uniques.shape[0] < grid_resolution:\n # feature has low resolution use unique vals\n axis = uniques\n else:\n emp_percentiles = mquantiles(X, prob=percentiles, axis=0)\n # create axis based on percentiles and grid resolution\n axis = np.linspace(emp_percentiles[0, col],\n emp_percentiles[1, col],\n num=grid_resolution, endpoint=True)\n axes.append(axis)\n\n return cartesian(axes), axes\n\n\ndef partial_dependence(gbrt, target_variables, grid=None, X=None,\n percentiles=(0.05, 0.95), grid_resolution=100):\n \"\"\"Partial dependence of ``target_variables``.\n\n Partial dependence plots show the dependence between the joint values\n of the ``target_variables`` and the function represented\n by the ``gbrt``.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n gbrt : BaseGradientBoosting\n A fitted gradient boosting model.\n target_variables : array-like, dtype=int\n The target features for which the partial dependecy should be\n computed (size should be smaller than 3 for visual renderings).\n grid : array-like, shape=(n_points, len(target_variables))\n The grid of ``target_variables`` values for which the\n partial dependecy should be evaluated (either ``grid`` or ``X``\n must be specified).\n X : array-like, shape=(n_samples, n_features)\n The data on which ``gbrt`` was trained. It is used to generate\n a ``grid`` for the ``target_variables``. The ``grid`` comprises\n ``grid_resolution`` equally spaced points between the two\n ``percentiles``.\n percentiles : (low, high), default=(0.05, 0.95)\n The lower and upper percentile used create the extreme values\n for the ``grid``. 
Only if ``X`` is not None.\n grid_resolution : int, default=100\n The number of equally spaced points on the ``grid``.\n\n Returns\n -------\n pdp : array, shape=(n_classes, n_points)\n The partial dependence function evaluated on the ``grid``.\n For regression and binary classification ``n_classes==1``.\n axes : seq of ndarray or None\n The axes with which the grid has been created or None if\n the grid has been given.\n\n Examples\n --------\n >>> samples = [[0, 0, 2], [1, 0, 0]]\n >>> labels = [0, 1]\n >>> from sklearn.ensemble import GradientBoostingClassifier\n >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)\n >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)\n >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP\n (array([[-4.52..., 4.52...]]), [array([ 0., 1.])])\n \"\"\"\n if not isinstance(gbrt, BaseGradientBoosting):\n raise ValueError('gbrt has to be an instance of BaseGradientBoosting')\n if gbrt.estimators_.shape[0] == 0:\n raise ValueError('Call %s.fit before partial_dependence' %\n gbrt.__class__.__name__)\n if (grid is None and X is None) or (grid is not None and X is not None):\n raise ValueError('Either grid or X must be specified')\n\n target_variables = np.asarray(target_variables, dtype=np.int32,\n order='C').ravel()\n\n if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):\n raise ValueError('target_variables must be in [0, %d]'\n % (gbrt.n_features - 1))\n\n if X is not None:\n X = check_array(X, dtype=DTYPE, order='C')\n grid, axes = _grid_from_X(X[:, target_variables], percentiles,\n grid_resolution)\n else:\n assert grid is not None\n # dont return axes if grid is given\n axes = None\n # grid must be 2d\n if grid.ndim == 1:\n grid = grid[:, np.newaxis]\n if grid.ndim != 2:\n raise ValueError('grid must be 2d but is %dd' % grid.ndim)\n\n grid = np.asarray(grid, dtype=DTYPE, order='C')\n assert grid.shape[1] == target_variables.shape[0]\n\n n_trees_per_stage = gbrt.estimators_.shape[1]\n n_estimators = gbrt.estimators_.shape[0]\n pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,\n order='C')\n for stage in range(n_estimators):\n for k in range(n_trees_per_stage):\n tree = gbrt.estimators_[stage, k].tree_\n _partial_dependence_tree(tree, grid, target_variables,\n gbrt.learning_rate, pdp[k])\n\n return pdp, axes\n\n\ndef plot_partial_dependence(gbrt, X, features, feature_names=None,\n label=None, n_cols=3, grid_resolution=100,\n percentiles=(0.05, 0.95), n_jobs=1,\n verbose=0, ax=None, line_kw=None,\n contour_kw=None, **fig_kw):\n \"\"\"Partial dependence plots for ``features``.\n\n The ``len(features)`` plots are arranged in a grid with ``n_cols``\n columns. Two-way partial dependence plots are plotted as contour\n plots.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n gbrt : BaseGradientBoosting\n A fitted gradient boosting model.\n X : array-like, shape=(n_samples, n_features)\n The data on which ``gbrt`` was trained.\n features : seq of tuples or ints\n If seq[i] is an int or a tuple with one int value, a one-way\n PDP is created; if seq[i] is a tuple of two ints, a two-way\n PDP is created.\n feature_names : seq of str\n Name of each feature; feature_names[i] holds\n the name of the feature with index i.\n label : object\n The class label for which the PDPs should be computed.\n Only if gbrt is a multi-class model. 
Must be in ``gbrt.classes_``.\n n_cols : int\n The number of columns in the grid plot (default: 3).\n percentiles : (low, high), default=(0.05, 0.95)\n The lower and upper percentile used to create the extreme values\n for the PDP axes.\n grid_resolution : int, default=100\n The number of equally spaced points on the axes.\n n_jobs : int\n The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.\n Defaults to 1.\n verbose : int\n Verbose output during PD computations. Defaults to 0.\n ax : Matplotlib axis object, default None\n An axis object onto which the plots will be drawn.\n line_kw : dict\n Dict with keywords passed to the ``pylab.plot`` call.\n For one-way partial dependence plots.\n contour_kw : dict\n Dict with keywords passed to the ``pylab.plot`` call.\n For two-way partial dependence plots.\n fig_kw : dict\n Dict with keywords passed to the figure() call.\n Note that all keywords not recognized above will be automatically\n included here.\n\n Returns\n -------\n fig : figure\n The Matplotlib Figure object.\n axs : seq of Axis objects\n A seq of Axis objects, one for each subplot.\n\n Examples\n --------\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.ensemble import GradientBoostingRegressor\n >>> X, y = make_friedman1()\n >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)\n >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP\n ...\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib import transforms\n from matplotlib.ticker import MaxNLocator\n from matplotlib.ticker import ScalarFormatter\n\n if not isinstance(gbrt, BaseGradientBoosting):\n raise ValueError('gbrt has to be an instance of BaseGradientBoosting')\n if gbrt.estimators_.shape[0] == 0:\n raise ValueError('Call %s.fit before partial_dependence' %\n gbrt.__class__.__name__)\n\n # set label_idx for multi-class GBRT\n if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:\n if label is None:\n raise ValueError('label is not given for multi-class PDP')\n label_idx = np.searchsorted(gbrt.classes_, label)\n if gbrt.classes_[label_idx] != label:\n raise ValueError('label %s not in ``gbrt.classes_``' % str(label))\n else:\n # regression and binary classification\n label_idx = 0\n\n X = check_array(X, dtype=DTYPE, order='C')\n if gbrt.n_features != X.shape[1]:\n raise ValueError('X.shape[1] does not match gbrt.n_features')\n\n if line_kw is None:\n line_kw = {'color': 'green'}\n if contour_kw is None:\n contour_kw = {}\n\n # convert feature_names to list\n if feature_names is None:\n # if not feature_names use fx indices as name\n feature_names = [str(i) for i in range(gbrt.n_features)]\n elif isinstance(feature_names, np.ndarray):\n feature_names = feature_names.tolist()\n\n def convert_feature(fx):\n if isinstance(fx, six.string_types):\n try:\n fx = feature_names.index(fx)\n except ValueError:\n raise ValueError('Feature %s not in feature_names' % fx)\n return fx\n\n # convert features into a seq of int tuples\n tmp_features = []\n for fxs in features:\n if isinstance(fxs, (numbers.Integral,) + six.string_types):\n fxs = (fxs,)\n try:\n fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)\n except TypeError:\n raise ValueError('features must be either int, str, or tuple '\n 'of int/str')\n if not (1 <= np.size(fxs) <= 2):\n raise ValueError('target features must be either one or two')\n\n tmp_features.append(fxs)\n\n features = tmp_features\n\n names = []\n try:\n for fxs in features:\n l = []\n # explicit loop so \"i\" is 
bound for exception below\n for i in fxs:\n l.append(feature_names[i])\n names.append(l)\n except IndexError:\n raise ValueError('features[i] must be in [0, n_features) '\n 'but was %d' % i)\n\n # compute PD functions\n pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(partial_dependence)(gbrt, fxs, X=X,\n grid_resolution=grid_resolution,\n percentiles=percentiles)\n for fxs in features)\n\n # get global min and max values of PD grouped by plot type\n pdp_lim = {}\n for pdp, axes in pd_result:\n min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()\n n_fx = len(axes)\n old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))\n min_pd = min(min_pd, old_min_pd)\n max_pd = max(max_pd, old_max_pd)\n pdp_lim[n_fx] = (min_pd, max_pd)\n\n # create contour levels for two-way plots\n if 2 in pdp_lim:\n Z_level = np.linspace(*pdp_lim[2], num=8)\n\n if ax is None:\n fig = plt.figure(**fig_kw)\n else:\n fig = ax.get_figure()\n fig.clear()\n\n n_cols = min(n_cols, len(features))\n n_rows = int(np.ceil(len(features) / float(n_cols)))\n axs = []\n for i, fx, name, (pdp, axes) in zip(count(), features, names,\n pd_result):\n ax = fig.add_subplot(n_rows, n_cols, i + 1)\n\n if len(axes) == 1:\n ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)\n else:\n # make contour plot\n assert len(axes) == 2\n XX, YY = np.meshgrid(axes[0], axes[1])\n Z = pdp[label_idx].reshape(list(map(np.size, axes))).T\n CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,\n colors='k')\n ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],\n vmin=Z_level[0], alpha=0.75, **contour_kw)\n ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)\n\n # plot data deciles + axes labels\n deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))\n trans = transforms.blended_transform_factory(ax.transData,\n ax.transAxes)\n ylim = ax.get_ylim()\n ax.vlines(deciles, [0], 0.05, transform=trans, color='k')\n ax.set_xlabel(name[0])\n ax.set_ylim(ylim)\n\n # prevent x-axis ticks from overlapping\n ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))\n tick_formatter = ScalarFormatter()\n tick_formatter.set_powerlimits((-3, 4))\n ax.xaxis.set_major_formatter(tick_formatter)\n\n if len(axes) > 1:\n # two-way PDP - y-axis deciles + labels\n deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))\n trans = transforms.blended_transform_factory(ax.transAxes,\n ax.transData)\n xlim = ax.get_xlim()\n ax.hlines(deciles, [0], 0.05, transform=trans, color='k')\n ax.set_ylabel(name[1])\n # hline erases xlim\n ax.set_xlim(xlim)\n else:\n ax.set_ylabel('Partial dependence')\n\n if len(axes) == 1:\n ax.set_ylim(pdp_lim[1])\n axs.append(ax)\n\n fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,\n hspace=0.3)\n return fig, axs\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382688,"cells":{"repo_name":{"kind":"string","value":"BeatsonLab-MicrobialGenomics/DiscoPlot"},"path":{"kind":"string","value":"discoplot/DiscoPlot.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"23712"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# DiscoPlot: identify genomic rearrangements, misassemblies and sequencing\n# artefacts in NGS data\n# Copyright (C) 2013-2015 Mitchell Sullivan\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later 
version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# Mitchell Sullivan\n# mjsull@gmail.com\n# School of Chemistry & Molecular Biosciences\n# The University of Queensland\n# Brisbane, QLD 4072.\n# Australia\n\n__title__ = 'DiscoPlot'\n__version__ = '1.0.2'\n__description__ = (\"DiscoPlot: identify genomic rearrangements, misassemblies \"\n \"and sequencing artefacts in NGS data\")\n__author__ = 'Mitchell Sullivan'\n__license__ = 'GPLv3'\n__author_email__ = \"mjsull@gmail.com\"\n__url__ = 'https://github.com/BeatsonLab-MicrobialGenomics/DiscoPlot'\n\nimport argparse\nimport numpy\nimport sys\nimport subprocess\n\n\ndef read_sbam(args):\n import pysam\n if not args.bam_file is None:\n sam = pysam.Samfile(args.bam_file, 'rb')\n elif not args.sam_file:\n sam = pysam.Samfile(args.sam_file)\n global refpos\n global cuta\n global cutb\n cuta = 0\n cutb = float('inf')\n refpos = {}\n if not args.subsection is None:\n if len(args.subsection) == 1:\n refpos[args.subsection[0]] = 0\n totallength = None\n for i in range(0, len(sam.references)):\n if sam.references[i] == args.subsection[0]:\n totallength = sam.lengths[i]\n if totallength is None:\n sys.stderr.write('Selected reference not found.')\n sys.exit()\n elif len(args.subsection) == 2:\n refpos[sam.references[0]] = 0\n cuta = int(args.subsection[0])\n cutb = int(args.subsection[1])\n totallength = cutb - cuta\n elif len(args.subsection) == 3:\n refpos[args.subsection[0]] = 0\n cuta = int(args.subsection[1])\n cutb = int(args.subsection[2])\n totallength = cutb - cuta\n else:\n sys.stderr.write('Too many arguments given for subsection')\n sys.exit()\n if args.bin_size is None:\n args.bin_size = totallength / args.size + 1\n else:\n args.size = totallength / args.bin_size + 1\n else:\n references = sam.references\n reflengths = sam.lengths\n currpos = 0\n if args.bin_size is None:\n args.bin_size = sum(reflengths) / (args.size - (len(reflengths) -1) * (args.gap + 1)) + 1\n else:\n args.size = sum(map(lambda x: x/args.bin_size, reflengths)) + (len(reflengths) -1) * args.gap + 1\n for i in range(len(references)):\n refpos[references[i]] = currpos\n currpos += reflengths[i] / args.bin_size + args.gap\n global invgrid, dirgrid, unmapped_for, unmapped_rev\n unmapped_rev = {}\n unmapped_for = {}\n invgrid = {}\n dirgrid = {}\n for read in sam.fetch():\n ref = sam.getrname(read.tid)\n if ref in refpos:\n if read.is_read1:\n if cuta <= read.pos <= cutb:\n pos1 = (read.pos - cuta) / args.bin_size + refpos[ref]\n if read.mate_is_unmapped:\n if read.is_reverse:\n if pos1 in unmapped_rev:\n unmapped_rev[pos1] += 1\n else:\n unmapped_rev[pos1] = 1\n else:\n if pos1 in unmapped_for:\n unmapped_for[pos1] += 1\n else:\n unmapped_for[pos1] = 1\n else:\n mref = sam.getrname(read.rnext)\n if mref in refpos:\n if cuta <= read.pnext <= cutb:\n pos2 = (read.pnext - cuta) / args.bin_size + refpos[mref]\n if read.is_reverse:\n if read.mate_is_reverse:\n if pos1 < pos2:\n if pos2 in dirgrid and pos1 in dirgrid[pos2]:\n dirgrid[pos2][pos1] += 1\n elif pos2 in dirgrid:\n dirgrid[pos2][pos1] = 1\n else:\n dirgrid[pos2] = {pos1:1}\n else:\n if pos1 in dirgrid and pos2 in dirgrid[pos1]:\n dirgrid[pos1][pos2] += 1\n elif pos1 in dirgrid:\n dirgrid[pos1][pos2] 
= 1\n else:\n dirgrid[pos1] = {pos2:1}\n else:\n if pos2 in invgrid and pos1 in invgrid[pos2]:\n invgrid[pos2][pos1] += 1\n elif pos2 in invgrid:\n invgrid[pos2][pos1] = 1\n else:\n invgrid[pos2] = {pos1:1}\n else:\n if read.mate_is_reverse:\n if pos1 in invgrid and pos2 in invgrid[pos1]:\n invgrid[pos1][pos2] += 1\n elif pos1 in invgrid:\n invgrid[pos1][pos2] = 1\n else:\n invgrid[pos1] = {pos2:1}\n else:\n if pos1 < pos2:\n if pos1 in dirgrid and pos2 in dirgrid[pos1]:\n dirgrid[pos1][pos2] += 1\n elif pos1 in dirgrid:\n dirgrid[pos1][pos2] = 1\n else:\n dirgrid[pos1] = {pos2:1}\n else:\n if pos2 in dirgrid and pos1 in dirgrid[pos2]:\n dirgrid[pos2][pos1] += 1\n elif pos2 in dirgrid:\n dirgrid[pos2][pos1] = 1\n else:\n dirgrid[pos2] = {pos1:1}\n else:\n if read.mate_is_unmapped:\n ref = sam.getrname(read.tid)\n if ref in refpos:\n if cuta <= read.pos <= cutb:\n pos = (read.pos - cuta) / args.bin_size + refpos[ref]\n if read.is_reverse:\n if pos in unmapped_rev:\n unmapped_rev[pos] += 1\n else:\n unmapped_rev[pos] = 1\n else:\n if pos in unmapped_for:\n unmapped_for[pos] += 1\n else:\n unmapped_for[pos] = 1\n\n\ndef read_sing(args):\n readlen = None\n if not args.read_file is None:\n reads = open(args.read_file)\n first = True\n getfq = 0\n readlen = {}\n for line in reads:\n if first:\n first = False\n if line.startswith('@'):\n getfq = 2\n name = line.rstrip()[1:]\n seq = ''\n elif line.startswith('>'):\n readlen[name] = len(seq)\n name = line.rstrip()[1:]\n seq = ''\n elif getfq == 0:\n seq += line.rstrip()\n elif getfq == 1:\n readlen[name] = len(seq)\n name = line.rstrip()\n seq = ''\n elif getfq == 2:\n seq += line.rstrip()\n getfq = 3\n elif getfq == 3:\n getfq = 4\n elif getfq == 4:\n getfq = 1\n readlen[name] = len(seq)\n if not args.reference_file is None:\n ref = open(args.reference_file)\n first = True\n references = []\n reflengths = []\n for line in ref:\n if line.startswith('>'):\n if first:\n first = False\n else:\n references.append(name)\n reflengths.append(len(seq))\n name = line.rstrip()[1:]\n seq = ''\n else:\n seq += line\n references.append(name)\n reflengths.append(len(seq))\n else:\n blast = open(args.blast_file)\n refdict = {}\n for line in blast:\n if line.split()[1] in refdict:\n if max([int(line.split()[8]), int(line.split()[9])]) > refdict[line.split()[1]]:\n refdict[line.split()[1]] = max([int(line.split()[8]), int(line.split()[9])])\n else:\n refdict[line.split()[1]] = max([int(line.split()[8]), int(line.split()[9])])\n blast.close()\n references = []\n reflengths = []\n for i in refdict:\n references.append(i)\n reflengths.append(refdict[i])\n cuta = 0\n cutb = float('inf')\n refpos = {}\n if not args.subsection is None:\n if len(args.subsection) == 1:\n refpos[args.subsection[0]] = 0\n totallength = None\n for i in range(0, len(references)):\n if references[i] == args.subsection[0]:\n totallength = reflengths[i]\n if totallength is None:\n sys.stderr.write('Selected reference not found.')\n sys.exit()\n elif len(args.subsection) == 2:\n refpos[references[0]] = 0\n cuta = int(args.subsection[0])\n cutb = int(args.subsection[1])\n totallength = cutb - cuta\n elif len(args.subsection) == 3:\n refpos[args.subsection[0]] = 0\n cuta = int(args.subsection[0])\n cutb = int(args.subsection[1])\n totallength = cutb - cuta\n else:\n sys.stderr.write('Too many arguments given for subsection')\n sys.exit()\n if args.bin_size is None:\n args.bin_size = totallength / args.size\n else:\n args.size = totallength / args.bin_size\n else:\n currpos = 0\n if args.bin_size is 
None:\n args.bin_size = sum(reflengths) / (args.size - (len(reflengths) -1) * (args.gap + 1))\n else:\n args.size = sum(map(lambda x: x/args.bin_size, reflengths)) + (len(reflengths) -1) * args.gap\n for i in range(len(references)):\n refpos[references[i]] = currpos\n currpos += reflengths[i] / args.bin_size + args.gap\n global invgrid, dirgrid, unmapped_for, unmapped_rev\n unmapped_rev = {}\n unmapped_for = {}\n invgrid = {}\n dirgrid = {}\n blast = open(args.blast_file)\n lastquery = ''\n hits = []\n for line in blast:\n query, subject, ident, length, mm, indel, qstart, qstop, rstart, rstop, eval, bitscore = line.split()\n qstart, qstop, rstart, rstop, length, mm, indel = map(int, [qstart, qstop, rstart, rstop, length, mm, indel])\n if query != lastquery and lastquery != '':\n hits.sort(reverse=True)\n newhits = [hits[0]]\n qtaken = set()\n for i in range(hits[2], hits[3] + 1):\n qtaken.add(i)\n for i in hits[1:]:\n if i[:-3] == newhits[-1][:-3]:\n newhits.append(i)\n else:\n getit = False\n for j in range(hits[2], hits[3] + 1):\n if not j in qtaken:\n getit = True\n qtaken.add(j)\n if getit:\n newhits.append(i)\n anchor = None\n revseq = None\n for i in newhits:\n bitscore, length, qstart, qstop, rstart, rstop, subject = i\n if anchor is None:\n if rstart < rstop:\n anchor = rstart\n revseq = False\n else:\n anchor = rstop\n revseq = True\n if min(qtaken) >= args.unmapped:\n if revseq:\n if anchor in unmapped_for:\n unmapped_for[anchor] += 1\n else:\n unmapped_for[anchor] = 1\n else:\n if anchor in unmapped_rev:\n unmapped_rev[anchor] += 1\n else:\n unmapped_rev[anchor] = 1\n if max(qtaken) <= readlen[lastquery] - args.unmapped:\n if revseq:\n if anchor in unmapped_rev:\n unmapped_rev[anchor] += 1\n else:\n unmapped_rev[anchor] = 1\n else:\n if anchor in unmapped_for:\n unmapped_for[anchor] += 1\n else:\n unmapped_for[anchor] = 1\n lastxpos = None\n lastypos = None\n oldstart, oldstop = qstart, qstop\n if revseq:\n rstart, rstop = rstop, rstart\n qstart = readlen[lastquery] - qstop\n qstop = readlen[lastquery] - oldstart\n for j in range(qstart, qstop):\n xpos = refpos[subject] + (anchor + j - cuta) / args.bin_size\n ypos = refpos[subject] + (rstart + int(((j - qstart) * 1.0 / (qstop - qstart)) * (rstop - rstart))) / args.bin_size\n if xpos != lastxpos or ypos != lastypos:\n if rstart < rstop:\n if xpos in dirgrid:\n if ypos in dirgrid[xpos]:\n dirgrid[xpos][ypos] += 1\n else:\n dirgrid[xpos][ypos] = 1\n else:\n dirgrid[xpos] = {ypos:1}\n else:\n if xpos in invgrid:\n if ypos in invgrid[xpos]:\n invgrid[xpos][ypos] += 1\n else:\n invgrid[xpos][ypos] = 1\n else:\n invgrid[xpos] = {ypos:1}\n lastxpos, lastypos = xpos, ypos\n\n if ident >= args.min_ident and length >= args.min_length and subject in refpos and ((cuta <= rstart <= cutb) or (cuta <= rstop <= cutb)):\n hits.append((float(bitscore), length, qstart, qstop, rstart, rstop, subject))\n lastquery = query\n\n\ndef generate_blast(args):\n subprocess.Popen('makeblastdb -dbtype nucl -out ' + args.gen_blast + '.db -in ' +\n args.reference_file, shell=True, stdout=subprocess.PIPE).wait()\n subprocess.Popen('blastn -db ' + args.gen_blast + '.db -outfmt 6 -query ' +\n args.read_file + ' -out ' + args.gen_blast + '.out', shell=True).wait()\n args.blast_file = args.gen_blast + '.out'\n\n\ndef draw_dotplot(args):\n global refpos\n global cuta\n global cutb\n vals1, vals2 = [], []\n for i in invgrid:\n for j in invgrid[i]:\n vals1.append(invgrid[i][j])\n vals2.append(invgrid[i][j])\n for i in dirgrid:\n for j in dirgrid[i]:\n 
vals1.append(dirgrid[i][j])\n vals2.append(dirgrid[i][j])\n vals2 = numpy.array(vals2)\n for i in unmapped_rev:\n vals1.append(unmapped_rev[i])\n for i in unmapped_for:\n vals1.append(unmapped_for[i])\n vals1 = numpy.array(vals1)\n med = numpy.median(vals2)\n numvals = numpy.size(vals1)\n sizemod = 2000.0 / args.size / med\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111, aspect='equal')\n x = numpy.zeros(numvals, dtype='u4')\n y = numpy.zeros(numvals, dtype='u4')\n sizes = numpy.zeros(numvals, dtype='f4')\n colours = numpy.array(['x' for i in range(numvals)])\n count = 0\n for i in dirgrid:\n for j in dirgrid[i]:\n if args.max_hits >= dirgrid[i][j] >= args.min_hits:\n x[count] = i * args.bin_size + cuta\n y[count] = j * args.bin_size + cuta\n sizes[count] = dirgrid[i][j] * sizemod\n colours[count] = 'r'\n count += 1\n for i in invgrid:\n for j in invgrid[i]:\n if args.max_hits >= invgrid[i][j] >= args.min_hits:\n x[count] = i * args.bin_size + cuta\n y[count] = j * args.bin_size + cuta\n sizes[count] = invgrid[i][j] * sizemod\n colours[count] = 'b'\n count += 1\n for i in unmapped_for:\n if args.max_hits >= unmapped_for[i] >= args.min_hits:\n x[count] = cuta\n y[count] = i * args.bin_size + cuta\n sizes[count] = unmapped_for[i] * sizemod\n colours[count] = 'g'\n count += 1\n for i in unmapped_rev:\n if args.max_hits >= unmapped_rev[i] >= args.min_hits:\n x[count] = i * args.bin_size + cuta\n y[count] = cuta\n sizes[count] = unmapped_rev[i] * sizemod\n colours[count] = 'g'\n count += 1\n count1, count2, count3 = 0, 0, 0\n for i in colours:\n if i == 'b':\n count1 += 1\n elif i == 'r':\n count2 += 1\n elif i == 'g':\n count3 += 1\n ax.scatter(x, y, s=sizes, c=colours, edgecolor='none', alpha=0.3)\n sizes = []\n names = []\n for i in [10, 25, 50, 75, 90]:\n sizes.append(numpy.percentile(vals2, i))\n names.append(str(i) + '% Normal ' + str(sizes[-1]))\n names.append('50% Inverted ' + str(sizes[2]))\n a = plt.scatter(-100, -100, s=sizes[2] * sizemod, c='b', edgecolor='none', alpha=0.3)\n b = plt.scatter(-100, -100, s=sizes[0] * sizemod, c='r', edgecolor='none', alpha=0.3)\n c = plt.scatter(-100, -100, s=sizes[1] * sizemod, c='r', edgecolor='none', alpha=0.3)\n d = plt.scatter(-100, -100, s=sizes[2] * sizemod, c='r', edgecolor='none', alpha=0.3)\n e = plt.scatter(-100, -100, s=sizes[3] * sizemod, c='r', edgecolor='none', alpha=0.3)\n f = plt.scatter(-100, -100, s=sizes[4] * sizemod, c='r', edgecolor='none', alpha=0.3)\n leg = ax.legend([b, c, d, e, f, a], names, loc=4)\n leg.draggable(state=True)\n for i in refpos:\n if not refpos[i] == 0:\n ax.axhspan(refpos[i] * args.bin_size, refpos[i] * args.bin_size - args.gap * args.bin_size, facecolor='g', alpha=0.3)\n ax.axvspan(refpos[i] * args.bin_size, refpos[i] * args.bin_size - args.gap * args.bin_size, facecolor='g', alpha=0.3)\n if cutb == float('inf'):\n cutb = args.size * args.bin_size + cuta\n plt.xlim([cuta - args.bin_size * 10, cutb])\n plt.ylim([cuta - args.bin_size * 10, cutb])\n plt.grid(True)\n if not args.output_file is None:\n plt.savefig(args.output_file, dpi=args.image_quality)\n else:\n plt.show()\n\n\n\nparser = argparse.ArgumentParser(prog='DiscoPlot', formatter_class=argparse.RawDescriptionHelpFormatter, description='''\nDiscoPlot - read mapping visualisation in the large\n\nUSAGE: DiscoPlot -bam bamfile.bam -o output_file.bmp -size 5000\n Create a bmp file from a bamfile of paired-end reads with a width and height of 5000px\n DiscoPlot -r reads.fa -B blast_prefix -r reference -o output_file.png -bin bin_size\n 
Create a png file from reads.fa, generate blast file. Image size will be reference length / bin_size\n''', epilog=\"Thanks for using DiscoPlot\")\nparser.add_argument('-r', '--read_file', action='store', default=None, help='read file')\nparser.add_argument('-ref', '--reference_file', action='store', default=None, help='reference file')\nparser.add_argument('-bam', '--bam_file', action='store', default=None, help='bam file')\nparser.add_argument('-sam', '--sam_file', action='store', default=None, help='sam file')\nparser.add_argument('-B', '--gen_blast', action='store', default=None, help='Generate blast files, use argument as prefix for output.')\nparser.add_argument('-b', '--blast_file', action='store', default=None, help='Blast file (output format 6)')\nparser.add_argument('-o', '--output_file', action='store', default=None, help='output file [gif/bmp/png]')\nparser.add_argument('-s', '--size', action='store', type=int, default=None, help='Number of bins')\nparser.add_argument('-bin', '--bin_size', action='store', type=int, default=None, help='Bin size (in bp)')\nparser.add_argument('-g', '--gap', action='store', type=int, default=5, help='Gap size')\nparser.add_argument('-sub', '--subsection', nargs='+', action='store', default=None, help='Only display subection of genome [ref]/[min_cutoff max_cutoff]/[ref min_cutoff max_cutoff]')\nparser.add_argument('-c', '--min_hits', action='store', type=int, default=1, help='Min hits to be shown')\nparser.add_argument('-m', '--max_hits', action='store', type=float, default=float('inf'), help='Bins with more hits than this will be skipped.')\nparser.add_argument('-dpi', '--image_quality', action='store', type=int, default=1600, help='Image quality (in DPI)')\n\n\nargs = parser.parse_args()\nif args.size is None and args.bin_size is None:\n sys.stderr.write('Please give a image size or bin size.')\n sys.exit()\n\nif not args.gen_blast is None:\n if args.reference_file is None:\n sys.stderr.write('Please provide a reference file')\n sys.exit()\n if args.read_file is None:\n sys.stderr.write('Please provide a read file (FASTA)')\n sys.exit()\n generate_blast(args)\n\nif not args.output_file is None:\n import matplotlib\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\nif not args.size is None and not args.bin_size is None:\n sys.stderr.write('Only provide bin size or image size, not both.')\n sys.exit()\nif not args.sam_file is None or not args.bam_file is None:\n read_sbam(args)\nelif args.blast_file is None:\n sys.stderr.write('Please either generate or provide a BLAST comparison')\n sys.exit()\nelse:\n read_sing(args)\ndraw_dotplot(args)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382689,"cells":{"repo_name":{"kind":"string","value":"SSDS-Croatia/SSDS-2017"},"path":{"kind":"string","value":"Day-5/util.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5277"},"content":{"kind":"string","value":"import os, sys, gzip, math, urllib\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nclass Dataset:\n\n def __init__(self, data, labels=None):\n self.data = data \n if type(labels) == None:\n self.supervised = False \n else:\n self.supervised = True\n self.labels = labels \n\n self.n = len(data)\n\n self.batches_complete = 0\n self.position_in_epoch = 0 \n\n def next_batch(self, batch_size, return_labels=False):\n new_epoch = False\n if self.position_in_epoch + batch_size >= self.n:\n self.position_in_epoch = 0\n self.batches_complete += 
1\n            new_epoch = True\n\n        batch = self.data[self.position_in_epoch:self.position_in_epoch + batch_size]\n\n        if self.supervised and return_labels:\n            batch_labels = self.labels[self.position_in_epoch:self.position_in_epoch + batch_size]\n            batch = (batch, batch_labels)\n        self.position_in_epoch += batch_size\n\n        return new_epoch, batch\n\ndef plot(samples):\n    fig = plt.figure(figsize=(4, 4))\n    gs = gridspec.GridSpec(4, 4)\n    gs.update(wspace=0.05, hspace=0.05)\n\n    for i, sample in enumerate(samples):\n        ax = plt.subplot(gs[i])\n        plt.axis('off')\n        ax.set_xticklabels([])\n        ax.set_yticklabels([])\n        ax.set_aspect('equal')\n        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')\n\n    return fig\n\ndef plot_single(sample, epoch=0):\n    plt.axis('off')\n    ax = plt.gca()\n    ax.set_xticklabels([])\n    ax.set_yticklabels([])\n    ax.set_aspect('equal')\n    plt.imshow(sample.reshape(28, 28), cmap='Greys_r')\n\n\ndef download_mnist(data_folder, dataset):\n    \"\"\"\n    Download and extract the dataset\n    :param data_folder: root folder in which datasets are stored\n    :param dataset: dataset name\n    \"\"\"\n\n    image_files = ['train-images-idx3-ubyte.gz', 't10k-images-idx3-ubyte.gz']\n    label_files = ['train-labels-idx1-ubyte.gz', 't10k-labels-idx1-ubyte.gz']\n    url = 'http://yann.lecun.com/exdb/mnist/'\n    dataset_folder = os.path.join(data_folder, dataset)\n\n\n    if not os.path.exists(dataset_folder):
np.squeeze(images_in_square, 4)\n\n # Combine images to grid image\n new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))\n for col_i, col_images in enumerate(images_in_square):\n for image_i, image in enumerate(col_images):\n im = Image.fromarray(image, mode)\n new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))\n\n return new_im\n\ndef get_sample_images(data, dataset='mnist', n=25):\n \"\"\"\n Get a sample of n images from a dataset, able to be displayed with matplotlib\n :param data_dir: Root directory of the dataset\n :param dataset: \n \"\"\"\n # Display options\n if dataset == 'mnist':\n mode = 'L'\n else:\n mode = 'RGB'\n\n return data[:n], mode\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382690,"cells":{"repo_name":{"kind":"string","value":"miku/siskin"},"path":{"kind":"string","value":"docs/btag-2017/scripts/pie.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1614"},"content":{"kind":"string","value":"# coding: utf-8\n\n\"\"\"\nSources and sizes.\n\"\"\"\nimport base64\nimport json\n\nimport requests\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\n\naddr = base64.b64decode(\"\"\"aHR0cDovLzE3Mi4xOC4xMTMuNzo4MDg1L3NvbHIvYmlibGlv\"\"\")\n\n\ndef total():\n \"\"\" Return the total number of docs. \"\"\"\n r = requests.get('%s/select?wt=json&q=*:*' % (addr, label))\n if r.status_code >= 300:\n raise RuntimeError(\"got HTTP %s on %s\" % (r.status_code, r.url))\n doc = json.loads(r.text)\n return doc['response']['numFound']\n\nsources = (\n ('28', 'DOAJ'),\n ('48', 'WISO'),\n ('49', 'Crossref'),\n ('50', 'De Gruyter'),\n ('55', 'JSTOR'),\n ('60', 'Thieme'),\n ('85', 'Elsevier'),\n ('89', 'IEEE'),\n ('105', 'Springer'),\n ('121', 'Arxiv'),\n)\n\nlabels, names, sizes = [s[0] for s in sources], [s[1] for s in sources], []\n\nfor label in labels:\n r = requests.get('%s/select?wt=json&q=source_id:%s' % (addr, label))\n if r.status_code >= 300:\n raise RuntimeError(\"got HTTP %s on %s\" % (r.status_code, r.url))\n doc = json.loads(r.text)\n found = doc['response']['numFound']\n sizes.append(found)\n\nexplode = [0 for _ in range(len(labels))]\nexplode[2] = 0.1\n\nfig1, ax1 = plt.subplots()\n\ncmap = plt.get_cmap('Set1')\ncolors = [cmap(i) for i in np.linspace(0, 1, len(labels))]\n\npatches, texts = plt.pie(sizes, startangle=90, colors=colors, shadow=False, explode=explode)\nplt.legend(patches, names, loc=\"lower left\")\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\nplt.title('Article Metadata Index Sources (2017)')\nplt.savefig('pie.png')\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382691,"cells":{"repo_name":{"kind":"string","value":"pratapvardhan/pandas"},"path":{"kind":"string","value":"pandas/tests/indexes/timedeltas/test_timedelta_range.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"3021"},"content":{"kind":"string","value":"import pytest\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom pandas.tseries.offsets import Day, Second\nfrom pandas import to_timedelta, timedelta_range\n\n\nclass TestTimedeltas(object):\n\n def test_timedelta_range(self):\n\n expected = to_timedelta(np.arange(5), unit='D')\n result = timedelta_range('0 days', periods=5, freq='D')\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(11), unit='D')\n result = timedelta_range('0 days', '10 days', freq='D')\n tm.assert_index_equal(result, expected)\n\n 
expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()\n result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',\n freq='D')\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)\n result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(50), unit='T') * 30\n result = timedelta_range('0 days', freq='30T', periods=50)\n tm.assert_index_equal(result, expected)\n\n # GH 11776\n arr = np.arange(10).reshape(2, 5)\n df = pd.DataFrame(np.arange(10).reshape(2, 5))\n for arg in (arr, df):\n with tm.assert_raises_regex(TypeError, \"1-d array\"):\n to_timedelta(arg)\n for errors in ['ignore', 'raise', 'coerce']:\n with tm.assert_raises_regex(TypeError, \"1-d array\"):\n to_timedelta(arg, errors=errors)\n\n # issue10583\n df = pd.DataFrame(np.random.normal(size=(10, 4)))\n df.index = pd.timedelta_range(start='0s', periods=10, freq='s')\n expected = df.loc[pd.Timedelta('0s'):, :]\n result = df.loc['0s':, :]\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize('periods, freq', [\n (3, '2D'), (5, 'D'), (6, '19H12T'), (7, '16H'), (9, '12H')])\n def test_linspace_behavior(self, periods, freq):\n # GH 20976\n result = timedelta_range(start='0 days', end='4 days', periods=periods)\n expected = timedelta_range(start='0 days', end='4 days', freq=freq)\n tm.assert_index_equal(result, expected)\n\n def test_errors(self):\n # not enough params\n msg = ('Of the four parameters: start, end, periods, and freq, '\n 'exactly three must be specified')\n with tm.assert_raises_regex(ValueError, msg):\n timedelta_range(start='0 days')\n\n with tm.assert_raises_regex(ValueError, msg):\n timedelta_range(end='5 days')\n\n with tm.assert_raises_regex(ValueError, msg):\n timedelta_range(periods=2)\n\n with tm.assert_raises_regex(ValueError, msg):\n timedelta_range()\n\n # too many params\n with tm.assert_raises_regex(ValueError, msg):\n timedelta_range(start='0 days', end='5 days', periods=10, freq='H')\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382692,"cells":{"repo_name":{"kind":"string","value":"tttr222/autumn_ner"},"path":{"kind":"string","value":"test.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5277"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport sys, os, random, pickle, json, codecs, time\nimport numpy as np\nimport sklearn.metrics as skm\nimport argparse\nfrom model import AutumnNER\nfrom utility import load_dataset\nfrom utility import load_embeddings\nfrom utility import report_performance\n\nparser = argparse.ArgumentParser(description='Train and evaluate BiLSTM on a given dataset')\nparser.add_argument('--datapath', dest='datapath', type=str,\n default='CoNLL2003', \n help='path to the datasets')\nparser.add_argument('--embeddings', dest='embeddings_path', type=str,\n default=None, \n help='path to the testing dataset')\nparser.add_argument('--optimizer', dest='optimizer', type=str,\n default='default', \n help='choose the optimizer: default, rmsprop, adagrad, adam.')\nparser.add_argument('--batch-size', dest='batch_size', type=int, \n default=64, help='number of instances in a minibatch')\nparser.add_argument('--num-epoch', dest='num_epoch', type=int, \n default=50, help='number of passes over the training set')\nparser.add_argument('--learning-rate', dest='learning_rate', type=str,\n default='default', help='learning 
rate')\nparser.add_argument('--embedding-factor', dest='embedding_factor', type=float,\n                    default=1.0, help='learning rate multiplier for embeddings')\nparser.add_argument('--decay', dest='decay_rate', type=float,\n                    default=0.95, help='exponential decay for learning rate')\nparser.add_argument('--keep-prob', dest='keep_prob', type=float,\n                    default=0.7, help='dropout keep rate')\nparser.add_argument('--num-cores', dest='num_cores', type=int,\n                    default=5, help='number of CPU cores to use')\nparser.add_argument('--seed', dest='seed', type=int,\n                    default=1, help='seed for training')\n\ndef main(args):\n    print >> sys.stderr, \"Running Autumn NER model testing module\"\n    print >> sys.stderr, args\n    random.seed(args.seed)\n\n    trainset = []\n    devset = []\n    testset_standalone = {}\n    word_vocab = []\n\n    print \"Loading dataset..\"\n    assert(os.path.isdir(args.datapath))\n    for fname in sorted(os.listdir(args.datapath)):\n        if os.path.isdir(fname):\n            continue\n\n        if fname.endswith('.ner.txt'):\n            dataset, vocab = load_dataset(os.path.join(args.datapath,fname))\n            word_vocab += vocab\n            if fname.endswith('train.ner.txt'):\n                trainset += dataset\n            if fname.endswith('dev.ner.txt'):\n                devset += dataset\n            if fname.endswith('test.ner.txt'):\n                testset_standalone[fname] = dataset\n\n            print \"Loaded {} instances with a vocab size of {} from {}\".format(len(dataset),len(vocab),fname)\n\n    word_vocab = sorted(set(word_vocab))\n    if args.embeddings_path:\n        embeddings = load_embeddings(args.embeddings_path, word_vocab, 300)\n    else:\n        embeddings = None\n\n    print \"Loaded {}/{} instances from training/dev set\".format(len(trainset),len(devset))\n\n    X_train, y_train = zip(*trainset)\n    X_dev, y_dev = zip(*devset)\n\n    labels = []\n    for lb in y_train + y_dev:\n        labels += lb\n\n    labels = sorted(set(labels))\n\n    # Create the model, passing in relevant parameters\n    bilstm = AutumnNER(labels=labels,\n                       word_vocab=word_vocab,\n                       word_embeddings=embeddings,\n                       optimizer=args.optimizer,\n                       embedding_size=300,\n                       char_embedding_size=32,\n                       lstm_dim=200,\n                       num_cores=args.num_cores,\n                       embedding_factor=args.embedding_factor,\n                       learning_rate=args.learning_rate,\n                       decay_rate=args.decay_rate,\n                       dropout_keep=args.keep_prob)\n\n    model_path = './scratch/saved_model_d{}_s{}'.format(hash(args.datapath),args.seed)\n    if not os.path.exists(model_path + '.meta'):\n        if not os.path.exists('./scratch'):\n            os.mkdir('./scratch')\n\n        print \"Training..\"\n        bilstm.fit(X_train,y_train,\n                   X_dev, y_dev,\n                   num_epoch=args.num_epoch,\n                   batch_size=args.batch_size,\n                   seed=args.seed)\n\n        bilstm.save(model_path)\n    else:\n        print \"Loading saved model..\"\n        bilstm.restore(model_path)\n\n    print \"Evaluating..\"\n    print \"Performance on DEV set ----------------------------\"\n\n    report_performance(bilstm, X_dev,y_dev, 'evaluation/devset_predictions.txt')\n\n    print \"Performance on TEST set(s) ----------------------------\"\n\n    overall_testset = []\n    for key, testset in testset_standalone.items():\n        X_test, y_test = zip(*testset)\n        report_performance(bilstm, X_test,y_test, 'evaluation/testset_{}_predictions.txt'.format(key))\n        overall_testset += testset\n\n    X_test, y_test = zip(*overall_testset)\n    report_performance(bilstm, X_test,y_test, 'evaluation/testset_overall_predictions.txt')\n\nif __name__ == '__main__':\n    
main(parser.parse_args())\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":382693,"cells":{"repo_name":{"kind":"string","value":"RachitKansal/scikit-learn"},"path":{"kind":"string","value":"doc/conf.py"},"copies":{"kind":"string","value":"210"},"size":{"kind":"string","value":"8446"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# scikit-learn documentation build configuration file, created by\n# sphinx-quickstart on Fri Jan 8 09:13:42 2010.\n#\n# This file is execfile()d with the current directory set to its containing\n# dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom __future__ import print_function\nimport sys\nimport os\nfrom sklearn.externals.six import u\n\n# If extensions (or modules to document with autodoc) are in another\n# directory, add these directories to sys.path here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.insert(0, os.path.abspath('sphinxext'))\n\nfrom github_link import make_linkcode_resolve\n\n# -- General configuration ---------------------------------------------------\n\n# Try to override the matplotlib configuration as early as possible\ntry:\n import gen_rst\nexcept:\n pass\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['gen_rst',\n 'sphinx.ext.autodoc', 'sphinx.ext.autosummary',\n 'sphinx.ext.pngmath', 'numpy_ext.numpydoc',\n 'sphinx.ext.linkcode',\n ]\n\nautosummary_generate = True\n\nautodoc_default_flags = ['members', 'inherited-members']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['templates']\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# Generate the plots for the gallery\nplot_gallery = True\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u('scikit-learn')\ncopyright = u('2010 - 2014, scikit-learn developers (BSD License)')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nimport sklearn\nversion = sklearn.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = sklearn.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be\n# searched for source files.\nexclude_trees = ['_build', 'templates', 'includes']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\nadd_function_parentheses = False\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'scikit-learn'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {'oldversion': False, 'collapsiblesidebar': True,\n 'google_analytics': True, 'surveybanner': False,\n 'sprintbanner': True}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['themes']\n\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\nhtml_short_title = 'scikit-learn'\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = 'logos/scikit-learn-logo-small.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = 'logos/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['images']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'scikit-learndoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass\n# [howto/manual]).\nlatex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),\n u('scikit-learn developers'), 'manual'), ]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\nlatex_logo = \"logos/scikit-learn-logo.png\"\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\nlatex_preamble = r\"\"\"\n\\usepackage{amsmath}\\usepackage{amsfonts}\\usepackage{bm}\\usepackage{morefloats}\n\\usepackage{enumitem} \\setlistdepth{10}\n\"\"\"\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\nlatex_domain_indices = False\n\ntrim_doctests_flags = True\n\n\ndef generate_example_rst(app, what, name, obj, options, lines):\n # generate empty examples files, so that we don't get\n # inclusion errors if there are no examples for a class / module\n examples_path = os.path.join(app.srcdir, \"modules\", \"generated\",\n \"%s.examples\" % name)\n if not os.path.exists(examples_path):\n # touch file\n open(examples_path, 'w').close()\n\n\ndef setup(app):\n # to hide/show the prompt in code examples:\n app.add_javascript('js/copybutton.js')\n app.connect('autodoc-process-docstring', generate_example_rst)\n\n\n# The following is used by sphinx.ext.linkcode to provide links to github\nlinkcode_resolve = make_linkcode_resolve('sklearn',\n u'https://github.com/scikit-learn/'\n 'scikit-learn/blob/{revision}/'\n '{package}/{path}#L{lineno}')\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382694,"cells":{"repo_name":{"kind":"string","value":"KonradBreitsprecher/espresso"},"path":{"kind":"string","value":"samples/lb_profile.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1902"},"content":{"kind":"string","value":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport espressomd\nimport espressomd.lb\nimport espressomd.observables\nimport espressomd.shapes\nimport espressomd.lbboundaries\nimport espressomd.accumulators\n\n\nsystem = espressomd.System(box_l=[10.0, 10.0, 5.0])\nsystem.time_step = 0.01\nsystem.cell_system.skin = 0.4\n\nlb_fluid = espressomd.lb.LBFluidGPU(agrid=1.0, fric=1.0, dens=1.0, visc=1.0, tau=0.01, ext_force=[0, 0, 0.15])\nsystem.actors.add(lb_fluid)\nsystem.thermostat.set_lb(kT=1.0)\nfluid_obs = espressomd.observables.CylindricalLBVelocityProfile(\n center = [5.0, 5.0, 0.0],\n axis = 'z',\n n_r_bins = 100,\n n_phi_bins = 1,\n n_z_bins = 1,\n min_r = 0.0,\n max_r = 4.0,\n min_phi = -np.pi,\n max_phi = np.pi,\n min_z = 0.0,\n max_z = 10.0,\n sampling_delta_x = 0.05,\n sampling_delta_y = 0.05,\n sampling_delta_z = 1.0)\ncylinder_shape = espressomd.shapes.Cylinder(\n center = [5.0, 5.0, 5.0],\n axis = [0, 0, 1],\n direction = -1,\n radius = 4.0,\n length = 20.0)\ncylinder_boundary = 
espressomd.lbboundaries.LBBoundary(shape=cylinder_shape)\nsystem.lbboundaries.add(cylinder_boundary)\nsystem.integrator.run(5000)\n\n\naccumulator = espressomd.accumulators.MeanVarianceCalculator(obs=fluid_obs)\nsystem.auto_update_accumulators.add(accumulator)\nsystem.integrator.run(5000)\n\nlb_fluid_profile = accumulator.get_mean()\nlb_fluid_profile = np.reshape(lb_fluid_profile, (100, 1, 1, 3))\n\ndef poiseuille_flow(r, R, ext_force):\n return ext_force * 1./4 * (R**2.0-r**2.0)\n\n\n# Please note that due to symmetry and interpolation a plateau is seen near r=0.\nn_bins = len(lb_fluid_profile[:, 0, 0, 2])\nr_max = 4.0\nr = np.linspace(0.0, r_max, n_bins)\nplt.plot(r, lb_fluid_profile[:, 0, 0, 2], label='LB profile')\nplt.plot(r, poiseuille_flow(r, r_max, 0.15), label='analytical solution')\nplt.show()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":382695,"cells":{"repo_name":{"kind":"string","value":"sambitgaan/nupic"},"path":{"kind":"string","value":"external/linux32/lib/python2.6/site-packages/matplotlib/axes.py"},"copies":{"kind":"string","value":"69"},"size":{"kind":"string","value":"259904"},"content":{"kind":"string","value":"from __future__ import division, generators\nimport math, sys, warnings, datetime, new\n\nimport numpy as np\nfrom numpy import ma\n\nimport matplotlib\nrcParams = matplotlib.rcParams\n\nimport matplotlib.artist as martist\nimport matplotlib.axis as maxis\nimport matplotlib.cbook as cbook\nimport matplotlib.collections as mcoll\nimport matplotlib.colors as mcolors\nimport matplotlib.contour as mcontour\nimport matplotlib.dates as mdates\nimport matplotlib.font_manager as font_manager\nimport matplotlib.image as mimage\nimport matplotlib.legend as mlegend\nimport matplotlib.lines as mlines\nimport matplotlib.mlab as mlab\nimport matplotlib.patches as mpatches\nimport matplotlib.quiver as mquiver\nimport matplotlib.scale as mscale\nimport matplotlib.table as mtable\nimport matplotlib.text as mtext\nimport matplotlib.ticker as mticker\nimport matplotlib.transforms as mtransforms\n\niterable = cbook.iterable\nis_string_like = cbook.is_string_like\n\n\ndef _process_plot_format(fmt):\n \"\"\"\n Process a matlab(TM) style color/line style format string. Return a\n (*linestyle*, *color*) tuple as a result of the processing. Default\n values are ('-', 'b'). Example format strings include:\n\n * 'ko': black circles\n * '.b': blue dots\n * 'r--': red dashed lines\n\n .. 
seealso::\n :func:`~matplotlib.Line2D.lineStyles` and\n :func:`~matplotlib.pyplot.colors`:\n for all possible styles and color format string.\n \"\"\"\n\n linestyle = None\n marker = None\n color = None\n\n # Is fmt just a colorspec?\n try:\n color = mcolors.colorConverter.to_rgb(fmt)\n return linestyle, marker, color # Yes.\n except ValueError:\n pass # No, not just a color.\n\n # handle the multi char special cases and strip them from the\n # string\n if fmt.find('--')>=0:\n linestyle = '--'\n fmt = fmt.replace('--', '')\n if fmt.find('-.')>=0:\n linestyle = '-.'\n fmt = fmt.replace('-.', '')\n if fmt.find(' ')>=0:\n linestyle = 'None'\n fmt = fmt.replace(' ', '')\n\n chars = [c for c in fmt]\n\n for c in chars:\n if c in mlines.lineStyles:\n if linestyle is not None:\n raise ValueError(\n 'Illegal format string \"%s\"; two linestyle symbols' % fmt)\n linestyle = c\n elif c in mlines.lineMarkers:\n if marker is not None:\n raise ValueError(\n 'Illegal format string \"%s\"; two marker symbols' % fmt)\n marker = c\n elif c in mcolors.colorConverter.colors:\n if color is not None:\n raise ValueError(\n 'Illegal format string \"%s\"; two color symbols' % fmt)\n color = c\n else:\n raise ValueError(\n 'Unrecognized character %c in format string' % c)\n\n if linestyle is None and marker is None:\n linestyle = rcParams['lines.linestyle']\n if linestyle is None:\n linestyle = 'None'\n if marker is None:\n marker = 'None'\n\n return linestyle, marker, color\n\ndef set_default_color_cycle(clist):\n \"\"\"\n Change the default cycle of colors that will be used by the plot\n command. This must be called before creating the\n :class:`Axes` to which it will apply; it will\n apply to all future axes.\n\n *clist* is a sequence of mpl color specifiers\n\n \"\"\"\n _process_plot_var_args.defaultColors = clist[:]\n rcParams['lines.color'] = clist[0]\n\nclass _process_plot_var_args:\n \"\"\"\n\n Process variable length arguments to the plot command, so that\n plot commands like the following are supported::\n\n plot(t, s)\n plot(t1, s1, t2, s2)\n plot(t1, s1, 'ko', t2, s2)\n plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)\n\n an arbitrary number of *x*, *y*, *fmt* are allowed\n \"\"\"\n\n defaultColors = ['b','g','r','c','m','y','k']\n def __init__(self, axes, command='plot'):\n self.axes = axes\n self.command = command\n self._clear_color_cycle()\n\n def _clear_color_cycle(self):\n self.colors = _process_plot_var_args.defaultColors[:]\n # if the default line color is a color format string, move it up\n # in the que\n try: ind = self.colors.index(rcParams['lines.color'])\n except ValueError:\n self.firstColor = rcParams['lines.color']\n else:\n self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]\n self.firstColor = self.colors[0]\n\n self.Ncolors = len(self.colors)\n\n self.count = 0\n\n def set_color_cycle(self, clist):\n self.colors = clist[:]\n self.firstColor = self.colors[0]\n self.Ncolors = len(self.colors)\n self.count = 0\n\n def _get_next_cycle_color(self):\n if self.count==0:\n color = self.firstColor\n else:\n color = self.colors[int(self.count % self.Ncolors)]\n self.count += 1\n return color\n\n def __call__(self, *args, **kwargs):\n\n if self.axes.xaxis is not None and self.axes.yaxis is not None:\n xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)\n yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)\n if xunits!=self.axes.xaxis.units:\n self.axes.xaxis.set_units(xunits)\n if yunits!=self.axes.yaxis.units:\n self.axes.yaxis.set_units(yunits)\n\n ret = 
self._grab_next_args(*args, **kwargs)\n return ret\n\n def set_lineprops(self, line, **kwargs):\n assert self.command == 'plot', 'set_lineprops only works with \"plot\"'\n for key, val in kwargs.items():\n funcName = \"set_%s\"%key\n if not hasattr(line,funcName):\n raise TypeError, 'There is no line property \"%s\"'%key\n func = getattr(line,funcName)\n func(val)\n\n def set_patchprops(self, fill_poly, **kwargs):\n assert self.command == 'fill', 'set_patchprops only works with \"fill\"'\n for key, val in kwargs.items():\n funcName = \"set_%s\"%key\n if not hasattr(fill_poly,funcName):\n raise TypeError, 'There is no patch property \"%s\"'%key\n func = getattr(fill_poly,funcName)\n func(val)\n\n def _xy_from_y(self, y):\n if self.axes.yaxis is not None:\n b = self.axes.yaxis.update_units(y)\n if b: return np.arange(len(y)), y, False\n\n if not ma.isMaskedArray(y):\n y = np.asarray(y)\n if len(y.shape) == 1:\n y = y[:,np.newaxis]\n nr, nc = y.shape\n x = np.arange(nr)\n if len(x.shape) == 1:\n x = x[:,np.newaxis]\n return x,y, True\n\n def _xy_from_xy(self, x, y):\n if self.axes.xaxis is not None and self.axes.yaxis is not None:\n bx = self.axes.xaxis.update_units(x)\n by = self.axes.yaxis.update_units(y)\n # right now multicol is not supported if either x or y are\n # unit enabled but this can be fixed..\n if bx or by: return x, y, False\n\n x = ma.asarray(x)\n y = ma.asarray(y)\n if len(x.shape) == 1:\n x = x[:,np.newaxis]\n if len(y.shape) == 1:\n y = y[:,np.newaxis]\n nrx, ncx = x.shape\n nry, ncy = y.shape\n assert nrx == nry, 'Dimensions of x and y are incompatible'\n if ncx == ncy:\n return x, y, True\n if ncx == 1:\n x = np.repeat(x, ncy, axis=1)\n if ncy == 1:\n y = np.repeat(y, ncx, axis=1)\n assert x.shape == y.shape, 'Dimensions of x and y are incompatible'\n return x, y, True\n\n\n def _plot_1_arg(self, y, **kwargs):\n assert self.command == 'plot', 'fill needs at least 2 arguments'\n ret = []\n\n x, y, multicol = self._xy_from_y(y)\n\n if multicol:\n for j in xrange(y.shape[1]):\n color = self._get_next_cycle_color()\n seg = mlines.Line2D(x, y[:,j],\n color = color,\n axes=self.axes,\n )\n self.set_lineprops(seg, **kwargs)\n ret.append(seg)\n else:\n color = self._get_next_cycle_color()\n seg = mlines.Line2D(x, y,\n color = color,\n axes=self.axes,\n )\n self.set_lineprops(seg, **kwargs)\n ret.append(seg)\n\n return ret\n\n def _plot_2_args(self, tup2, **kwargs):\n ret = []\n if is_string_like(tup2[1]):\n\n assert self.command == 'plot', ('fill needs at least 2 non-string '\n 'arguments')\n y, fmt = tup2\n x, y, multicol = self._xy_from_y(y)\n\n linestyle, marker, color = _process_plot_format(fmt)\n\n def makeline(x, y):\n _color = color\n if _color is None:\n _color = self._get_next_cycle_color()\n seg = mlines.Line2D(x, y,\n color=_color,\n linestyle=linestyle, marker=marker,\n axes=self.axes,\n )\n self.set_lineprops(seg, **kwargs)\n ret.append(seg)\n\n if multicol:\n for j in xrange(y.shape[1]):\n makeline(x[:,j], y[:,j])\n else:\n makeline(x, y)\n\n return ret\n else:\n\n x, y = tup2\n x, y, multicol = self._xy_from_xy(x, y)\n\n def makeline(x, y):\n color = self._get_next_cycle_color()\n seg = mlines.Line2D(x, y,\n color=color,\n axes=self.axes,\n )\n self.set_lineprops(seg, **kwargs)\n ret.append(seg)\n\n def makefill(x, y):\n x = self.axes.convert_xunits(x)\n y = self.axes.convert_yunits(y)\n facecolor = self._get_next_cycle_color()\n seg = mpatches.Polygon(np.hstack(\n (x[:,np.newaxis],y[:,np.newaxis])),\n facecolor = facecolor,\n fill=True,\n closed=closed\n )\n 
self.set_patchprops(seg, **kwargs)\n ret.append(seg)\n\n if self.command == 'plot':\n func = makeline\n else:\n closed = kwargs.get('closed', True)\n func = makefill\n if multicol:\n for j in xrange(y.shape[1]):\n func(x[:,j], y[:,j])\n else:\n func(x, y)\n\n\n return ret\n\n def _plot_3_args(self, tup3, **kwargs):\n ret = []\n\n x, y, fmt = tup3\n x, y, multicol = self._xy_from_xy(x, y)\n\n linestyle, marker, color = _process_plot_format(fmt)\n\n def makeline(x, y):\n _color = color\n if _color is None:\n _color = self._get_next_cycle_color()\n seg = mlines.Line2D(x, y,\n color=_color,\n linestyle=linestyle, marker=marker,\n axes=self.axes,\n )\n self.set_lineprops(seg, **kwargs)\n ret.append(seg)\n\n def makefill(x, y):\n facecolor = color\n x = self.axes.convert_xunits(x)\n y = self.axes.convert_yunits(y)\n seg = mpatches.Polygon(np.hstack(\n (x[:,np.newaxis],y[:,np.newaxis])),\n facecolor = facecolor,\n fill=True,\n closed=closed\n )\n self.set_patchprops(seg, **kwargs)\n ret.append(seg)\n\n if self.command == 'plot':\n func = makeline\n else:\n closed = kwargs.get('closed', True)\n func = makefill\n\n if multicol:\n for j in xrange(y.shape[1]):\n func(x[:,j], y[:,j])\n else:\n func(x, y)\n return ret\n\n def _grab_next_args(self, *args, **kwargs):\n\n remaining = args\n while 1:\n\n if len(remaining)==0: return\n if len(remaining)==1:\n for seg in self._plot_1_arg(remaining[0], **kwargs):\n yield seg\n remaining = []\n continue\n if len(remaining)==2:\n for seg in self._plot_2_args(remaining, **kwargs):\n yield seg\n remaining = []\n continue\n if len(remaining)==3:\n if not is_string_like(remaining[2]):\n raise ValueError, 'third arg must be a format string'\n for seg in self._plot_3_args(remaining, **kwargs):\n yield seg\n remaining=[]\n continue\n if is_string_like(remaining[2]):\n for seg in self._plot_3_args(remaining[:3], **kwargs):\n yield seg\n remaining=remaining[3:]\n else:\n for seg in self._plot_2_args(remaining[:2], **kwargs):\n yield seg\n remaining=remaining[2:]\n\n\nclass Axes(martist.Artist):\n \"\"\"\n The :class:`Axes` contains most of the figure elements:\n :class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,\n :class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,\n :class:`~matplotlib.patches.Polygon`, etc., and sets the\n coordinate system.\n\n The :class:`Axes` instance supports callbacks through a callbacks\n attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`\n instance. 
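
    A minimal usage sketch of that registry (assuming an existing
    :class:`Axes` instance *ax*; the callback name *on_xlim_change* is
    just an illustrative placeholder)::

        def on_xlim_change(ax):
            xmin, xmax = ax.get_xlim()  # respond to the new view limits here

        ax.callbacks.connect('xlim_changed', on_xlim_change)
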
The events you can connect to are 'xlim_changed' and\n 'ylim_changed' and the callback will be called with func(*ax*)\n where *ax* is the :class:`Axes` instance.\n \"\"\"\n name = \"rectilinear\"\n\n _shared_x_axes = cbook.Grouper()\n _shared_y_axes = cbook.Grouper()\n\n def __str__(self):\n return \"Axes(%g,%g;%gx%g)\" % tuple(self._position.bounds)\n def __init__(self, fig, rect,\n axisbg = None, # defaults to rc axes.facecolor\n frameon = True,\n sharex=None, # use Axes instance's xaxis info\n sharey=None, # use Axes instance's yaxis info\n label='',\n **kwargs\n ):\n \"\"\"\n Build an :class:`Axes` instance in\n :class:`~matplotlib.figure.Figure` *fig* with\n *rect=[left, bottom, width, height]* in\n :class:`~matplotlib.figure.Figure` coordinates\n\n Optional keyword arguments:\n\n ================ =========================================\n Keyword Description\n ================ =========================================\n *adjustable* [ 'box' | 'datalim' ]\n *alpha* float: the alpha transparency\n *anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',\n 'NW', 'W' ]\n *aspect* [ 'auto' | 'equal' | aspect_ratio ]\n *autoscale_on* [ *True* | *False* ] whether or not to\n autoscale the *viewlim*\n *axis_bgcolor* any matplotlib color, see\n :func:`~matplotlib.pyplot.colors`\n *axisbelow* draw the grids and ticks below the other\n artists\n *cursor_props* a (*float*, *color*) tuple\n *figure* a :class:`~matplotlib.figure.Figure`\n instance\n *frame_on* a boolean - draw the axes frame\n *label* the axes label\n *navigate* [ *True* | *False* ]\n *navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation\n toolbar button status\n *position* [left, bottom, width, height] in\n class:`~matplotlib.figure.Figure` coords\n *sharex* an class:`~matplotlib.axes.Axes` instance\n to share the x-axis with\n *sharey* an class:`~matplotlib.axes.Axes` instance\n to share the y-axis with\n *title* the title string\n *visible* [ *True* | *False* ] whether the axes is\n visible\n *xlabel* the xlabel\n *xlim* (*xmin*, *xmax*) view limits\n *xscale* [%(scale)s]\n *xticklabels* sequence of strings\n *xticks* sequence of floats\n *ylabel* the ylabel strings\n *ylim* (*ymin*, *ymax*) view limits\n *yscale* [%(scale)s]\n *yticklabels* sequence of strings\n *yticks* sequence of floats\n ================ =========================================\n \"\"\" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}\n martist.Artist.__init__(self)\n if isinstance(rect, mtransforms.Bbox):\n self._position = rect\n else:\n self._position = mtransforms.Bbox.from_bounds(*rect)\n self._originalPosition = self._position.frozen()\n self.set_axes(self)\n self.set_aspect('auto')\n self._adjustable = 'box'\n self.set_anchor('C')\n self._sharex = sharex\n self._sharey = sharey\n if sharex is not None:\n self._shared_x_axes.join(self, sharex)\n if sharex._adjustable == 'box':\n sharex._adjustable = 'datalim'\n #warnings.warn(\n # 'shared axes: \"adjustable\" is being changed to \"datalim\"')\n self._adjustable = 'datalim'\n if sharey is not None:\n self._shared_y_axes.join(self, sharey)\n if sharey._adjustable == 'box':\n sharey._adjustable = 'datalim'\n #warnings.warn(\n # 'shared axes: \"adjustable\" is being changed to \"datalim\"')\n self._adjustable = 'datalim'\n self.set_label(label)\n self.set_figure(fig)\n\n # this call may differ for non-sep axes, eg polar\n self._init_axis()\n\n if axisbg is None: axisbg = rcParams['axes.facecolor']\n self._axisbg = axisbg\n self._frameon = frameon\n self._axisbelow = 
rcParams['axes.axisbelow']\n\n self._hold = rcParams['axes.hold']\n self._connected = {} # a dict from events to (id, func)\n self.cla()\n # funcs used to format x and y - fall back on major formatters\n self.fmt_xdata = None\n self.fmt_ydata = None\n\n\n self.set_cursor_props((1,'k')) # set the cursor properties for axes\n\n self._cachedRenderer = None\n self.set_navigate(True)\n self.set_navigate_mode(None)\n\n if len(kwargs): martist.setp(self, **kwargs)\n\n if self.xaxis is not None:\n self._xcid = self.xaxis.callbacks.connect('units finalize',\n self.relim)\n\n if self.yaxis is not None:\n self._ycid = self.yaxis.callbacks.connect('units finalize',\n self.relim)\n\n def get_window_extent(self, *args, **kwargs):\n '''\n get the axes bounding box in display space; *args* and\n *kwargs* are empty\n '''\n return self.bbox\n\n def _init_axis(self):\n \"move this out of __init__ because non-separable axes don't use it\"\n self.xaxis = maxis.XAxis(self)\n self.yaxis = maxis.YAxis(self)\n self._update_transScale()\n\n def set_figure(self, fig):\n \"\"\"\n Set the class:`~matplotlib.axes.Axes` figure\n\n accepts a class:`~matplotlib.figure.Figure` instance\n \"\"\"\n martist.Artist.set_figure(self, fig)\n\n self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)\n #these will be updated later as data is added\n self.dataLim = mtransforms.Bbox.unit()\n self.viewLim = mtransforms.Bbox.unit()\n self.transScale = mtransforms.TransformWrapper(\n mtransforms.IdentityTransform())\n\n self._set_lim_and_transforms()\n\n def _set_lim_and_transforms(self):\n \"\"\"\n set the *dataLim* and *viewLim*\n :class:`~matplotlib.transforms.Bbox` attributes and the\n *transScale*, *transData*, *transLimits* and *transAxes*\n transformations.\n \"\"\"\n self.transAxes = mtransforms.BboxTransformTo(self.bbox)\n\n # Transforms the x and y axis separately by a scale factor\n # It is assumed that this part will have non-linear components\n self.transScale = mtransforms.TransformWrapper(\n mtransforms.IdentityTransform())\n\n # An affine transformation on the data, generally to limit the\n # range of the axes\n self.transLimits = mtransforms.BboxTransformFrom(\n mtransforms.TransformedBbox(self.viewLim, self.transScale))\n\n # The parentheses are important for efficiency here -- they\n # group the last two (which are usually affines) separately\n # from the first (which, with log-scaling can be non-affine).\n self.transData = self.transScale + (self.transLimits + self.transAxes)\n\n self._xaxis_transform = mtransforms.blended_transform_factory(\n self.axes.transData, self.axes.transAxes)\n self._yaxis_transform = mtransforms.blended_transform_factory(\n self.axes.transAxes, self.axes.transData)\n\n def get_xaxis_transform(self):\n \"\"\"\n Get the transformation used for drawing x-axis labels, ticks\n and gridlines. The x-direction is in data coordinates and the\n y-direction is in axis coordinates.\n\n .. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n \"\"\"\n return self._xaxis_transform\n\n def get_xaxis_text1_transform(self, pad_points):\n \"\"\"\n Get the transformation used for drawing x-axis labels, which\n will add the given amount of padding (in points) between the\n axes and the label. The x-direction is in data coordinates\n and the y-direction is in axis coordinates. 
Returns a\n 3-tuple of the form::\n\n (transform, valign, halign)\n\n where *valign* and *halign* are requested alignments for the\n text.\n\n .. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n \"\"\"\n return (self._xaxis_transform +\n mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,\n self.figure.dpi_scale_trans),\n \"top\", \"center\")\n\n def get_xaxis_text2_transform(self, pad_points):\n \"\"\"\n Get the transformation used for drawing the secondary x-axis\n labels, which will add the given amount of padding (in points)\n between the axes and the label. The x-direction is in data\n coordinates and the y-direction is in axis coordinates.\n Returns a 3-tuple of the form::\n\n (transform, valign, halign)\n\n where *valign* and *halign* are requested alignments for the\n text.\n\n .. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n \"\"\"\n return (self._xaxis_transform +\n mtransforms.ScaledTranslation(0, pad_points / 72.0,\n self.figure.dpi_scale_trans),\n \"bottom\", \"center\")\n\n def get_yaxis_transform(self):\n \"\"\"\n Get the transformation used for drawing y-axis labels, ticks\n and gridlines. The x-direction is in axis coordinates and the\n y-direction is in data coordinates.\n\n .. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n \"\"\"\n return self._yaxis_transform\n\n def get_yaxis_text1_transform(self, pad_points):\n \"\"\"\n Get the transformation used for drawing y-axis labels, which\n will add the given amount of padding (in points) between the\n axes and the label. The x-direction is in axis coordinates\n and the y-direction is in data coordinates. Returns a 3-tuple\n of the form::\n\n (transform, valign, halign)\n\n where *valign* and *halign* are requested alignments for the\n text.\n\n .. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n \"\"\"\n return (self._yaxis_transform +\n mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,\n self.figure.dpi_scale_trans),\n \"center\", \"right\")\n\n def get_yaxis_text2_transform(self, pad_points):\n \"\"\"\n Get the transformation used for drawing the secondary y-axis\n labels, which will add the given amount of padding (in points)\n between the axes and the label. The x-direction is in axis\n coordinates and the y-direction is in data coordinates.\n Returns a 3-tuple of the form::\n\n (transform, valign, halign)\n\n where *valign* and *halign* are requested alignments for the\n text.\n\n .. 
note::\n\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n \"\"\"\n return (self._yaxis_transform +\n mtransforms.ScaledTranslation(pad_points / 72.0, 0,\n self.figure.dpi_scale_trans),\n \"center\", \"left\")\n\n def _update_transScale(self):\n self.transScale.set(\n mtransforms.blended_transform_factory(\n self.xaxis.get_transform(), self.yaxis.get_transform()))\n if hasattr(self, \"lines\"):\n for line in self.lines:\n line._transformed_path.invalidate()\n\n def get_position(self, original=False):\n 'Return the a copy of the axes rectangle as a Bbox'\n if original:\n return self._originalPosition.frozen()\n else:\n return self._position.frozen()\n\n\n def set_position(self, pos, which='both'):\n \"\"\"\n Set the axes position with::\n\n pos = [left, bottom, width, height]\n\n in relative 0,1 coords, or *pos* can be a\n :class:`~matplotlib.transforms.Bbox`\n\n There are two position variables: one which is ultimately\n used, but which may be modified by :meth:`apply_aspect`, and a\n second which is the starting point for :meth:`apply_aspect`.\n\n\n Optional keyword arguments:\n *which*\n\n ========== ====================\n value description\n ========== ====================\n 'active' to change the first\n 'original' to change the second\n 'both' to change both\n ========== ====================\n\n \"\"\"\n if not isinstance(pos, mtransforms.BboxBase):\n pos = mtransforms.Bbox.from_bounds(*pos)\n if which in ('both', 'active'):\n self._position.set(pos)\n if which in ('both', 'original'):\n self._originalPosition.set(pos)\n\n def reset_position(self):\n 'Make the original position the active position'\n pos = self.get_position(original=True)\n self.set_position(pos, which='active')\n\n def _set_artist_props(self, a):\n 'set the boilerplate props for artists added to axes'\n a.set_figure(self.figure)\n if not a.is_transform_set():\n a.set_transform(self.transData)\n\n a.set_axes(self)\n\n def _gen_axes_patch(self):\n \"\"\"\n Returns the patch used to draw the background of the axes. It\n is also used as the clipping path for any data elements on the\n axes.\n\n In the standard axes, this is a rectangle, but in other\n projections it may not be.\n\n .. 
note::\n Intended to be overridden by new projection types.\n \"\"\"\n return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)\n\n def cla(self):\n 'Clear the current axes'\n # Note: this is called by Axes.__init__()\n self.xaxis.cla()\n self.yaxis.cla()\n\n self.ignore_existing_data_limits = True\n self.callbacks = cbook.CallbackRegistry(('xlim_changed',\n 'ylim_changed'))\n\n if self._sharex is not None:\n # major and minor are class instances with\n # locator and formatter attributes\n self.xaxis.major = self._sharex.xaxis.major\n self.xaxis.minor = self._sharex.xaxis.minor\n x0, x1 = self._sharex.get_xlim()\n self.set_xlim(x0, x1, emit=False)\n self.xaxis.set_scale(self._sharex.xaxis.get_scale())\n else:\n self.xaxis.set_scale('linear')\n\n if self._sharey is not None:\n self.yaxis.major = self._sharey.yaxis.major\n self.yaxis.minor = self._sharey.yaxis.minor\n y0, y1 = self._sharey.get_ylim()\n self.set_ylim(y0, y1, emit=False)\n self.yaxis.set_scale(self._sharey.yaxis.get_scale())\n else:\n self.yaxis.set_scale('linear')\n\n self._autoscaleon = True\n self._update_transScale() # needed?\n\n self._get_lines = _process_plot_var_args(self)\n self._get_patches_for_fill = _process_plot_var_args(self, 'fill')\n\n self._gridOn = rcParams['axes.grid']\n self.lines = []\n self.patches = []\n self.texts = []\n self.tables = []\n self.artists = []\n self.images = []\n self.legend_ = None\n self.collections = [] # collection.Collection instances\n\n self.grid(self._gridOn)\n props = font_manager.FontProperties(size=rcParams['axes.titlesize'])\n\n\n self.titleOffsetTrans = mtransforms.ScaledTranslation(\n 0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)\n self.title = mtext.Text(\n x=0.5, y=1.0, text='',\n fontproperties=props,\n verticalalignment='bottom',\n horizontalalignment='center',\n )\n self.title.set_transform(self.transAxes + self.titleOffsetTrans)\n self.title.set_clip_box(None)\n\n self._set_artist_props(self.title)\n\n # the patch draws the background of the axes. we want this to\n # be below the other artists; the axesPatch name is\n # deprecated. We use the frame to draw the edges so we are\n # setting the edgecolor to None\n self.patch = self.axesPatch = self._gen_axes_patch()\n self.patch.set_figure(self.figure)\n self.patch.set_facecolor(self._axisbg)\n self.patch.set_edgecolor('None')\n self.patch.set_linewidth(0)\n self.patch.set_transform(self.transAxes)\n\n # the frame draws the border around the axes and we want this\n # above. this is a place holder for a more sophisticated\n # artist that might just draw a left, bottom frame, or a\n # centered frame, etc the axesFrame name is deprecated\n self.frame = self.axesFrame = self._gen_axes_patch()\n self.frame.set_figure(self.figure)\n self.frame.set_facecolor('none')\n self.frame.set_edgecolor(rcParams['axes.edgecolor'])\n self.frame.set_linewidth(rcParams['axes.linewidth'])\n self.frame.set_transform(self.transAxes)\n self.frame.set_zorder(2.5)\n self.axison = True\n\n self.xaxis.set_clip_path(self.patch)\n self.yaxis.set_clip_path(self.patch)\n\n self._shared_x_axes.clean()\n self._shared_y_axes.clean()\n\n def clear(self):\n 'clear the axes'\n self.cla()\n\n def set_color_cycle(self, clist):\n \"\"\"\n Set the color cycle for any future plot commands on this Axes.\n\n clist is a list of mpl color specifiers.\n \"\"\"\n self._get_lines.set_color_cycle(clist)\n\n\n def ishold(self):\n 'return the HOLD status of the axes'\n return self._hold\n\n def hold(self, b=None):\n \"\"\"\n call signature::\n\n hold(b=None)\n\n Set the hold state. 
If *hold* is *None* (default), toggle the\n *hold* state. Else set the *hold* state to boolean value *b*.\n\n Examples:\n\n * toggle hold:\n >>> hold()\n * turn hold on:\n >>> hold(True)\n * turn hold off\n >>> hold(False)\n\n\n When hold is True, subsequent plot commands will be added to\n the current axes. When hold is False, the current axes and\n figure will be cleared on the next plot command\n\n \"\"\"\n if b is None:\n self._hold = not self._hold\n else:\n self._hold = b\n\n def get_aspect(self):\n return self._aspect\n\n def set_aspect(self, aspect, adjustable=None, anchor=None):\n \"\"\"\n *aspect*\n\n ======== ================================================\n value description\n ======== ================================================\n 'auto' automatic; fill position rectangle with data\n 'normal' same as 'auto'; deprecated\n 'equal' same scaling from data to plot units for x and y\n num a circle will be stretched such that the height\n is num times the width. aspect=1 is the same as\n aspect='equal'.\n ======== ================================================\n\n *adjustable*\n\n ========= ============================\n value description\n ========= ============================\n 'box' change physical size of axes\n 'datalim' change xlim or ylim\n ========= ============================\n\n *anchor*\n\n ===== =====================\n value description\n ===== =====================\n 'C' centered\n 'SW' lower left corner\n 'S' middle of bottom edge\n 'SE' lower right corner\n etc.\n ===== =====================\n\n \"\"\"\n if aspect in ('normal', 'auto'):\n self._aspect = 'auto'\n elif aspect == 'equal':\n self._aspect = 'equal'\n else:\n self._aspect = float(aspect) # raise ValueError if necessary\n\n if adjustable is not None:\n self.set_adjustable(adjustable)\n if anchor is not None:\n self.set_anchor(anchor)\n\n def get_adjustable(self):\n return self._adjustable\n\n def set_adjustable(self, adjustable):\n \"\"\"\n ACCEPTS: [ 'box' | 'datalim' ]\n \"\"\"\n if adjustable in ('box', 'datalim'):\n if self in self._shared_x_axes or self in self._shared_y_axes:\n if adjustable == 'box':\n raise ValueError(\n 'adjustable must be \"datalim\" for shared axes')\n self._adjustable = adjustable\n else:\n raise ValueError('argument must be \"box\", or \"datalim\"')\n\n def get_anchor(self):\n return self._anchor\n\n def set_anchor(self, anchor):\n \"\"\"\n *anchor*\n\n ===== ============\n value description\n ===== ============\n 'C' Center\n 'SW' bottom left\n 'S' bottom\n 'SE' bottom right\n 'E' right\n 'NE' top right\n 'N' top\n 'NW' top left\n 'W' left\n ===== ============\n\n \"\"\"\n if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:\n self._anchor = anchor\n else:\n raise ValueError('argument must be among %s' %\n ', '.join(mtransforms.BBox.coefs.keys()))\n\n def get_data_ratio(self):\n \"\"\"\n Returns the aspect ratio of the raw data.\n\n This method is intended to be overridden by new projection\n types.\n \"\"\"\n xmin,xmax = self.get_xbound()\n xsize = max(math.fabs(xmax-xmin), 1e-30)\n ymin,ymax = self.get_ybound()\n ysize = max(math.fabs(ymax-ymin), 1e-30)\n return ysize/xsize\n\n def apply_aspect(self, position=None):\n '''\n Use :meth:`_aspect` and :meth:`_adjustable` to modify the\n axes box or the view limits.\n '''\n if position is None:\n position = self.get_position(original=True)\n\n aspect = self.get_aspect()\n if aspect == 'auto':\n self.set_position( position , which='active')\n return\n\n if aspect == 'equal':\n A = 1\n else:\n A = aspect\n\n 
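        # At this point A holds the requested data-to-plot aspect ratio.
        # The code below honours it in one of two ways: 'box' shrinks the
        # axes rectangle itself, while 'datalim' keeps the rectangle and
        # widens the x or y view limits instead.
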
#Ensure at drawing time that any Axes involved in axis-sharing\n # does not have its position changed.\n if self in self._shared_x_axes or self in self._shared_y_axes:\n if self._adjustable == 'box':\n self._adjustable = 'datalim'\n warnings.warn(\n 'shared axes: \"adjustable\" is being changed to \"datalim\"')\n\n figW,figH = self.get_figure().get_size_inches()\n fig_aspect = figH/figW\n if self._adjustable == 'box':\n box_aspect = A * self.get_data_ratio()\n pb = position.frozen()\n pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)\n self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')\n return\n\n # reset active to original in case it had been changed\n # by prior use of 'box'\n self.set_position(position, which='active')\n\n xmin,xmax = self.get_xbound()\n xsize = max(math.fabs(xmax-xmin), 1e-30)\n ymin,ymax = self.get_ybound()\n ysize = max(math.fabs(ymax-ymin), 1e-30)\n\n l,b,w,h = position.bounds\n box_aspect = fig_aspect * (h/w)\n data_ratio = box_aspect / A\n\n y_expander = (data_ratio*xsize/ysize - 1.0)\n #print 'y_expander', y_expander\n # If y_expander > 0, the dy/dx viewLim ratio needs to increase\n if abs(y_expander) < 0.005:\n #print 'good enough already'\n return\n dL = self.dataLim\n xr = 1.05 * dL.width\n yr = 1.05 * dL.height\n xmarg = xsize - xr\n ymarg = ysize - yr\n Ysize = data_ratio * xsize\n Xsize = ysize / data_ratio\n Xmarg = Xsize - xr\n Ymarg = Ysize - yr\n xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.\n ym = 0\n #print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax\n #print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize\n\n changex = (self in self._shared_y_axes\n and self not in self._shared_x_axes)\n changey = (self in self._shared_x_axes\n and self not in self._shared_y_axes)\n if changex and changey:\n warnings.warn(\"adjustable='datalim' cannot work with shared \"\n \"x and y axes\")\n return\n if changex:\n adjust_y = False\n else:\n #print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg\n if xmarg > xm and ymarg > ym:\n adjy = ((Ymarg > 0 and y_expander < 0)\n or (Xmarg < 0 and y_expander > 0))\n else:\n adjy = y_expander > 0\n #print 'y_expander, adjy', y_expander, adjy\n adjust_y = changey or adjy #(Ymarg > xmarg)\n if adjust_y:\n yc = 0.5*(ymin+ymax)\n y0 = yc - Ysize/2.0\n y1 = yc + Ysize/2.0\n self.set_ybound((y0, y1))\n #print 'New y0, y1:', y0, y1\n #print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize\n else:\n xc = 0.5*(xmin+xmax)\n x0 = xc - Xsize/2.0\n x1 = xc + Xsize/2.0\n self.set_xbound((x0, x1))\n #print 'New x0, x1:', x0, x1\n #print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)\n\n def axis(self, *v, **kwargs):\n '''\n Convenience method for manipulating the x and y view limits\n and the aspect ratio of the plot.\n\n *kwargs* are passed on to :meth:`set_xlim` and\n :meth:`set_ylim`\n '''\n if len(v)==1 and is_string_like(v[0]):\n s = v[0].lower()\n if s=='on': self.set_axis_on()\n elif s=='off': self.set_axis_off()\n elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):\n self.set_autoscale_on(True)\n self.set_aspect('auto')\n self.autoscale_view()\n # self.apply_aspect()\n if s=='equal':\n self.set_aspect('equal', adjustable='datalim')\n elif s == 'scaled':\n self.set_aspect('equal', adjustable='box', anchor='C')\n self.set_autoscale_on(False) # Req. 
by Mark Bakker\n elif s=='tight':\n self.autoscale_view(tight=True)\n self.set_autoscale_on(False)\n elif s == 'image':\n self.autoscale_view(tight=True)\n self.set_autoscale_on(False)\n self.set_aspect('equal', adjustable='box', anchor='C')\n\n else:\n raise ValueError('Unrecognized string %s to axis; '\n 'try on or off' % s)\n xmin, xmax = self.get_xlim()\n ymin, ymax = self.get_ylim()\n return xmin, xmax, ymin, ymax\n\n try: v[0]\n except IndexError:\n emit = kwargs.get('emit', True)\n xmin = kwargs.get('xmin', None)\n xmax = kwargs.get('xmax', None)\n\n xmin, xmax = self.set_xlim(xmin, xmax, emit)\n ymin = kwargs.get('ymin', None)\n ymax = kwargs.get('ymax', None)\n ymin, ymax = self.set_ylim(ymin, ymax, emit)\n return xmin, xmax, ymin, ymax\n\n v = v[0]\n if len(v) != 4:\n raise ValueError('v must contain [xmin xmax ymin ymax]')\n\n\n self.set_xlim([v[0], v[1]])\n self.set_ylim([v[2], v[3]])\n\n return v\n\n def get_child_artists(self):\n \"\"\"\n Return a list of artists the axes contains.\n\n .. deprecated:: 0.98\n \"\"\"\n raise DeprecationWarning('Use get_children instead')\n\n def get_frame(self):\n 'Return the axes Rectangle frame'\n warnings.warn('use ax.patch instead', DeprecationWarning)\n return self.patch\n\n def get_legend(self):\n 'Return the legend.Legend instance, or None if no legend is defined'\n return self.legend_\n\n def get_images(self):\n 'return a list of Axes images contained by the Axes'\n return cbook.silent_list('AxesImage', self.images)\n\n def get_lines(self):\n 'Return a list of lines contained by the Axes'\n return cbook.silent_list('Line2D', self.lines)\n\n def get_xaxis(self):\n 'Return the XAxis instance'\n return self.xaxis\n\n def get_xgridlines(self):\n 'Get the x grid lines as a list of Line2D instances'\n return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())\n\n\n def get_xticklines(self):\n 'Get the xtick lines as a list of Line2D instances'\n return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())\n\n\n def get_yaxis(self):\n 'Return the YAxis instance'\n return self.yaxis\n\n def get_ygridlines(self):\n 'Get the y grid lines as a list of Line2D instances'\n return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())\n\n def get_yticklines(self):\n 'Get the ytick lines as a list of Line2D instances'\n return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())\n\n #### Adding and tracking artists\n\n def has_data(self):\n '''Return *True* if any artists have been added to axes.\n\n This should not be used to determine whether the *dataLim*\n need to be updated, and may not actually be useful for\n anything.\n '''\n return (\n len(self.collections) +\n len(self.images) +\n len(self.lines) +\n len(self.patches))>0\n\n def add_artist(self, a):\n 'Add any :class:`~matplotlib.artist.Artist` to the axes'\n a.set_axes(self)\n self.artists.append(a)\n self._set_artist_props(a)\n a.set_clip_path(self.patch)\n a._remove_method = lambda h: self.artists.remove(h)\n\n def add_collection(self, collection, autolim=True):\n '''\n add a :class:`~matplotlib.collections.Collection` instance\n to the axes\n '''\n label = collection.get_label()\n if not label:\n collection.set_label('collection%d'%len(self.collections))\n self.collections.append(collection)\n self._set_artist_props(collection)\n collection.set_clip_path(self.patch)\n if autolim:\n if collection._paths and len(collection._paths):\n self.update_datalim(collection.get_datalim(self.transData))\n\n collection._remove_method = lambda h: 
self.collections.remove(h)\n\n def add_line(self, line):\n '''\n Add a :class:`~matplotlib.lines.Line2D` to the list of plot\n lines\n '''\n self._set_artist_props(line)\n line.set_clip_path(self.patch)\n\n self._update_line_limits(line)\n if not line.get_label():\n line.set_label('_line%d'%len(self.lines))\n self.lines.append(line)\n line._remove_method = lambda h: self.lines.remove(h)\n\n def _update_line_limits(self, line):\n p = line.get_path()\n if p.vertices.size > 0:\n self.dataLim.update_from_path(p, self.ignore_existing_data_limits,\n updatex=line.x_isdata,\n updatey=line.y_isdata)\n self.ignore_existing_data_limits = False\n\n def add_patch(self, p):\n \"\"\"\n Add a :class:`~matplotlib.patches.Patch` *p* to the list of\n axes patches; the clipbox will be set to the Axes clipping\n box. If the transform is not set, it will be set to\n :attr:`transData`.\n \"\"\"\n\n self._set_artist_props(p)\n p.set_clip_path(self.patch)\n self._update_patch_limits(p)\n self.patches.append(p)\n p._remove_method = lambda h: self.patches.remove(h)\n\n def _update_patch_limits(self, patch):\n 'update the data limits for patch *p*'\n # hist can add zero height Rectangles, which is useful to keep\n # the bins, counts and patches lined up, but it throws off log\n # scaling. We'll ignore rects with zero height or width in\n # the auto-scaling\n\n if (isinstance(patch, mpatches.Rectangle) and\n (patch.get_width()==0 or patch.get_height()==0)):\n return\n vertices = patch.get_path().vertices\n if vertices.size > 0:\n xys = patch.get_patch_transform().transform(vertices)\n if patch.get_data_transform() != self.transData:\n transform = (patch.get_data_transform() +\n self.transData.inverted())\n xys = transform.transform(xys)\n self.update_datalim(xys, updatex=patch.x_isdata,\n updatey=patch.y_isdata)\n\n\n def add_table(self, tab):\n '''\n Add a :class:`~matplotlib.tables.Table` instance to the\n list of axes tables\n '''\n self._set_artist_props(tab)\n self.tables.append(tab)\n tab.set_clip_path(self.patch)\n tab._remove_method = lambda h: self.tables.remove(h)\n\n def relim(self):\n 'recompute the data limits based on current artists'\n # Collections are deliberately not supported (yet); see\n # the TODO note in artists.py.\n self.dataLim.ignore(True)\n self.ignore_existing_data_limits = True\n for line in self.lines:\n self._update_line_limits(line)\n\n for p in self.patches:\n self._update_patch_limits(p)\n\n def update_datalim(self, xys, updatex=True, updatey=True):\n 'Update the data lim bbox with seq of xy tups or equiv. 
2-D array'\n # if no data is set currently, the bbox will ignore its\n # limits and set the bound to be the bounds of the xydata.\n # Otherwise, it will compute the bounds of it's current data\n # and the data in xydata\n\n if iterable(xys) and not len(xys): return\n if not ma.isMaskedArray(xys):\n xys = np.asarray(xys)\n self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,\n updatex=updatex, updatey=updatey)\n self.ignore_existing_data_limits = False\n\n def update_datalim_numerix(self, x, y):\n 'Update the data lim bbox with seq of xy tups'\n # if no data is set currently, the bbox will ignore it's\n # limits and set the bound to be the bounds of the xydata.\n # Otherwise, it will compute the bounds of it's current data\n # and the data in xydata\n if iterable(x) and not len(x): return\n self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)\n self.ignore_existing_data_limits = False\n\n def update_datalim_bounds(self, bounds):\n '''\n Update the datalim to include the given\n :class:`~matplotlib.transforms.Bbox` *bounds*\n '''\n self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))\n\n def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):\n 'look for unit *kwargs* and update the axis instances as necessary'\n\n if self.xaxis is None or self.yaxis is None: return\n\n #print 'processing', self.get_geometry()\n if xdata is not None:\n # we only need to update if there is nothing set yet.\n if not self.xaxis.have_units():\n self.xaxis.update_units(xdata)\n #print '\\tset from xdata', self.xaxis.units\n\n if ydata is not None:\n # we only need to update if there is nothing set yet.\n if not self.yaxis.have_units():\n self.yaxis.update_units(ydata)\n #print '\\tset from ydata', self.yaxis.units\n\n # process kwargs 2nd since these will override default units\n if kwargs is not None:\n xunits = kwargs.pop( 'xunits', self.xaxis.units)\n if xunits!=self.xaxis.units:\n #print '\\tkw setting xunits', xunits\n self.xaxis.set_units(xunits)\n # If the units being set imply a different converter,\n # we need to update.\n if xdata is not None:\n self.xaxis.update_units(xdata)\n\n yunits = kwargs.pop('yunits', self.yaxis.units)\n if yunits!=self.yaxis.units:\n #print '\\tkw setting yunits', yunits\n self.yaxis.set_units(yunits)\n # If the units being set imply a different converter,\n # we need to update.\n if ydata is not None:\n self.yaxis.update_units(ydata)\n\n def in_axes(self, mouseevent):\n '''\n return *True* if the given *mouseevent* (in display coords)\n is in the Axes\n '''\n return self.patch.contains(mouseevent)[0]\n\n def get_autoscale_on(self):\n \"\"\"\n Get whether autoscaling is applied on plot commands\n \"\"\"\n return self._autoscaleon\n\n def set_autoscale_on(self, b):\n \"\"\"\n Set whether autoscaling is applied on plot commands\n\n accepts: [ *True* | *False* ]\n \"\"\"\n self._autoscaleon = b\n\n def autoscale_view(self, tight=False, scalex=True, scaley=True):\n \"\"\"\n autoscale the view limits using the data limits. You can\n selectively autoscale only a single axis, eg, the xaxis by\n setting *scaley* to *False*. 
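
        For example, a minimal sketch (assuming *ax* is an :class:`Axes`
        instance that already has data plotted on it)::

            ax.autoscale_view(scalex=True, scaley=False)  # rescale x only
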
The autoscaling preserves any\n axis direction reversal that has already been done.\n \"\"\"\n # if image data only just use the datalim\n if not self._autoscaleon: return\n if scalex:\n xshared = self._shared_x_axes.get_siblings(self)\n dl = [ax.dataLim for ax in xshared]\n bb = mtransforms.BboxBase.union(dl)\n x0, x1 = bb.intervalx\n if scaley:\n yshared = self._shared_y_axes.get_siblings(self)\n dl = [ax.dataLim for ax in yshared]\n bb = mtransforms.BboxBase.union(dl)\n y0, y1 = bb.intervaly\n if (tight or (len(self.images)>0 and\n len(self.lines)==0 and\n len(self.patches)==0)):\n if scalex:\n self.set_xbound(x0, x1)\n if scaley:\n self.set_ybound(y0, y1)\n return\n\n if scalex:\n XL = self.xaxis.get_major_locator().view_limits(x0, x1)\n self.set_xbound(XL)\n if scaley:\n YL = self.yaxis.get_major_locator().view_limits(y0, y1)\n self.set_ybound(YL)\n\n #### Drawing\n\n def draw(self, renderer=None, inframe=False):\n \"Draw everything (plot lines, axes, labels)\"\n if renderer is None:\n renderer = self._cachedRenderer\n\n if renderer is None:\n raise RuntimeError('No renderer defined')\n if not self.get_visible(): return\n renderer.open_group('axes')\n\n self.apply_aspect()\n\n # the patch draws the background rectangle -- the frame below\n # will draw the edges\n if self.axison and self._frameon:\n self.patch.draw(renderer)\n\n artists = []\n\n\n\n if len(self.images)<=1 or renderer.option_image_nocomposite():\n for im in self.images:\n im.draw(renderer)\n else:\n # make a composite image blending alpha\n # list of (mimage.Image, ox, oy)\n\n mag = renderer.get_image_magnification()\n ims = [(im.make_image(mag),0,0)\n for im in self.images if im.get_visible()]\n\n\n l, b, r, t = self.bbox.extents\n width = mag*((round(r) + 0.5) - (round(l) - 0.5))\n height = mag*((round(t) + 0.5) - (round(b) - 0.5))\n im = mimage.from_images(height,\n width,\n ims)\n\n im.is_grayscale = False\n l, b, w, h = self.bbox.bounds\n # composite images need special args so they will not\n # respect z-order for now\n renderer.draw_image(\n round(l), round(b), im, self.bbox,\n self.patch.get_path(),\n self.patch.get_transform())\n\n artists.extend(self.collections)\n artists.extend(self.patches)\n artists.extend(self.lines)\n artists.extend(self.texts)\n artists.extend(self.artists)\n if self.axison and not inframe:\n if self._axisbelow:\n self.xaxis.set_zorder(0.5)\n self.yaxis.set_zorder(0.5)\n else:\n self.xaxis.set_zorder(2.5)\n self.yaxis.set_zorder(2.5)\n artists.extend([self.xaxis, self.yaxis])\n if not inframe: artists.append(self.title)\n artists.extend(self.tables)\n if self.legend_ is not None:\n artists.append(self.legend_)\n\n # the frame draws the edges around the axes patch -- we\n # decouple these so the patch can be in the background and the\n # frame in the foreground.\n if self.axison and self._frameon:\n artists.append(self.frame)\n\n\n dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)\n if not a.get_animated() ]\n dsu.sort()\n\n for zorder, i, a in dsu:\n a.draw(renderer)\n\n renderer.close_group('axes')\n self._cachedRenderer = renderer\n\n def draw_artist(self, a):\n \"\"\"\n This method can only be used after an initial draw which\n caches the renderer. It is used to efficiently update Axes\n data (axis ticks, labels, etc are not updated)\n \"\"\"\n assert self._cachedRenderer is not None\n a.draw(self._cachedRenderer)\n\n def redraw_in_frame(self):\n \"\"\"\n This method can only be used after an initial draw which\n caches the renderer. 
        It is used to efficiently update Axes
        data (axis ticks, labels, etc. are not updated)
        """
        assert self._cachedRenderer is not None
        self.draw(self._cachedRenderer, inframe=True)

    def get_renderer_cache(self):
        return self._cachedRenderer

    def __draw_animate(self):
        # ignore for now; broken
        if self._lastRenderer is None:
            raise RuntimeError('You must first call ax.draw()')
        dsu = [(a.zorder, a) for a in self.animated.keys()]
        dsu.sort()
        renderer = self._lastRenderer
        renderer.blit()
        for tmp, a in dsu:
            a.draw(renderer)

    #### Axes rectangle characteristics

    def get_frame_on(self):
        """
        Get whether the axes rectangle patch is drawn
        """
        return self._frameon

    def set_frame_on(self, b):
        """
        Set whether the axes rectangle patch is drawn

        ACCEPTS: [ *True* | *False* ]
        """
        self._frameon = b

    def get_axisbelow(self):
        """
        Get whether axis below is true or not
        """
        return self._axisbelow

    def set_axisbelow(self, b):
        """
        Set whether the axis ticks and gridlines are above or below most artists

        ACCEPTS: [ *True* | *False* ]
        """
        self._axisbelow = b

    def grid(self, b=None, **kwargs):
        """
        call signature::

          grid(self, b=None, **kwargs)

        Set the axes grids on or off; *b* is a boolean

        If *b* is *None* and ``len(kwargs)==0``, toggle the grid state.  If
        *kwargs* are supplied, it is assumed that you want a grid and *b*
        is thus set to *True*

        *kwargs* are used to set the grid line properties, eg::

          ax.grid(color='r', linestyle='-', linewidth=2)

        Valid :class:`~matplotlib.lines.Line2D` kwargs are

        %(Line2D)s
        """
        if len(kwargs): b = True
        self.xaxis.grid(b, **kwargs)
        self.yaxis.grid(b, **kwargs)
    grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd

    def ticklabel_format(self, **kwargs):
        """
        Convenience method for manipulating the ScalarFormatter
        used by default for linear axes.

        Optional keyword arguments:

          ============   =====================================
          Keyword        Description
          ============   =====================================
          *style*        [ 'sci' (or 'scientific') | 'plain' ]
                         plain turns off scientific notation
          *scilimits*    (m, n), pair of integers; if *style*
                         is 'sci', scientific notation will
                         be used for numbers outside the range
                         10`-m`:sup: to 10`n`:sup:.
                         Use (0,0) to include all numbers.
          *axis*         [ 'x' | 'y' | 'both' ]
          ============   =====================================

        Only the major ticks are affected.
        If the method is called when the
        :class:`~matplotlib.ticker.ScalarFormatter` is not the
        :class:`~matplotlib.ticker.Formatter` being used, an
        :exc:`AttributeError` will be raised.

        """
        style = kwargs.pop('style', '').lower()
        scilimits = kwargs.pop('scilimits', None)
        if scilimits is not None:
            try:
                m, n = scilimits
                m+n+1  # check that both are numbers
            except (ValueError, TypeError):
                raise ValueError("scilimits must be a sequence of 2 integers")
        axis = kwargs.pop('axis', 'both').lower()
        if style[:3] == 'sci':
            sb = True
        elif style in ['plain', 'comma']:
            sb = False
            if style == 'plain':
                cb = False
            else:
                cb = True
                raise NotImplementedError, "comma style remains to be added"
        elif style == '':
            sb = None
        else:
            raise ValueError, "%s is not a valid style value" % style
        try:
            if sb is not None:
                if axis == 'both' or axis == 'x':
                    self.xaxis.major.formatter.set_scientific(sb)
                if axis == 'both' or axis == 'y':
                    self.yaxis.major.formatter.set_scientific(sb)
            if scilimits is not
None:\n if axis == 'both' or axis == 'x':\n self.xaxis.major.formatter.set_powerlimits(scilimits)\n if axis == 'both' or axis == 'y':\n self.yaxis.major.formatter.set_powerlimits(scilimits)\n except AttributeError:\n raise AttributeError(\n \"This method only works with the ScalarFormatter.\")\n\n def set_axis_off(self):\n \"\"\"turn off the axis\"\"\"\n self.axison = False\n\n def set_axis_on(self):\n \"\"\"turn on the axis\"\"\"\n self.axison = True\n\n def get_axis_bgcolor(self):\n 'Return the axis background color'\n return self._axisbg\n\n def set_axis_bgcolor(self, color):\n \"\"\"\n set the axes background color\n\n ACCEPTS: any matplotlib color - see\n :func:`~matplotlib.pyplot.colors`\n \"\"\"\n\n self._axisbg = color\n self.patch.set_facecolor(color)\n\n ### data limits, ticks, tick labels, and formatting\n\n def invert_xaxis(self):\n \"Invert the x-axis.\"\n left, right = self.get_xlim()\n self.set_xlim(right, left)\n\n def xaxis_inverted(self):\n 'Returns True if the x-axis is inverted.'\n left, right = self.get_xlim()\n return right < left\n\n def get_xbound(self):\n \"\"\"\n Returns the x-axis numerical bounds where::\n\n lowerBound < upperBound\n\n \"\"\"\n left, right = self.get_xlim()\n if left < right:\n return left, right\n else:\n return right, left\n\n def set_xbound(self, lower=None, upper=None):\n \"\"\"\n Set the lower and upper numerical bounds of the x-axis.\n This method will honor axes inversion regardless of parameter order.\n \"\"\"\n if upper is None and iterable(lower):\n lower,upper = lower\n\n old_lower,old_upper = self.get_xbound()\n\n if lower is None: lower = old_lower\n if upper is None: upper = old_upper\n\n if self.xaxis_inverted():\n if lower < upper:\n self.set_xlim(upper, lower)\n else:\n self.set_xlim(lower, upper)\n else:\n if lower < upper:\n self.set_xlim(lower, upper)\n else:\n self.set_xlim(upper, lower)\n\n def get_xlim(self):\n \"\"\"\n Get the x-axis range [*xmin*, *xmax*]\n \"\"\"\n return tuple(self.viewLim.intervalx)\n\n def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):\n \"\"\"\n call signature::\n\n set_xlim(self, *args, **kwargs)\n\n Set the limits for the xaxis\n\n Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]\n\n Examples::\n\n set_xlim((valmin, valmax))\n set_xlim(valmin, valmax)\n set_xlim(xmin=1) # xmax unchanged\n set_xlim(xmax=1) # xmin unchanged\n\n Keyword arguments:\n\n *ymin*: scalar\n the min of the ylim\n *ymax*: scalar\n the max of the ylim\n *emit*: [ True | False ]\n notify observers of lim change\n\n ACCEPTS: len(2) sequence of floats\n \"\"\"\n if xmax is None and iterable(xmin):\n xmin,xmax = xmin\n\n\n self._process_unit_info(xdata=(xmin, xmax))\n if xmin is not None:\n xmin = self.convert_xunits(xmin)\n if xmax is not None:\n xmax = self.convert_xunits(xmax)\n\n old_xmin,old_xmax = self.get_xlim()\n if xmin is None: xmin = old_xmin\n if xmax is None: xmax = old_xmax\n\n xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)\n xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)\n\n self.viewLim.intervalx = (xmin, xmax)\n\n if emit:\n self.callbacks.process('xlim_changed', self)\n # Call all of the other x-axes that are shared with this one\n for other in self._shared_x_axes.get_siblings(self):\n if other is not self:\n other.set_xlim(self.viewLim.intervalx, emit=False)\n if (other.figure != self.figure and\n other.figure.canvas is not None):\n other.figure.canvas.draw_idle()\n\n return xmin, xmax\n\n def get_xscale(self):\n 'return the xaxis scale string: %s' % 
(\n \", \".join(mscale.get_scale_names()))\n return self.xaxis.get_scale()\n\n def set_xscale(self, value, **kwargs):\n \"\"\"\n call signature::\n\n set_xscale(value)\n\n Set the scaling of the x-axis: %(scale)s\n\n ACCEPTS: [%(scale)s]\n\n Different kwargs are accepted, depending on the scale:\n %(scale_docs)s\n \"\"\"\n self.xaxis.set_scale(value, **kwargs)\n self.autoscale_view()\n self._update_transScale()\n\n set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {\n 'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),\n 'scale_docs': mscale.get_scale_docs().strip()}\n\n def get_xticks(self, minor=False):\n 'Return the x ticks as a list of locations'\n return self.xaxis.get_ticklocs(minor=minor)\n\n def set_xticks(self, ticks, minor=False):\n \"\"\"\n Set the x ticks with list of *ticks*\n\n ACCEPTS: sequence of floats\n \"\"\"\n return self.xaxis.set_ticks(ticks, minor=minor)\n\n def get_xmajorticklabels(self):\n 'Get the xtick labels as a list of Text instances'\n return cbook.silent_list('Text xticklabel',\n self.xaxis.get_majorticklabels())\n\n def get_xminorticklabels(self):\n 'Get the xtick labels as a list of Text instances'\n return cbook.silent_list('Text xticklabel',\n self.xaxis.get_minorticklabels())\n\n def get_xticklabels(self, minor=False):\n 'Get the xtick labels as a list of Text instances'\n return cbook.silent_list('Text xticklabel',\n self.xaxis.get_ticklabels(minor=minor))\n\n def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):\n \"\"\"\n call signature::\n\n set_xticklabels(labels, fontdict=None, minor=False, **kwargs)\n\n Set the xtick labels with list of strings *labels*. Return a\n list of axis text instances.\n\n *kwargs* set the :class:`~matplotlib.text.Text` properties.\n Valid properties are\n %(Text)s\n\n ACCEPTS: sequence of strings\n \"\"\"\n return self.xaxis.set_ticklabels(labels, fontdict,\n minor=minor, **kwargs)\n set_xticklabels.__doc__ = cbook.dedent(\n set_xticklabels.__doc__) % martist.kwdocd\n\n def invert_yaxis(self):\n \"Invert the y-axis.\"\n left, right = self.get_ylim()\n self.set_ylim(right, left)\n\n def yaxis_inverted(self):\n 'Returns True if the y-axis is inverted.'\n left, right = self.get_ylim()\n return right < left\n\n def get_ybound(self):\n \"Return y-axis numerical bounds in the form of lowerBound < upperBound\"\n left, right = self.get_ylim()\n if left < right:\n return left, right\n else:\n return right, left\n\n def set_ybound(self, lower=None, upper=None):\n \"\"\"Set the lower and upper numerical bounds of the y-axis.\n This method will honor axes inversion regardless of parameter order.\n \"\"\"\n if upper is None and iterable(lower):\n lower,upper = lower\n\n old_lower,old_upper = self.get_ybound()\n\n if lower is None: lower = old_lower\n if upper is None: upper = old_upper\n\n if self.yaxis_inverted():\n if lower < upper:\n self.set_ylim(upper, lower)\n else:\n self.set_ylim(lower, upper)\n else:\n if lower < upper:\n self.set_ylim(lower, upper)\n else:\n self.set_ylim(upper, lower)\n\n def get_ylim(self):\n \"\"\"\n Get the y-axis range [*ymin*, *ymax*]\n \"\"\"\n return tuple(self.viewLim.intervaly)\n\n def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):\n \"\"\"\n call signature::\n\n set_ylim(self, *args, **kwargs):\n\n Set the limits for the yaxis; v = [ymin, ymax]::\n\n set_ylim((valmin, valmax))\n set_ylim(valmin, valmax)\n set_ylim(ymin=1) # ymax unchanged\n set_ylim(ymax=1) # ymin unchanged\n\n Keyword arguments:\n\n *ymin*: scalar\n the min of the ylim\n *ymax*: 
            scalar
            the max of the ylim
          *emit*: [ True | False ]
            notify observers of lim change

        Returns the current ylimits as a length 2 tuple

        ACCEPTS: len(2) sequence of floats
        """
        if ymax is None and iterable(ymin):
            ymin,ymax = ymin

        if ymin is not None:
            ymin = self.convert_yunits(ymin)
        if ymax is not None:
            ymax = self.convert_yunits(ymax)

        old_ymin,old_ymax = self.get_ylim()

        if ymin is None: ymin = old_ymin
        if ymax is None: ymax = old_ymax

        ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
        ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
        self.viewLim.intervaly = (ymin, ymax)

        if emit:
            self.callbacks.process('ylim_changed', self)
            # Call all of the other y-axes that are shared with this one
            for other in self._shared_y_axes.get_siblings(self):
                if other is not self:
                    other.set_ylim(self.viewLim.intervaly, emit=False)
                    if (other.figure != self.figure and
                        other.figure.canvas is not None):
                        other.figure.canvas.draw_idle()
        return ymin, ymax

    def get_yscale(self):
        'return the yaxis scale string: %s' % (
            ", ".join(mscale.get_scale_names()))
        return self.yaxis.get_scale()

    def set_yscale(self, value, **kwargs):
        """
        call signature::

          set_yscale(value)

        Set the scaling of the y-axis: %(scale)s

        ACCEPTS: [%(scale)s]

        Different kwargs are accepted, depending on the scale:
        %(scale_docs)s
        """
        self.yaxis.set_scale(value, **kwargs)
        self.autoscale_view()
        self._update_transScale()

    set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
        'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
        'scale_docs': mscale.get_scale_docs().strip()}

    def get_yticks(self, minor=False):
        'Return the y ticks as a list of locations'
        return self.yaxis.get_ticklocs(minor=minor)

    def set_yticks(self, ticks, minor=False):
        """
        Set the y ticks with list of *ticks*

        ACCEPTS: sequence of floats

        Keyword arguments:

          *minor*: [ False | True ]
            Sets the minor ticks if True
        """
        return self.yaxis.set_ticks(ticks, minor=minor)

    def get_ymajorticklabels(self):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_majorticklabels())

    def get_yminorticklabels(self):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_minorticklabels())

    def get_yticklabels(self, minor=False):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_ticklabels(minor=minor))

    def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
        """
        call signature::

          set_yticklabels(labels, fontdict=None, minor=False, **kwargs)

        Set the ytick labels with list of strings *labels*.  Return a list of
        :class:`~matplotlib.text.Text` instances.

        *kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
        Valid properties are
        %(Text)s

        ACCEPTS: sequence of strings
        """
        return self.yaxis.set_ticklabels(labels, fontdict,
                                         minor=minor, **kwargs)
    set_yticklabels.__doc__ = cbook.dedent(
        set_yticklabels.__doc__) % martist.kwdocd

    def xaxis_date(self, tz=None):
        """Sets up x-axis ticks and labels that treat the x data as dates.

        *tz* is the time zone to use in labeling dates.
        Defaults to rc value.
        """

        xmin, xmax = self.dataLim.intervalx
        if xmin==0.:
            # no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the
            # datalim has been updated than the ignore setting
            dmax = today = datetime.date.today()
            dmin = today-datetime.timedelta(days=10)
            self._process_unit_info(xdata=(dmin, dmax))
            dmin, dmax = self.convert_xunits([dmin, dmax])
            self.viewLim.intervalx = dmin, dmax
            self.dataLim.intervalx = dmin, dmax

        locator = self.xaxis.get_major_locator()
        if not isinstance(locator, mdates.DateLocator):
            locator = mdates.AutoDateLocator(tz)
            self.xaxis.set_major_locator(locator)

        # the autolocator uses the viewlim to pick the right date
        # locator, but it may not have correct viewlim before an
        # autoscale.  If the viewlim is still zero..1, set it to the
        # datalim and the autoscaler will update it on request
        if self.viewLim.intervalx[0]==0.:
            self.viewLim.intervalx = tuple(self.dataLim.intervalx)
        locator.refresh()

        formatter = self.xaxis.get_major_formatter()
        if not isinstance(formatter, mdates.DateFormatter):
            formatter = mdates.AutoDateFormatter(locator, tz)
            self.xaxis.set_major_formatter(formatter)

    def yaxis_date(self, tz=None):
        """Sets up y-axis ticks and labels that treat the y data as dates.

        *tz* is the time zone to use in labeling dates.  Defaults to rc value.
        """
        ymin, ymax = self.dataLim.intervaly
        if ymin==0.:
            # no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the
            # datalim has been updated than the ignore setting
            dmax = today = datetime.date.today()
            dmin = today-datetime.timedelta(days=10)
            self._process_unit_info(ydata=(dmin, dmax))

            dmin, dmax = self.convert_yunits([dmin, dmax])
            self.viewLim.intervaly = dmin, dmax
            self.dataLim.intervaly = dmin, dmax


        locator = self.yaxis.get_major_locator()
        if not isinstance(locator, mdates.DateLocator):
            locator = mdates.AutoDateLocator(tz)
            self.yaxis.set_major_locator(locator)

        # the autolocator uses the viewlim to pick the right date
        # locator, but it may not have correct viewlim before an
        # autoscale.  If the viewlim is still zero..1, set it to the
        # datalim and the autoscaler will update it on request
        if self.viewLim.intervaly[0]==0.:
            self.viewLim.intervaly = tuple(self.dataLim.intervaly)
        locator.refresh()

        formatter = self.yaxis.get_major_formatter()
        if not isinstance(formatter, mdates.DateFormatter):
            formatter = mdates.AutoDateFormatter(locator, tz)
            self.yaxis.set_major_formatter(formatter)

    def format_xdata(self, x):
        """
        Return *x* string formatted.  This function will use the attribute
        self.fmt_xdata if it is callable, else will fall back on the xaxis
        major formatter
        """
        try: return self.fmt_xdata(x)
        except TypeError:
            func = self.xaxis.get_major_formatter().format_data_short
            val = func(x)
            return val

    def format_ydata(self, y):
        """
        Return y string formatted.
This function will use the\n :attr:`fmt_ydata` attribute if it is callable, else will fall\n back on the yaxis major formatter\n \"\"\"\n try: return self.fmt_ydata(y)\n except TypeError:\n func = self.yaxis.get_major_formatter().format_data_short\n val = func(y)\n return val\n\n def format_coord(self, x, y):\n 'return a format string formatting the *x*, *y* coord'\n if x is None:\n x = '???'\n if y is None:\n y = '???'\n xs = self.format_xdata(x)\n ys = self.format_ydata(y)\n return 'x=%s, y=%s'%(xs,ys)\n\n #### Interactive manipulation\n\n def can_zoom(self):\n \"\"\"\n Return *True* if this axes support the zoom box\n \"\"\"\n return True\n\n def get_navigate(self):\n \"\"\"\n Get whether the axes responds to navigation commands\n \"\"\"\n return self._navigate\n\n def set_navigate(self, b):\n \"\"\"\n Set whether the axes responds to navigation toolbar commands\n\n ACCEPTS: [ True | False ]\n \"\"\"\n self._navigate = b\n\n def get_navigate_mode(self):\n \"\"\"\n Get the navigation toolbar button status: 'PAN', 'ZOOM', or None\n \"\"\"\n return self._navigate_mode\n\n def set_navigate_mode(self, b):\n \"\"\"\n Set the navigation toolbar button status;\n\n .. warning::\n this is not a user-API function.\n\n \"\"\"\n self._navigate_mode = b\n\n def start_pan(self, x, y, button):\n \"\"\"\n Called when a pan operation has started.\n\n *x*, *y* are the mouse coordinates in display coords.\n button is the mouse button number:\n\n * 1: LEFT\n * 2: MIDDLE\n * 3: RIGHT\n\n .. note::\n Intended to be overridden by new projection types.\n \"\"\"\n self._pan_start = cbook.Bunch(\n lim = self.viewLim.frozen(),\n trans = self.transData.frozen(),\n trans_inverse = self.transData.inverted().frozen(),\n bbox = self.bbox.frozen(),\n x = x,\n y = y\n )\n\n def end_pan(self):\n \"\"\"\n Called when a pan operation completes (when the mouse button\n is up.)\n\n .. note::\n Intended to be overridden by new projection types.\n \"\"\"\n del self._pan_start\n\n def drag_pan(self, button, key, x, y):\n \"\"\"\n Called when the mouse moves during a pan operation.\n\n *button* is the mouse button number:\n\n * 1: LEFT\n * 2: MIDDLE\n * 3: RIGHT\n\n *key* is a \"shift\" key\n\n *x*, *y* are the mouse coordinates in display coords.\n\n .. 
note::\n Intended to be overridden by new projection types.\n \"\"\"\n def format_deltas(key, dx, dy):\n if key=='control':\n if(abs(dx)>abs(dy)):\n dy = dx\n else:\n dx = dy\n elif key=='x':\n dy = 0\n elif key=='y':\n dx = 0\n elif key=='shift':\n if 2*abs(dx) < abs(dy):\n dx=0\n elif 2*abs(dy) < abs(dx):\n dy=0\n elif(abs(dx)>abs(dy)):\n dy=dy/abs(dy)*abs(dx)\n else:\n dx=dx/abs(dx)*abs(dy)\n return (dx,dy)\n\n p = self._pan_start\n dx = x - p.x\n dy = y - p.y\n if dx == 0 and dy == 0:\n return\n if button == 1:\n dx, dy = format_deltas(key, dx, dy)\n result = p.bbox.translated(-dx, -dy) \\\n .transformed(p.trans_inverse)\n elif button == 3:\n try:\n dx = -dx / float(self.bbox.width)\n dy = -dy / float(self.bbox.height)\n dx, dy = format_deltas(key, dx, dy)\n if self.get_aspect() != 'auto':\n dx = 0.5 * (dx + dy)\n dy = dx\n\n alpha = np.power(10.0, (dx, dy))\n start = p.trans_inverse.transform_point((p.x, p.y))\n lim_points = p.lim.get_points()\n result = start + alpha * (lim_points - start)\n result = mtransforms.Bbox(result)\n except OverflowError:\n warnings.warn('Overflow while panning')\n return\n\n self.set_xlim(*result.intervalx)\n self.set_ylim(*result.intervaly)\n\n def get_cursor_props(self):\n \"\"\"\n return the cursor propertiess as a (*linewidth*, *color*)\n tuple, where *linewidth* is a float and *color* is an RGBA\n tuple\n \"\"\"\n return self._cursorProps\n\n def set_cursor_props(self, *args):\n \"\"\"\n Set the cursor property as::\n\n ax.set_cursor_props(linewidth, color)\n\n or::\n\n ax.set_cursor_props((linewidth, color))\n\n ACCEPTS: a (*float*, *color*) tuple\n \"\"\"\n if len(args)==1:\n lw, c = args[0]\n elif len(args)==2:\n lw, c = args\n else:\n raise ValueError('args must be a (linewidth, color) tuple')\n c =mcolors.colorConverter.to_rgba(c)\n self._cursorProps = lw, c\n\n def connect(self, s, func):\n \"\"\"\n Register observers to be notified when certain events occur. Register\n with callback functions with the following signatures. 
The function\n has the following signature::\n\n func(ax) # where ax is the instance making the callback.\n\n The following events can be connected to:\n\n 'xlim_changed','ylim_changed'\n\n The connection id is is returned - you can use this with\n disconnect to disconnect from the axes event\n\n \"\"\"\n raise DeprecationWarning('use the callbacks CallbackRegistry instance '\n 'instead')\n\n def disconnect(self, cid):\n 'disconnect from the Axes event.'\n raise DeprecationWarning('use the callbacks CallbackRegistry instance '\n 'instead')\n\n def get_children(self):\n 'return a list of child artists'\n children = []\n children.append(self.xaxis)\n children.append(self.yaxis)\n children.extend(self.lines)\n children.extend(self.patches)\n children.extend(self.texts)\n children.extend(self.tables)\n children.extend(self.artists)\n children.extend(self.images)\n if self.legend_ is not None:\n children.append(self.legend_)\n children.extend(self.collections)\n children.append(self.title)\n children.append(self.patch)\n children.append(self.frame)\n return children\n\n def contains(self,mouseevent):\n \"\"\"Test whether the mouse event occured in the axes.\n\n Returns T/F, {}\n \"\"\"\n if callable(self._contains): return self._contains(self,mouseevent)\n\n return self.patch.contains(mouseevent)\n\n def pick(self, *args):\n \"\"\"\n call signature::\n\n pick(mouseevent)\n\n each child artist will fire a pick event if mouseevent is over\n the artist and the artist has picker set\n \"\"\"\n if len(args)>1:\n raise DeprecationWarning('New pick API implemented -- '\n 'see API_CHANGES in the src distribution')\n martist.Artist.pick(self,args[0])\n\n def __pick(self, x, y, trans=None, among=None):\n \"\"\"\n Return the artist under point that is closest to the *x*, *y*.\n If *trans* is *None*, *x*, and *y* are in window coords,\n (0,0 = lower left). Otherwise, *trans* is a\n :class:`~matplotlib.transforms.Transform` that specifies the\n coordinate system of *x*, *y*.\n\n The selection of artists from amongst which the pick function\n finds an artist can be narrowed using the optional keyword\n argument *among*. 
If provided, this should be either a sequence\n of permitted artists or a function taking an artist as its\n argument and returning a true value if and only if that artist\n can be selected.\n\n Note this algorithm calculates distance to the vertices of the\n polygon, so if you want to pick a patch, click on the edge!\n \"\"\"\n # MGDTODO: Needs updating\n if trans is not None:\n xywin = trans.transform_point((x,y))\n else:\n xywin = x,y\n\n def dist_points(p1, p2):\n 'return the distance between two points'\n x1, y1 = p1\n x2, y2 = p2\n return math.sqrt((x1-x2)**2+(y1-y2)**2)\n\n def dist_x_y(p1, x, y):\n '*x* and *y* are arrays; return the distance to the closest point'\n x1, y1 = p1\n return min(np.sqrt((x-x1)**2+(y-y1)**2))\n\n def dist(a):\n if isinstance(a, Text):\n bbox = a.get_window_extent()\n l,b,w,h = bbox.bounds\n verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)\n xt, yt = zip(*verts)\n elif isinstance(a, Patch):\n path = a.get_path()\n tverts = a.get_transform().transform_path(path)\n xt, yt = zip(*tverts)\n elif isinstance(a, mlines.Line2D):\n xdata = a.get_xdata(orig=False)\n ydata = a.get_ydata(orig=False)\n xt, yt = a.get_transform().numerix_x_y(xdata, ydata)\n\n return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))\n\n artists = self.lines + self.patches + self.texts\n if callable(among):\n artists = filter(among, artists)\n elif iterable(among):\n amongd = dict([(k,1) for k in among])\n artists = [a for a in artists if a in amongd]\n elif among is None:\n pass\n else:\n raise ValueError('among must be callable or iterable')\n if not len(artists): return None\n ds = [ (dist(a),a) for a in artists]\n ds.sort()\n return ds[0][1]\n\n #### Labelling\n\n def get_title(self):\n \"\"\"\n Get the title text string.\n \"\"\"\n return self.title.get_text()\n\n def set_title(self, label, fontdict=None, **kwargs):\n \"\"\"\n call signature::\n\n set_title(label, fontdict=None, **kwargs):\n\n Set the title for the axes.\n\n kwargs are Text properties:\n %(Text)s\n\n ACCEPTS: str\n\n .. seealso::\n :meth:`text`:\n for information on how override and the optional args work\n \"\"\"\n default = {\n 'fontsize':rcParams['axes.titlesize'],\n 'verticalalignment' : 'bottom',\n 'horizontalalignment' : 'center'\n }\n\n self.title.set_text(label)\n self.title.update(default)\n if fontdict is not None: self.title.update(fontdict)\n self.title.update(kwargs)\n return self.title\n set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd\n\n def get_xlabel(self):\n \"\"\"\n Get the xlabel text string.\n \"\"\"\n label = self.xaxis.get_label()\n return label.get_text()\n\n def set_xlabel(self, xlabel, fontdict=None, **kwargs):\n \"\"\"\n call signature::\n\n set_xlabel(xlabel, fontdict=None, **kwargs)\n\n Set the label for the xaxis.\n\n Valid kwargs are Text properties:\n %(Text)s\n ACCEPTS: str\n\n .. seealso::\n :meth:`text`:\n for information on how override and the optional args work\n \"\"\"\n\n label = self.xaxis.get_label()\n label.set_text(xlabel)\n if fontdict is not None: label.update(fontdict)\n label.update(kwargs)\n return label\n set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd\n\n def get_ylabel(self):\n \"\"\"\n Get the ylabel text string.\n \"\"\"\n label = self.yaxis.get_label()\n return label.get_text()\n\n def set_ylabel(self, ylabel, fontdict=None, **kwargs):\n \"\"\"\n call signature::\n\n set_ylabel(ylabel, fontdict=None, **kwargs)\n\n Set the label for the yaxis.\n\n Valid kwargs are Text properties:\n %(Text)s\n ACCEPTS: str\n\n .. 
seealso::\n :meth:`text`:\n for information on how override and the optional args work\n \"\"\"\n label = self.yaxis.get_label()\n label.set_text(ylabel)\n if fontdict is not None: label.update(fontdict)\n label.update(kwargs)\n return label\n set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd\n\n def text(self, x, y, s, fontdict=None,\n withdash=False, **kwargs):\n \"\"\"\n call signature::\n\n text(x, y, s, fontdict=None, **kwargs)\n\n Add text in string *s* to axis at location *x*, *y*, data\n coordinates.\n\n Keyword arguments:\n\n *fontdict*:\n A dictionary to override the default text properties.\n If *fontdict* is *None*, the defaults are determined by your rc\n parameters.\n\n *withdash*: [ False | True ]\n Creates a :class:`~matplotlib.text.TextWithDash` instance\n instead of a :class:`~matplotlib.text.Text` instance.\n\n Individual keyword arguments can be used to override any given\n parameter::\n\n text(x, y, s, fontsize=12)\n\n The default transform specifies that text is in data coords,\n alternatively, you can specify text in axis coords (0,0 is\n lower-left and 1,1 is upper-right). The example below places\n text in the center of the axes::\n\n text(0.5, 0.5,'matplotlib',\n horizontalalignment='center',\n verticalalignment='center',\n transform = ax.transAxes)\n\n You can put a rectangular box around the text instance (eg. to\n set a background color) by using the keyword *bbox*. *bbox* is\n a dictionary of :class:`matplotlib.patches.Rectangle`\n properties. For example::\n\n text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))\n\n Valid kwargs are :class:`matplotlib.text.Text` properties:\n\n %(Text)s\n \"\"\"\n default = {\n 'verticalalignment' : 'bottom',\n 'horizontalalignment' : 'left',\n #'verticalalignment' : 'top',\n 'transform' : self.transData,\n }\n\n # At some point if we feel confident that TextWithDash\n # is robust as a drop-in replacement for Text and that\n # the performance impact of the heavier-weight class\n # isn't too significant, it may make sense to eliminate\n # the withdash kwarg and simply delegate whether there's\n # a dash to TextWithDash and dashlength.\n if withdash:\n t = mtext.TextWithDash(\n x=x, y=y, text=s,\n )\n else:\n t = mtext.Text(\n x=x, y=y, text=s,\n )\n self._set_artist_props(t)\n\n t.update(default)\n if fontdict is not None: t.update(fontdict)\n t.update(kwargs)\n self.texts.append(t)\n t._remove_method = lambda h: self.texts.remove(h)\n\n\n #if t.get_clip_on(): t.set_clip_box(self.bbox)\n if 'clip_on' in kwargs: t.set_clip_box(self.bbox)\n return t\n text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd\n\n def annotate(self, *args, **kwargs):\n \"\"\"\n call signature::\n\n annotate(s, xy, xytext=None, xycoords='data',\n textcoords='data', arrowprops=None, **kwargs)\n\n Keyword arguments:\n\n %(Annotation)s\n\n .. plot:: mpl_examples/pylab_examples/annotation_demo2.py\n \"\"\"\n a = mtext.Annotation(*args, **kwargs)\n a.set_transform(mtransforms.IdentityTransform())\n self._set_artist_props(a)\n if kwargs.has_key('clip_on'): a.set_clip_path(self.patch)\n self.texts.append(a)\n return a\n annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd\n\n #### Lines and spans\n\n def axhline(self, y=0, xmin=0, xmax=1, **kwargs):\n \"\"\"\n call signature::\n\n axhline(y=0, xmin=0, xmax=1, **kwargs)\n\n Axis Horizontal Line\n\n Draw a horizontal line at *y* from *xmin* to *xmax*. 
With the\n default values of *xmin* = 0 and *xmax* = 1, this line will\n always span the horizontal extent of the axes, regardless of\n the xlim settings, even if you change them, eg. with the\n :meth:`set_xlim` command. That is, the horizontal extent is\n in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*\n location is in data coordinates.\n\n Return value is the :class:`~matplotlib.lines.Line2D`\n instance. kwargs are the same as kwargs to plot, and can be\n used to control the line properties. Eg.,\n\n * draw a thick red hline at *y* = 0 that spans the xrange\n\n >>> axhline(linewidth=4, color='r')\n\n * draw a default hline at *y* = 1 that spans the xrange\n\n >>> axhline(y=1)\n\n * draw a default hline at *y* = .5 that spans the middle half of\n the xrange\n\n >>> axhline(y=.5, xmin=0.25, xmax=0.75)\n\n Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n .. seealso::\n :meth:`axhspan`:\n for example plot and source code\n \"\"\"\n\n ymin, ymax = self.get_ybound()\n\n # We need to strip away the units for comparison with\n # non-unitized bounds\n yy = self.convert_yunits( y )\n scaley = (yy<ymin) or (yy>ymax)\n\n trans = mtransforms.blended_transform_factory(\n self.transAxes, self.transData)\n l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)\n l.x_isdata = False\n self.add_line(l)\n self.autoscale_view(scalex=False, scaley=scaley)\n return l\n\n axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd\n\n def axvline(self, x=0, ymin=0, ymax=1, **kwargs):\n \"\"\"\n call signature::\n\n axvline(x=0, ymin=0, ymax=1, **kwargs)\n\n Axis Vertical Line\n\n Draw a vertical line at *x* from *ymin* to *ymax*. With the\n default values of *ymin* = 0 and *ymax* = 1, this line will\n always span the vertical extent of the axes, regardless of the\n ylim settings, even if you change them, eg. with the\n :meth:`set_ylim` command. That is, the vertical extent is in\n axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location\n is in data coordinates.\n\n Return value is the :class:`~matplotlib.lines.Line2D`\n instance. kwargs are the same as kwargs to plot, and can be\n used to control the line properties. Eg.,\n\n * draw a thick red vline at *x* = 0 that spans the yrange\n\n >>> axvline(linewidth=4, color='r')\n\n * draw a default vline at *x* = 1 that spans the yrange\n\n >>> axvline(x=1)\n\n * draw a default vline at *x* = .5 that spans the middle half of\n the yrange\n\n >>> axvline(x=.5, ymin=0.25, ymax=0.75)\n\n Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n .. 
seealso::\n :meth:`axhspan`:\n for example plot and source code\n \"\"\"\n\n xmin, xmax = self.get_xbound()\n\n # We need to strip away the units for comparison with\n # non-unitized bounds\n xx = self.convert_xunits( x )\n scalex = (xx<xmin) or (xx>xmax)\n\n trans = mtransforms.blended_transform_factory(\n self.transData, self.transAxes)\n l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)\n l.y_isdata = False\n self.add_line(l)\n self.autoscale_view(scalex=scalex, scaley=False)\n return l\n\n axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd\n\n def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):\n \"\"\"\n call signature::\n\n axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)\n\n Axis Horizontal Span.\n\n *y* coords are in data units and *x* coords are in axes (relative\n 0-1) units.\n\n Draw a horizontal span (rectangle) from *ymin* to *ymax*.\n With the default values of *xmin* = 0 and *xmax* = 1, this\n always spans the xrange, regardless of the xlim settings, even\n if you change them, eg. with the :meth:`set_xlim` command.\n That is, the horizontal extent is in axes coords: 0=left,\n 0.5=middle, 1.0=right but the *y* location is in data\n coordinates.\n\n Return value is a :class:`matplotlib.patches.Polygon`\n instance.\n\n Examples:\n\n * draw a gray rectangle from *y* = 0.25-0.75 that spans the\n horizontal extent of the axes\n\n >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)\n\n Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:\n\n %(Polygon)s\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/axhspan_demo.py\n\n \"\"\"\n trans = mtransforms.blended_transform_factory(\n self.transAxes, self.transData)\n\n # process the unit information\n self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )\n\n # first we need to strip away the units\n xmin, xmax = self.convert_xunits( [xmin, xmax] )\n ymin, ymax = self.convert_yunits( [ymin, ymax] )\n\n verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)\n p = mpatches.Polygon(verts, **kwargs)\n p.set_transform(trans)\n p.x_isdata = False\n self.add_patch(p)\n return p\n axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd\n\n def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):\n \"\"\"\n call signature::\n\n axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)\n\n Axis Vertical Span.\n\n *x* coords are in data units and *y* coords are in axes (relative\n 0-1) units.\n\n Draw a vertical span (rectangle) from *xmin* to *xmax*. With\n the default values of *ymin* = 0 and *ymax* = 1, this always\n spans the yrange, regardless of the ylim settings, even if you\n change them, eg. with the :meth:`set_ylim` command. That is,\n the vertical extent is in axes coords: 0=bottom, 0.5=middle,\n 1.0=top but the *x* location is in data coordinates.\n\n Return value is the :class:`matplotlib.patches.Polygon`\n instance.\n\n Examples:\n\n * draw a vertical green translucent rectangle from x=1.25 to 1.55 that\n spans the yrange of the axes\n\n >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)\n\n Valid kwargs are :class:`~matplotlib.patches.Polygon`\n properties:\n\n %(Polygon)s\n\n .. 
seealso::\n :meth:`axhspan`:\n for example plot and source code\n \"\"\"\n trans = mtransforms.blended_transform_factory(\n self.transData, self.transAxes)\n\n # process the unit information\n self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )\n\n # first we need to strip away the units\n xmin, xmax = self.convert_xunits( [xmin, xmax] )\n ymin, ymax = self.convert_yunits( [ymin, ymax] )\n\n verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]\n p = mpatches.Polygon(verts, **kwargs)\n p.set_transform(trans)\n p.y_isdata = False\n self.add_patch(p)\n return p\n axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd\n\n\n def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',\n label='', **kwargs):\n \"\"\"\n call signature::\n\n hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)\n\n Plot horizontal lines at each *y* from *xmin* to *xmax*.\n\n Returns the :class:`~matplotlib.collections.LineCollection`\n that was added.\n\n Required arguments:\n\n *y*:\n a 1-D numpy array or iterable.\n\n *xmin* and *xmax*:\n can be scalars or ``len(x)`` numpy arrays. If they are\n scalars, then the respective values are constant, else the\n widths of the lines are determined by *xmin* and *xmax*.\n\n Optional keyword arguments:\n\n *colors*:\n a line collections color argument, either a single color\n or a ``len(y)`` list of colors\n\n *linestyles*:\n [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/hline_demo.py\n \"\"\"\n if kwargs.get('fmt') is not None:\n raise DeprecationWarning('hlines now uses a '\n 'collections.LineCollection and not a '\n 'list of Line2D to draw; see API_CHANGES')\n\n # We do the conversion first since not all unitized data is uniform\n y = self.convert_yunits( y )\n xmin = self.convert_xunits( xmin )\n xmax = self.convert_xunits( xmax )\n\n if not iterable(y): y = [y]\n if not iterable(xmin): xmin = [xmin]\n if not iterable(xmax): xmax = [xmax]\n\n y = np.asarray(y)\n xmin = np.asarray(xmin)\n xmax = np.asarray(xmax)\n\n if len(xmin)==1:\n xmin = np.resize( xmin, y.shape )\n if len(xmax)==1:\n xmax = np.resize( xmax, y.shape )\n\n if len(xmin)!=len(y):\n raise ValueError, 'xmin and y are unequal sized sequences'\n if len(xmax)!=len(y):\n raise ValueError, 'xmax and y are unequal sized sequences'\n\n verts = [ ((thisxmin, thisy), (thisxmax, thisy))\n for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]\n coll = mcoll.LineCollection(verts, colors=colors,\n linestyles=linestyles, label=label)\n self.add_collection(coll)\n coll.update(kwargs)\n\n minx = min(xmin.min(), xmax.min())\n maxx = max(xmin.max(), xmax.max())\n miny = y.min()\n maxy = y.max()\n\n corners = (minx, miny), (maxx, maxy)\n\n self.update_datalim(corners)\n self.autoscale_view()\n\n\n return coll\n hlines.__doc__ = cbook.dedent(hlines.__doc__)\n\n def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',\n label='', **kwargs):\n \"\"\"\n call signature::\n\n vlines(x, ymin, ymax, color='k', linestyles='solid')\n\n Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*\n or *ymax* can be scalars or len(*x*) numpy arrays. 
If they are\n scalars, then the respective values are constant, else the\n heights of the lines are determined by *ymin* and *ymax*.\n\n *colors*\n a line collections color args, either a single color\n or a len(*x*) list of colors\n\n *linestyles*\n\n one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n\n Returns the :class:`matplotlib.collections.LineCollection`\n that was added.\n\n kwargs are :class:`~matplotlib.collections.LineCollection` properties:\n\n %(LineCollection)s\n \"\"\"\n\n if kwargs.get('fmt') is not None:\n raise DeprecationWarning('vlines now uses a '\n 'collections.LineCollection and not a '\n 'list of Line2D to draw; see API_CHANGES')\n\n self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)\n\n # We do the conversion first since not all unitized data is uniform\n x = self.convert_xunits( x )\n ymin = self.convert_yunits( ymin )\n ymax = self.convert_yunits( ymax )\n\n if not iterable(x): x = [x]\n if not iterable(ymin): ymin = [ymin]\n if not iterable(ymax): ymax = [ymax]\n\n x = np.asarray(x)\n ymin = np.asarray(ymin)\n ymax = np.asarray(ymax)\n if len(ymin)==1:\n ymin = np.resize( ymin, x.shape )\n if len(ymax)==1:\n ymax = np.resize( ymax, x.shape )\n\n if len(ymin)!=len(x):\n raise ValueError, 'ymin and x are unequal sized sequences'\n if len(ymax)!=len(x):\n raise ValueError, 'ymax and x are unequal sized sequences'\n\n Y = np.array([ymin, ymax]).T\n\n verts = [ ((thisx, thisymin), (thisx, thisymax))\n for thisx, (thisymin, thisymax) in zip(x,Y)]\n #print 'creating line collection'\n coll = mcoll.LineCollection(verts, colors=colors,\n linestyles=linestyles, label=label)\n self.add_collection(coll)\n coll.update(kwargs)\n\n minx = min( x )\n maxx = max( x )\n\n miny = min( min(ymin), min(ymax) )\n maxy = max( max(ymin), max(ymax) )\n\n corners = (minx, miny), (maxx, maxy)\n self.update_datalim(corners)\n self.autoscale_view()\n\n return coll\n vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd\n\n #### Basic plotting\n def plot(self, *args, **kwargs):\n \"\"\"\n Plot lines and/or markers to the\n :class:`~matplotlib.axes.Axes`. *args* is a variable length\n argument, allowing for multiple *x*, *y* pairs with an\n optional format string. For example, each of the following is\n legal::\n\n plot(x, y) # plot x and y using default line style and color\n plot(x, y, 'bo') # plot x and y using blue circle markers\n plot(y) # plot y using x as index array 0..N-1\n plot(y, 'r+') # ditto, but with red plusses\n\n If *x* and/or *y* is 2-dimensional, then the corresponding columns\n will be plotted.\n\n An arbitrary number of *x*, *y*, *fmt* groups can be\n specified, as in::\n\n a.plot(x1, y1, 'g^', x2, y2, 'g-')\n\n Return value is a list of lines that were added.\n\n The following format string characters are accepted to control\n the line style or marker:\n\n ================ ===============================\n character description\n ================ ===============================\n '-' solid line style\n '--' dashed line style\n '-.' dash-dot line style\n ':' dotted line style\n '.' 
point marker\n ',' pixel marker\n 'o' circle marker\n 'v' triangle_down marker\n '^' triangle_up marker\n '<' triangle_left marker\n '>' triangle_right marker\n '1' tri_down marker\n '2' tri_up marker\n '3' tri_left marker\n '4' tri_right marker\n 's' square marker\n 'p' pentagon marker\n '*' star marker\n 'h' hexagon1 marker\n 'H' hexagon2 marker\n '+' plus marker\n 'x' x marker\n 'D' diamond marker\n 'd' thin_diamond marker\n '|' vline marker\n '_' hline marker\n ================ ===============================\n\n\n The following color abbreviations are supported:\n\n ========== ========\n character color\n ========== ========\n 'b' blue\n 'g' green\n 'r' red\n 'c' cyan\n 'm' magenta\n 'y' yellow\n 'k' black\n 'w' white\n ========== ========\n\n In addition, you can specify colors in many weird and\n wonderful ways, including full names (``'green'``), hex\n strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or\n grayscale intensities as a string (``'0.8'``). Of these, the\n string specifications can be used in place of a ``fmt`` group,\n but the tuple forms can be used only as ``kwargs``.\n\n Line styles and colors are combined in a single format string, as in\n ``'bo'`` for blue circles.\n\n The *kwargs* can be used to set line properties (any property that has\n a ``set_*`` method). You can use this to set a line label (for auto\n legends), linewidth, anitialising, marker face color, etc. Here is an\n example::\n\n plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)\n plot([1,2,3], [1,4,9], 'rs', label='line 2')\n axis([0, 4, 0, 10])\n legend()\n\n If you make multiple lines with one plot command, the kwargs\n apply to all those lines, e.g.::\n\n plot(x1, y1, x2, y2, antialised=False)\n\n Neither line will be antialiased.\n\n You do not need to use format strings, which are just\n abbreviations. All of the line properties can be controlled\n by keyword arguments. For example, you can set the color,\n marker, linestyle, and markercolor with::\n\n plot(x, y, color='green', linestyle='dashed', marker='o',\n markerfacecolor='blue', markersize=12). See\n :class:`~matplotlib.lines.Line2D` for details.\n\n The kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n kwargs *scalex* and *scaley*, if defined, are passed on to\n :meth:`~matplotlib.axes.Axes.autoscale_view` to determine\n whether the *x* and *y* axes are autoscaled; the default is\n *True*.\n \"\"\"\n scalex = kwargs.pop( 'scalex', True)\n scaley = kwargs.pop( 'scaley', True)\n\n if not self._hold: self.cla()\n lines = []\n\n for line in self._get_lines(*args, **kwargs):\n self.add_line(line)\n lines.append(line)\n\n\n self.autoscale_view(scalex=scalex, scaley=scaley)\n return lines\n\n plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd\n\n def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,\n **kwargs):\n \"\"\"\n call signature::\n\n plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)\n\n Similar to the :func:`~matplotlib.pyplot.plot` command, except\n the *x* or *y* (or both) data is considered to be dates, and the\n axis is labeled accordingly.\n\n *x* and/or *y* can be a sequence of dates represented as float\n days since 0001-01-01 UTC.\n\n Keyword arguments:\n\n *fmt*: string\n The plot format string.\n\n *tz*: [ None | timezone string ]\n The time zone to use in labeling dates. 
If *None*, defaults to rc\n value.\n\n *xdate*: [ True | False ]\n If *True*, the *x*-axis will be labeled with dates.\n\n *ydate*: [ False | True ]\n If *True*, the *y*-axis will be labeled with dates.\n\n Note if you are using custom date tickers and formatters, it\n may be necessary to set the formatters/locators after the call\n to :meth:`plot_date` since :meth:`plot_date` will set the\n default tick locator to\n :class:`matplotlib.ticker.AutoDateLocator` (if the tick\n locator is not already set to a\n :class:`matplotlib.ticker.DateLocator` instance) and the\n default tick formatter to\n :class:`matplotlib.ticker.AutoDateFormatter` (if the tick\n formatter is not already set to a\n :class:`matplotlib.ticker.DateFormatter` instance).\n\n Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n .. seealso::\n :mod:`~matplotlib.dates`:\n for helper functions\n\n :func:`~matplotlib.dates.date2num`,\n :func:`~matplotlib.dates.num2date` and\n :func:`~matplotlib.dates.drange`:\n for help on creating the required floating point\n dates.\n \"\"\"\n\n if not self._hold: self.cla()\n\n ret = self.plot(x, y, fmt, **kwargs)\n\n if xdate:\n self.xaxis_date(tz)\n if ydate:\n self.yaxis_date(tz)\n\n self.autoscale_view()\n\n return ret\n plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd\n\n\n def loglog(self, *args, **kwargs):\n \"\"\"\n call signature::\n\n loglog(*args, **kwargs)\n\n Make a plot with log scaling on the *x* and *y* axis.\n\n :func:`~matplotlib.pyplot.loglog` supports all the keyword\n arguments of :func:`~matplotlib.pyplot.plot` and\n :meth:`matplotlib.axes.Axes.set_xscale` /\n :meth:`matplotlib.axes.Axes.set_yscale`.\n\n Notable keyword arguments:\n\n *basex*/*basey*: scalar > 1\n base of the *x*/*y* logarithm\n\n *subsx*/*subsy*: [ None | sequence ]\n the location of the minor *x*/*y* ticks; *None* defaults\n to autosubs, which depend on the number of decades in the\n plot; see :meth:`matplotlib.axes.Axes.set_xscale` /\n :meth:`matplotlib.axes.Axes.set_yscale` for details\n\n The remaining valid kwargs are\n :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/log_demo.py\n\n \"\"\"\n if not self._hold: self.cla()\n\n dx = {'basex': kwargs.pop('basex', 10),\n 'subsx': kwargs.pop('subsx', None),\n }\n dy = {'basey': kwargs.pop('basey', 10),\n 'subsy': kwargs.pop('subsy', None),\n }\n\n self.set_xscale('log', **dx)\n self.set_yscale('log', **dy)\n\n b = self._hold\n self._hold = True # we've already processed the hold\n l = self.plot(*args, **kwargs)\n self._hold = b # restore the hold\n\n return l\n loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd\n\n def semilogx(self, *args, **kwargs):\n \"\"\"\n call signature::\n\n semilogx(*args, **kwargs)\n\n Make a plot with log scaling on the *x* axis.\n\n :func:`semilogx` supports all the keyword arguments of\n :func:`~matplotlib.pyplot.plot` and\n :meth:`matplotlib.axes.Axes.set_xscale`.\n\n Notable keyword arguments:\n\n *basex*: scalar > 1\n base of the *x* logarithm\n\n *subsx*: [ None | sequence ]\n The location of the minor xticks; *None* defaults to\n autosubs, which depend on the number of decades in the\n plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for\n details.\n\n The remaining valid kwargs are\n :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n .. 
seealso::\n :meth:`loglog`:\n For example code and figure\n \"\"\"\n if not self._hold: self.cla()\n d = {'basex': kwargs.pop( 'basex', 10),\n 'subsx': kwargs.pop( 'subsx', None),\n }\n\n self.set_xscale('log', **d)\n b = self._hold\n self._hold = True # we've already processed the hold\n l = self.plot(*args, **kwargs)\n self._hold = b # restore the hold\n return l\n semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd\n\n def semilogy(self, *args, **kwargs):\n \"\"\"\n call signature::\n\n semilogy(*args, **kwargs)\n\n Make a plot with log scaling on the *y* axis.\n\n :func:`semilogy` supports all the keyword arguments of\n :func:`~matplotlib.pylab.plot` and\n :meth:`matplotlib.axes.Axes.set_yscale`.\n\n Notable keyword arguments:\n\n *basey*: scalar > 1\n Base of the *y* logarithm\n\n *subsy*: [ None | sequence ]\n The location of the minor yticks; *None* defaults to\n autosubs, which depend on the number of decades in the\n plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for\n details.\n\n The remaining valid kwargs are\n :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n .. seealso::\n :meth:`loglog`:\n For example code and figure\n \"\"\"\n if not self._hold: self.cla()\n d = {'basey': kwargs.pop('basey', 10),\n 'subsy': kwargs.pop('subsy', None),\n }\n self.set_yscale('log', **d)\n b = self._hold\n self._hold = True # we've already processed the hold\n l = self.plot(*args, **kwargs)\n self._hold = b # restore the hold\n\n return l\n semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd\n\n def acorr(self, x, **kwargs):\n \"\"\"\n call signature::\n\n acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,\n maxlags=None, **kwargs)\n\n Plot the autocorrelation of *x*. If *normed* = *True*,\n normalize the data by the autocorrelation at 0-th lag. *x* is\n detrended by the *detrend* callable (default no normalization).\n\n Data are plotted as ``plot(lags, c, **kwargs)``\n\n Return value is a tuple (*lags*, *c*, *line*) where:\n\n - *lags* are a length 2*maxlags+1 lag vector\n\n - *c* is the 2*maxlags+1 auto correlation vector\n\n - *line* is a :class:`~matplotlib.lines.Line2D` instance\n returned by :meth:`plot`\n\n The default *linestyle* is None and the default *marker* is\n ``'o'``, though these can be overridden with keyword args.\n The cross correlation is performed with\n :func:`numpy.correlate` with *mode* = 2.\n\n If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`\n rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw\n vertical lines from the origin to the acorr. Otherwise, the\n plot style is determined by the kwargs, which are\n :class:`~matplotlib.lines.Line2D` properties.\n\n *maxlags* is a positive integer detailing the number of lags\n to show. The default value of *None* will return all\n :math:`2 \\mathrm{len}(x) - 1` lags.\n\n The return value is a tuple (*lags*, *c*, *linecol*, *b*)\n where\n\n - *linecol* is the\n :class:`~matplotlib.collections.LineCollection`\n\n - *b* is the *x*-axis.\n\n .. seealso::\n :meth:`~matplotlib.axes.Axes.plot` or\n :meth:`~matplotlib.axes.Axes.vlines`: For documentation on\n valid kwargs.\n\n **Example:**\n\n :func:`~matplotlib.pyplot.xcorr` above, and\n :func:`~matplotlib.pyplot.acorr` below.\n\n **Example:**\n\n .. 
plot:: mpl_examples/pylab_examples/xcorr_demo.py\n \"\"\"\n return self.xcorr(x, x, **kwargs)\n acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd\n\n def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,\n usevlines=False, maxlags=None, **kwargs):\n \"\"\"\n call signature::\n\n xcorr(x, y, normed=False, detrend=mlab.detrend_none,\n usevlines=False, **kwargs):\n\n Plot the cross correlation between *x* and *y*. If *normed* =\n *True*, normalize the data by the cross correlation at 0-th\n lag. *x* and y are detrended by the *detrend* callable\n (default no normalization). *x* and *y* must be equal length.\n\n Data are plotted as ``plot(lags, c, **kwargs)``\n\n Return value is a tuple (*lags*, *c*, *line*) where:\n\n - *lags* are a length ``2*maxlags+1`` lag vector\n\n - *c* is the ``2*maxlags+1`` auto correlation vector\n\n - *line* is a :class:`~matplotlib.lines.Line2D` instance\n returned by :func:`~matplotlib.pyplot.plot`.\n\n The default *linestyle* is *None* and the default *marker* is\n 'o', though these can be overridden with keyword args. The\n cross correlation is performed with :func:`numpy.correlate`\n with *mode* = 2.\n\n If *usevlines* is *True*:\n\n :func:`~matplotlib.pyplot.vlines`\n rather than :func:`~matplotlib.pyplot.plot` is used to draw\n vertical lines from the origin to the xcorr. Otherwise the\n plotstyle is determined by the kwargs, which are\n :class:`~matplotlib.lines.Line2D` properties.\n\n The return value is a tuple (*lags*, *c*, *linecol*, *b*)\n where *linecol* is the\n :class:`matplotlib.collections.LineCollection` instance and\n *b* is the *x*-axis.\n\n *maxlags* is a positive integer detailing the number of lags to show.\n The default value of *None* will return all ``(2*len(x)-1)`` lags.\n\n **Example:**\n\n :func:`~matplotlib.pyplot.xcorr` above, and\n :func:`~matplotlib.pyplot.acorr` below.\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/xcorr_demo.py\n \"\"\"\n\n Nx = len(x)\n if Nx!=len(y):\n raise ValueError('x and y must be equal length')\n\n x = detrend(np.asarray(x))\n y = detrend(np.asarray(y))\n\n c = np.correlate(x, y, mode=2)\n\n if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))\n\n if maxlags is None: maxlags = Nx - 1\n\n if maxlags >= Nx or maxlags < 1:\n raise ValueError('maglags must be None or strictly '\n 'positive < %d'%Nx)\n\n lags = np.arange(-maxlags,maxlags+1)\n c = c[Nx-1-maxlags:Nx+maxlags]\n\n\n if usevlines:\n a = self.vlines(lags, [0], c, **kwargs)\n b = self.axhline(**kwargs)\n else:\n\n kwargs.setdefault('marker', 'o')\n kwargs.setdefault('linestyle', 'None')\n a, = self.plot(lags, c, **kwargs)\n b = None\n return lags, c, a, b\n xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd\n\n def legend(self, *args, **kwargs):\n \"\"\"\n call signature::\n\n legend(*args, **kwargs)\n\n Place a legend on the current axes at location *loc*. Labels are a\n sequence of strings and *loc* can be a string or an integer specifying\n the legend location.\n\n To make a legend with existing lines::\n\n legend()\n\n :meth:`legend` by itself will try and build a legend using the label\n property of the lines/patches/collections. 
You can set the label of\n a line by doing::\n\n plot(x, y, label='my data')\n\n or::\n\n line.set_label('my data').\n\n If label is set to '_nolegend_', the item will not be shown in\n legend.\n\n To automatically generate the legend from labels::\n\n legend( ('label1', 'label2', 'label3') )\n\n To make a legend for a list of lines and labels::\n\n legend( (line1, line2, line3), ('label1', 'label2', 'label3') )\n\n To make a legend at a given location, using a location argument::\n\n legend( ('label1', 'label2', 'label3'), loc='upper left')\n\n or::\n\n legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)\n\n The location codes are\n\n =============== =============\n Location String Location Code\n =============== =============\n 'best' 0\n 'upper right' 1\n 'upper left' 2\n 'lower left' 3\n 'lower right' 4\n 'right' 5\n 'center left' 6\n 'center right' 7\n 'lower center' 8\n 'upper center' 9\n 'center' 10\n =============== =============\n\n If none of these are locations are suitable, loc can be a 2-tuple\n giving x,y in axes coords, ie::\n\n loc = 0, 1 # left top\n loc = 0.5, 0.5 # center\n\n Keyword arguments:\n\n *isaxes*: [ True | False ]\n Indicates that this is an axes legend\n\n *numpoints*: integer\n The number of points in the legend line, default is 4\n\n *prop*: [ None | FontProperties ]\n A :class:`matplotlib.font_manager.FontProperties`\n instance, or *None* to use rc settings.\n\n *pad*: [ None | scalar ]\n The fractional whitespace inside the legend border, between 0 and 1.\n If *None*, use rc settings.\n\n *markerscale*: [ None | scalar ]\n The relative size of legend markers vs. original. If *None*, use rc\n settings.\n\n *shadow*: [ None | False | True ]\n If *True*, draw a shadow behind legend. If *None*, use rc settings.\n\n *labelsep*: [ None | scalar ]\n The vertical space between the legend entries. If *None*, use rc\n settings.\n\n *handlelen*: [ None | scalar ]\n The length of the legend lines. If *None*, use rc settings.\n\n *handletextsep*: [ None | scalar ]\n The space between the legend line and legend text. If *None*, use rc\n settings.\n\n *axespad*: [ None | scalar ]\n The border between the axes and legend edge. If *None*, use rc\n settings.\n\n **Example:**\n\n .. plot:: mpl_examples/api/legend_demo.py\n \"\"\"\n\n def get_handles():\n handles = self.lines[:]\n handles.extend(self.patches)\n handles.extend([c for c in self.collections\n if isinstance(c, mcoll.LineCollection)])\n handles.extend([c for c in self.collections\n if isinstance(c, mcoll.RegularPolyCollection)])\n return handles\n\n if len(args)==0:\n handles = []\n labels = []\n for handle in get_handles():\n label = handle.get_label()\n if (label is not None and\n label != '' and not label.startswith('_')):\n handles.append(handle)\n labels.append(label)\n if len(handles) == 0:\n warnings.warn(\"No labeled objects found. \"\n \"Use label='...' 
kwarg on individual plots.\")\n return None\n\n elif len(args)==1:\n # LABELS\n labels = args[0]\n handles = [h for h, label in zip(get_handles(), labels)]\n\n elif len(args)==2:\n if is_string_like(args[1]) or isinstance(args[1], int):\n # LABELS, LOC\n labels, loc = args\n handles = [h for h, label in zip(get_handles(), labels)]\n kwargs['loc'] = loc\n else:\n # LINES, LABELS\n handles, labels = args\n\n elif len(args)==3:\n # LINES, LABELS, LOC\n handles, labels, loc = args\n kwargs['loc'] = loc\n else:\n raise TypeError('Invalid arguments to legend')\n\n\n handles = cbook.flatten(handles)\n self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)\n return self.legend_\n\n #### Specialized plotting\n\n def step(self, x, y, *args, **kwargs):\n '''\n call signature::\n\n step(x, y, *args, **kwargs)\n\n Make a step plot. Additional keyword args to :func:`step` are the same\n as those for :func:`~matplotlib.pyplot.plot`.\n\n *x* and *y* must be 1-D sequences, and it is assumed, but not checked,\n that *x* is uniformly increasing.\n\n Keyword arguments:\n\n *where*: [ 'pre' | 'post' | 'mid' ]\n If 'pre', the interval from x[i] to x[i+1] has level y[i]\n\n If 'post', that interval has level y[i+1]\n\n If 'mid', the jumps in *y* occur half-way between the\n *x*-values.\n '''\n\n where = kwargs.pop('where', 'pre')\n if where not in ('pre', 'post', 'mid'):\n raise ValueError(\"'where' argument to step must be \"\n \"'pre', 'post' or 'mid'\")\n kwargs['linestyle'] = 'steps-' + where\n\n return self.plot(x, y, *args, **kwargs)\n\n\n def bar(self, left, height, width=0.8, bottom=None,\n color=None, edgecolor=None, linewidth=None,\n yerr=None, xerr=None, ecolor=None, capsize=3,\n align='edge', orientation='vertical', log=False,\n **kwargs\n ):\n \"\"\"\n call signature::\n\n bar(left, height, width=0.8, bottom=0,\n color=None, edgecolor=None, linewidth=None,\n yerr=None, xerr=None, ecolor=None, capsize=3,\n align='edge', orientation='vertical', log=False)\n\n Make a bar plot with rectangles bounded by:\n\n *left*, *left* + *width*, *bottom*, *bottom* + *height*\n (left, right, bottom and top edges)\n\n *left*, *height*, *width*, and *bottom* can be either scalars\n or sequences\n\n Return value is a list of\n :class:`matplotlib.patches.Rectangle` instances.\n\n Required arguments:\n\n ======== ===============================================\n Argument Description\n ======== ===============================================\n *left* the x coordinates of the left sides of the bars\n *height* the heights of the bars\n ======== ===============================================\n\n Optional keyword arguments:\n\n =============== ==========================================\n Keyword Description\n =============== ==========================================\n *width* the widths of the bars\n *bottom* the y coordinates of the bottom edges of\n the bars\n *color* the colors of the bars\n *edgecolor* the colors of the bar edges\n *linewidth* width of bar edges; None means use default\n linewidth; 0 means don't draw edges.\n *xerr* if not None, will be used to generate\n errorbars on the bar chart\n *yerr* if not None, will be used to generate\n errorbars on the bar chart\n *ecolor* specifies the color of any errorbar\n *capsize* (default 3) determines the length in\n points of the error bar caps\n *align* 'edge' (default) | 'center'\n *orientation* 'vertical' | 'horizontal'\n *log* [False|True] False (default) leaves the\n orientation axis as-is; True sets it to\n log scale\n =============== 
==========================================\n\n For vertical bars, *align* = 'edge' aligns bars by their left\n edges in left, while *align* = 'center' interprets these\n values as the *x* coordinates of the bar centers. For\n horizontal bars, *align* = 'edge' aligns bars by their bottom\n edges in bottom, while *align* = 'center' interprets these\n values as the *y* coordinates of the bar centers.\n\n The optional arguments *color*, *edgecolor*, *linewidth*,\n *xerr*, and *yerr* can be either scalars or sequences of\n length equal to the number of bars. This enables you to use\n bar as the basis for stacked bar charts, or candlestick plots.\n\n Other optional kwargs:\n\n %(Rectangle)s\n\n **Example:** A stacked bar chart.\n\n .. plot:: mpl_examples/pylab_examples/bar_stacked.py\n \"\"\"\n if not self._hold: self.cla()\n\n label = kwargs.pop('label', '')\n def make_iterable(x):\n if not iterable(x):\n return [x]\n else:\n return x\n\n # make them safe to take len() of\n _left = left\n left = make_iterable(left)\n height = make_iterable(height)\n width = make_iterable(width)\n _bottom = bottom\n bottom = make_iterable(bottom)\n linewidth = make_iterable(linewidth)\n\n adjust_ylim = False\n adjust_xlim = False\n if orientation == 'vertical':\n self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)\n if log:\n self.set_yscale('log')\n # size width and bottom according to length of left\n if _bottom is None:\n if self.get_yscale() == 'log':\n bottom = [1e-100]\n adjust_ylim = True\n else:\n bottom = [0]\n nbars = len(left)\n if len(width) == 1:\n width *= nbars\n if len(bottom) == 1:\n bottom *= nbars\n elif orientation == 'horizontal':\n self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)\n if log:\n self.set_xscale('log')\n # size left and height according to length of bottom\n if _left is None:\n if self.get_xscale() == 'log':\n left = [1e-100]\n adjust_xlim = True\n else:\n left = [0]\n nbars = len(bottom)\n if len(left) == 1:\n left *= nbars\n if len(height) == 1:\n height *= nbars\n else:\n raise ValueError, 'invalid orientation: %s' % orientation\n\n\n # do not convert to array here as unit info is lost\n #left = np.asarray(left)\n #height = np.asarray(height)\n #width = np.asarray(width)\n #bottom = np.asarray(bottom)\n\n if len(linewidth) < nbars:\n linewidth *= nbars\n\n if color is None:\n color = [None] * nbars\n else:\n color = list(mcolors.colorConverter.to_rgba_array(color))\n if len(color) < nbars:\n color *= nbars\n\n if edgecolor is None:\n edgecolor = [None] * nbars\n else:\n edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))\n if len(edgecolor) < nbars:\n edgecolor *= nbars\n\n if yerr is not None:\n if not iterable(yerr):\n yerr = [yerr]*nbars\n\n if xerr is not None:\n if not iterable(xerr):\n xerr = [xerr]*nbars\n\n # FIXME: convert the following to proper input validation\n # raising ValueError; don't use assert for this.\n assert len(left)==nbars, \"argument 'left' must be %d or scalar\" % nbars\n assert len(height)==nbars, (\"argument 'height' must be %d or scalar\" %\n nbars)\n assert len(width)==nbars, (\"argument 'width' must be %d or scalar\" %\n nbars)\n assert len(bottom)==nbars, (\"argument 'bottom' must be %d or scalar\" %\n nbars)\n\n if yerr is not None and len(yerr)!=nbars:\n raise ValueError(\n \"bar() argument 'yerr' must be len(%s) or scalar\" % nbars)\n if xerr is not None and len(xerr)!=nbars:\n raise ValueError(\n \"bar() argument 'xerr' must be len(%s) or scalar\" % nbars)\n\n patches = []\n\n # lets do some 
conversions now since some types cannot be\n # subtracted uniformly\n if self.xaxis is not None:\n xconv = self.xaxis.converter\n if xconv is not None:\n units = self.xaxis.get_units()\n left = xconv.convert( left, units )\n width = xconv.convert( width, units )\n\n if self.yaxis is not None:\n yconv = self.yaxis.converter\n if yconv is not None :\n units = self.yaxis.get_units()\n bottom = yconv.convert( bottom, units )\n height = yconv.convert( height, units )\n\n if align == 'edge':\n pass\n elif align == 'center':\n if orientation == 'vertical':\n left = [left[i] - width[i]/2. for i in xrange(len(left))]\n elif orientation == 'horizontal':\n bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]\n\n else:\n raise ValueError, 'invalid alignment: %s' % align\n\n args = zip(left, bottom, width, height, color, edgecolor, linewidth)\n for l, b, w, h, c, e, lw in args:\n if h<0:\n b += h\n h = abs(h)\n if w<0:\n l += w\n w = abs(w)\n r = mpatches.Rectangle(\n xy=(l, b), width=w, height=h,\n facecolor=c,\n edgecolor=e,\n linewidth=lw,\n label=label\n )\n label = '_nolegend_'\n r.update(kwargs)\n #print r.get_label(), label, 'label' in kwargs\n self.add_patch(r)\n patches.append(r)\n\n holdstate = self._hold\n self.hold(True) # ensure hold is on before plotting errorbars\n\n if xerr is not None or yerr is not None:\n if orientation == 'vertical':\n # using list comps rather than arrays to preserve unit info\n x = [l+0.5*w for l, w in zip(left, width)]\n y = [b+h for b,h in zip(bottom, height)]\n\n elif orientation == 'horizontal':\n # using list comps rather than arrays to preserve unit info\n x = [l+w for l,w in zip(left, width)]\n y = [b+0.5*h for b,h in zip(bottom, height)]\n\n self.errorbar(\n x, y,\n yerr=yerr, xerr=xerr,\n fmt=None, ecolor=ecolor, capsize=capsize)\n\n self.hold(holdstate) # restore previous hold state\n\n if adjust_xlim:\n xmin, xmax = self.dataLim.intervalx\n xmin = np.amin(width[width!=0]) # filter out the 0 width rects\n if xerr is not None:\n xmin = xmin - np.amax(xerr)\n xmin = max(xmin*0.9, 1e-100)\n self.dataLim.intervalx = (xmin, xmax)\n\n if adjust_ylim:\n ymin, ymax = self.dataLim.intervaly\n ymin = np.amin(height[height!=0]) # filter out the 0 height rects\n if yerr is not None:\n ymin = ymin - np.amax(yerr)\n ymin = max(ymin*0.9, 1e-100)\n self.dataLim.intervaly = (ymin, ymax)\n self.autoscale_view()\n return patches\n bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd\n\n def barh(self, bottom, width, height=0.8, left=None, **kwargs):\n \"\"\"\n call signature::\n\n barh(bottom, width, height=0.8, left=0, **kwargs)\n\n Make a horizontal bar plot with rectangles bounded by:\n\n *left*, *left* + *width*, *bottom*, *bottom* + *height*\n (left, right, bottom and top edges)\n\n *bottom*, *width*, *height*, and *left* can be either scalars\n or sequences\n\n Return value is a list of\n :class:`matplotlib.patches.Rectangle` instances.\n\n Required arguments:\n\n ======== ======================================================\n Argument Description\n ======== ======================================================\n *bottom* the vertical positions of the bottom edges of the bars\n *width* the lengths of the bars\n ======== ======================================================\n\n Optional keyword arguments:\n\n =============== ==========================================\n Keyword Description\n =============== ==========================================\n *height* the heights (thicknesses) of the bars\n *left* the x coordinates of the left edges of 
the\n bars\n *color* the colors of the bars\n *edgecolor* the colors of the bar edges\n *linewidth* width of bar edges; None means use default\n linewidth; 0 means don't draw edges.\n *xerr* if not None, will be used to generate\n errorbars on the bar chart\n *yerr* if not None, will be used to generate\n errorbars on the bar chart\n *ecolor* specifies the color of any errorbar\n *capsize* (default 3) determines the length in\n points of the error bar caps\n *align* 'edge' (default) | 'center'\n *log* [False|True] False (default) leaves the\n horizontal axis as-is; True sets it to log\n scale\n =============== ==========================================\n\n Setting *align* = 'edge' aligns bars by their bottom edges in\n bottom, while *align* = 'center' interprets these values as\n the *y* coordinates of the bar centers.\n\n The optional arguments *color*, *edgecolor*, *linewidth*,\n *xerr*, and *yerr* can be either scalars or sequences of\n length equal to the number of bars. This enables you to use\n barh as the basis for stacked bar charts, or candlestick\n plots.\n\n other optional kwargs:\n\n %(Rectangle)s\n \"\"\"\n\n patches = self.bar(left=left, height=height, width=width, bottom=bottom,\n orientation='horizontal', **kwargs)\n return patches\n\n barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd\n\n def broken_barh(self, xranges, yrange, **kwargs):\n \"\"\"\n call signature::\n\n broken_barh(self, xranges, yrange, **kwargs)\n\n A collection of horizontal bars spanning *yrange* with a sequence of\n *xranges*.\n\n Required arguments:\n\n ========= ==============================\n Argument Description\n ========= ==============================\n *xranges* sequence of (*xmin*, *xwidth*)\n *yrange* sequence of (*ymin*, *ywidth*)\n ========= ==============================\n\n kwargs are\n :class:`matplotlib.collections.BrokenBarHCollection`\n properties:\n\n %(BrokenBarHCollection)s\n\n these can either be a single argument, ie::\n\n facecolors = 'black'\n\n or a sequence of arguments for the various bars, ie::\n\n facecolors = ('black', 'red', 'green')\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/broken_barh.py\n \"\"\"\n col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)\n self.add_collection(col, autolim=True)\n self.autoscale_view()\n\n return col\n\n broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd\n\n def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):\n \"\"\"\n call signature::\n\n stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')\n\n A stem plot plots vertical lines (using *linefmt*) at each *x*\n location from the baseline to *y*, and places a marker there\n using *markerfmt*. A horizontal line at 0 is is plotted using\n *basefmt*.\n\n Return value is a tuple (*markerline*, *stemlines*,\n *baseline*).\n\n .. 
seealso::\n `this document`__ for details\n\n :file:`examples/pylab_examples/stem_plot.py`:\n for a demo\n\n __ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html\n\n \"\"\"\n remember_hold=self._hold\n if not self._hold: self.cla()\n self.hold(True)\n\n markerline, = self.plot(x, y, markerfmt)\n\n stemlines = []\n for thisx, thisy in zip(x, y):\n l, = self.plot([thisx,thisx], [0, thisy], linefmt)\n stemlines.append(l)\n\n baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)\n\n self.hold(remember_hold)\n\n return markerline, stemlines, baseline\n\n\n def pie(self, x, explode=None, labels=None, colors=None,\n autopct=None, pctdistance=0.6, shadow=False,\n labeldistance=1.1):\n r\"\"\"\n call signature::\n\n pie(x, explode=None, labels=None,\n colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),\n autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)\n\n Make a pie chart of array *x*. The fractional area of each\n wedge is given by x/sum(x). If sum(x) <= 1, then the values\n of x give the fractional area directly and the array will not\n be normalized.\n\n Keyword arguments:\n\n *explode*: [ None | len(x) sequence ]\n If not *None*, is a len(*x*) array which specifies the\n fraction of the radius with which to offset each wedge.\n\n *colors*: [ None | color sequence ]\n A sequence of matplotlib color args through which the pie chart\n will cycle.\n\n *labels*: [ None | len(x) sequence of strings ]\n A sequence of strings providing the labels for each wedge\n\n *autopct*: [ None | format string | format function ]\n If not *None*, is a string or function used to label the\n wedges with their numeric value. The label will be placed inside\n the wedge. If it is a format string, the label will be ``fmt%pct``.\n If it is a function, it will be called.\n\n *pctdistance*: scalar\n The ratio between the center of each pie slice and the\n start of the text generated by *autopct*. Ignored if\n *autopct* is *None*; default is 0.6.\n\n *labeldistance*: scalar\n The radial distance at which the pie labels are drawn\n\n *shadow*: [ False | True ]\n Draw a shadow beneath the pie.\n\n The pie chart will probably look best if the figure and axes are\n square. 
Eg.::\n\n figure(figsize=(8,8))\n ax = axes([0.1, 0.1, 0.8, 0.8])\n\n Return value:\n If *autopct* is None, return the tuple (*patches*, *texts*):\n\n - *patches* is a sequence of\n :class:`matplotlib.patches.Wedge` instances\n\n - *texts* is a list of the label\n :class:`matplotlib.text.Text` instances.\n\n If *autopct* is not *None*, return the tuple (*patches*,\n *texts*, *autotexts*), where *patches* and *texts* are as\n above, and *autotexts* is a list of\n :class:`~matplotlib.text.Text` instances for the numeric\n labels.\n \"\"\"\n self.set_frame_on(False)\n\n x = np.asarray(x).astype(np.float32)\n\n sx = float(x.sum())\n if sx>1: x = np.divide(x,sx)\n\n if labels is None: labels = ['']*len(x)\n if explode is None: explode = [0]*len(x)\n assert(len(x)==len(labels))\n assert(len(x)==len(explode))\n if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')\n\n\n center = 0,0\n radius = 1\n theta1 = 0\n i = 0\n texts = []\n slices = []\n autotexts = []\n for frac, label, expl in cbook.safezip(x,labels, explode):\n x, y = center\n theta2 = theta1 + frac\n thetam = 2*math.pi*0.5*(theta1+theta2)\n x += expl*math.cos(thetam)\n y += expl*math.sin(thetam)\n\n w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,\n facecolor=colors[i%len(colors)])\n slices.append(w)\n self.add_patch(w)\n w.set_label(label)\n\n if shadow:\n # make sure to add a shadow after the call to\n # add_patch so the figure and transform props will be\n # set\n shad = mpatches.Shadow(w, -0.02, -0.02,\n #props={'facecolor':w.get_facecolor()}\n )\n shad.set_zorder(0.9*w.get_zorder())\n self.add_patch(shad)\n\n\n xt = x + labeldistance*radius*math.cos(thetam)\n yt = y + labeldistance*radius*math.sin(thetam)\n label_alignment = xt > 0 and 'left' or 'right'\n\n t = self.text(xt, yt, label,\n size=rcParams['xtick.labelsize'],\n horizontalalignment=label_alignment,\n verticalalignment='center')\n\n texts.append(t)\n\n if autopct is not None:\n xt = x + pctdistance*radius*math.cos(thetam)\n yt = y + pctdistance*radius*math.sin(thetam)\n if is_string_like(autopct):\n s = autopct%(100.*frac)\n elif callable(autopct):\n s = autopct(100.*frac)\n else:\n raise TypeError(\n 'autopct must be callable or a format string')\n\n t = self.text(xt, yt, s,\n horizontalalignment='center',\n verticalalignment='center')\n autotexts.append(t)\n\n\n theta1 = theta2\n i += 1\n\n self.set_xlim((-1.25, 1.25))\n self.set_ylim((-1.25, 1.25))\n self.set_xticks([])\n self.set_yticks([])\n\n if autopct is None: return slices, texts\n else: return slices, texts, autotexts\n\n def errorbar(self, x, y, yerr=None, xerr=None,\n fmt='-', ecolor=None, elinewidth=None, capsize=3,\n barsabove=False, lolims=False, uplims=False,\n xlolims=False, xuplims=False, **kwargs):\n \"\"\"\n call signature::\n\n errorbar(x, y, yerr=None, xerr=None,\n fmt='-', ecolor=None, elinewidth=None, capsize=3,\n barsabove=False, lolims=False, uplims=False,\n xlolims=False, xuplims=False)\n\n Plot *x* versus *y* with error deltas in *yerr* and *xerr*.\n Vertical errorbars are plotted if *yerr* is not *None*.\n Horizontal errorbars are plotted if *xerr* is not *None*.\n\n *x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a\n single error bar at *x*, *y*.\n\n Optional keyword arguments:\n\n *xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ]\n If a scalar number, len(N) array-like object, or an Nx1 array-like\n object, errorbars are drawn +/- value.\n\n If a rank-1, Nx2 Numpy array, errorbars are drawn at -column1 and\n +column2\n\n *fmt*: '-'\n The plot format 
symbol for *y*. If *fmt* is *None*, just plot the\n errorbars with no line symbols. This can be useful for creating a\n bar plot with errorbars.\n\n *ecolor*: [ None | mpl color ]\n a matplotlib color arg which gives the color the errorbar lines; if\n *None*, use the marker color.\n\n *elinewidth*: scalar\n the linewidth of the errorbar lines. If *None*, use the linewidth.\n\n *capsize*: scalar\n the size of the error bar caps in points\n\n *barsabove*: [ True | False ]\n if *True*, will plot the errorbars above the plot\n symbols. Default is below.\n\n *lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]\n These arguments can be used to indicate that a value gives\n only upper/lower limits. In that case a caret symbol is\n used to indicate this. lims-arguments may be of the same\n type as *xerr* and *yerr*.\n\n All other keyword arguments are passed on to the plot command for the\n markers, so you can add additional key=value pairs to control the\n errorbar markers. For example, this code makes big red squares with\n thick green edges::\n\n x,y,yerr = rand(3,10)\n errorbar(x, y, yerr, marker='s',\n mfc='red', mec='green', ms=20, mew=4)\n\n where *mfc*, *mec*, *ms* and *mew* are aliases for the longer\n property names, *markerfacecolor*, *markeredgecolor*, *markersize*\n and *markeredgewith*.\n\n valid kwargs for the marker properties are\n\n %(Line2D)s\n\n Return value is a length 3 tuple. The first element is the\n :class:`~matplotlib.lines.Line2D` instance for the *y* symbol\n lines. The second element is a list of error bar cap lines,\n the third element is a list of\n :class:`~matplotlib.collections.LineCollection` instances for\n the horizontal and vertical error ranges.\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/errorbar_demo.py\n\n \"\"\"\n\n self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)\n if not self._hold: self.cla()\n\n # make sure all the args are iterable; use lists not arrays to\n # preserve units\n if not iterable(x):\n x = [x]\n\n if not iterable(y):\n y = [y]\n\n if xerr is not None:\n if not iterable(xerr):\n xerr = [xerr]*len(x)\n\n if yerr is not None:\n if not iterable(yerr):\n yerr = [yerr]*len(y)\n\n l0 = None\n\n if barsabove and fmt is not None:\n l0, = self.plot(x,y,fmt,**kwargs)\n\n barcols = []\n caplines = []\n\n lines_kw = {'label':'_nolegend_'}\n if elinewidth:\n lines_kw['linewidth'] = elinewidth\n else:\n if 'linewidth' in kwargs:\n lines_kw['linewidth']=kwargs['linewidth']\n if 'lw' in kwargs:\n lines_kw['lw']=kwargs['lw']\n if 'transform' in kwargs:\n lines_kw['transform'] = kwargs['transform']\n\n # arrays fine here, they are booleans and hence not units\n if not iterable(lolims):\n lolims = np.asarray([lolims]*len(x), bool)\n else: lolims = np.asarray(lolims, bool)\n\n if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)\n else: uplims = np.asarray(uplims, bool)\n\n if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)\n else: xlolims = np.asarray(xlolims, bool)\n\n if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)\n else: xuplims = np.asarray(xuplims, bool)\n\n def xywhere(xs, ys, mask):\n \"\"\"\n return xs[mask], ys[mask] where mask is True but xs and\n ys are not arrays\n \"\"\"\n assert len(xs)==len(ys)\n assert len(xs)==len(mask)\n xs = [thisx for thisx, b in zip(xs, mask) if b]\n ys = [thisy for thisy, b in zip(ys, mask) if b]\n return xs, ys\n\n\n if capsize > 0:\n plot_kw = {\n 'ms':2*capsize,\n 'label':'_nolegend_'}\n if 'markeredgewidth' in kwargs:\n 
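# The caps are drawn as extra markers whose size is 'ms' = 2*capsize (set
# just above), and the caller's marker-edge kwargs are forwarded to them
# below.  A rough usage sketch, with arbitrary values (pylab-style):
#     errorbar(x, y, yerr=0.2, fmt='o', capsize=5, mew=2)
# gives 10-point-wide caps whose edge lines are 2 points wide.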
plot_kw['markeredgewidth']=kwargs['markeredgewidth']\n if 'mew' in kwargs:\n plot_kw['mew']=kwargs['mew']\n if 'transform' in kwargs:\n plot_kw['transform'] = kwargs['transform']\n\n if xerr is not None:\n if (iterable(xerr) and len(xerr)==2 and\n iterable(xerr[0]) and iterable(xerr[1])):\n # using list comps rather than arrays to preserve units\n left = [thisx-thiserr for (thisx, thiserr)\n in cbook.safezip(x,xerr[0])]\n right = [thisx+thiserr for (thisx, thiserr)\n in cbook.safezip(x,xerr[1])]\n else:\n # using list comps rather than arrays to preserve units\n left = [thisx-thiserr for (thisx, thiserr)\n in cbook.safezip(x,xerr)]\n right = [thisx+thiserr for (thisx, thiserr)\n in cbook.safezip(x,xerr)]\n\n barcols.append( self.hlines(y, left, right, **lines_kw ) )\n if capsize > 0:\n if xlolims.any():\n # can't use numpy logical indexing since left and\n # y are lists\n leftlo, ylo = xywhere(left, y, xlolims)\n\n caplines.extend(\n self.plot(leftlo, ylo, ls='None',\n marker=mlines.CARETLEFT, **plot_kw) )\n xlolims = ~xlolims\n leftlo, ylo = xywhere(left, y, xlolims)\n caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )\n else:\n caplines.extend( self.plot(left, y, 'k|', **plot_kw) )\n\n if xuplims.any():\n\n rightup, yup = xywhere(right, y, xuplims)\n caplines.extend(\n self.plot(rightup, yup, ls='None',\n marker=mlines.CARETRIGHT, **plot_kw) )\n xuplims = ~xuplims\n rightup, yup = xywhere(right, y, xuplims)\n caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )\n else:\n caplines.extend( self.plot(right, y, 'k|', **plot_kw) )\n\n if yerr is not None:\n if (iterable(yerr) and len(yerr)==2 and\n iterable(yerr[0]) and iterable(yerr[1])):\n # using list comps rather than arrays to preserve units\n lower = [thisy-thiserr for (thisy, thiserr)\n in cbook.safezip(y,yerr[0])]\n upper = [thisy+thiserr for (thisy, thiserr)\n in cbook.safezip(y,yerr[1])]\n else:\n # using list comps rather than arrays to preserve units\n lower = [thisy-thiserr for (thisy, thiserr)\n in cbook.safezip(y,yerr)]\n upper = [thisy+thiserr for (thisy, thiserr)\n in cbook.safezip(y,yerr)]\n\n barcols.append( self.vlines(x, lower, upper, **lines_kw) )\n if capsize > 0:\n\n if lolims.any():\n xlo, lowerlo = xywhere(x, lower, lolims)\n caplines.extend(\n self.plot(xlo, lowerlo, ls='None',\n marker=mlines.CARETDOWN, **plot_kw) )\n lolims = ~lolims\n xlo, lowerlo = xywhere(x, lower, lolims)\n caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )\n else:\n caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )\n\n if uplims.any():\n xup, upperup = xywhere(x, upper, uplims)\n\n caplines.extend(\n self.plot(xup, upperup, ls='None',\n marker=mlines.CARETUP, **plot_kw) )\n uplims = ~uplims\n xup, upperup = xywhere(x, upper, uplims)\n caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )\n else:\n caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )\n\n if not barsabove and fmt is not None:\n l0, = self.plot(x,y,fmt,**kwargs)\n\n if ecolor is None:\n if l0 is None:\n ecolor = self._get_lines._get_next_cycle_color()\n else:\n ecolor = l0.get_color()\n\n for l in barcols:\n l.set_color(ecolor)\n for l in caplines:\n l.set_color(ecolor)\n\n self.autoscale_view()\n return (l0, caplines, barcols)\n errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd\n\n def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,\n positions=None, widths=None):\n \"\"\"\n call signature::\n\n boxplot(x, notch=0, sym='+', vert=1, whis=1.5,\n positions=None, widths=None)\n\n Make a box and whisker plot for each 
column of *x* or each\n vector in sequence *x*. The box extends from the lower to\n upper quartile values of the data, with a line at the median.\n The whiskers extend from the box to show the range of the\n data. Flier points are those past the end of the whiskers.\n\n - *notch* = 0 (default) produces a rectangular box plot.\n - *notch* = 1 will produce a notched box plot\n\n *sym* (default 'b+') is the default symbol for flier points.\n Enter an empty string ('') if you don't want to show fliers.\n\n - *vert* = 1 (default) makes the boxes vertical.\n - *vert* = 0 makes horizontal boxes. This seems goofy, but\n that's how Matlab did it.\n\n *whis* (default 1.5) defines the length of the whiskers as\n a function of the inner quartile range. They extend to the\n most extreme data point within ( ``whis*(75%-25%)`` ) data range.\n\n *positions* (default 1,2,...,n) sets the horizontal positions of\n the boxes. The ticks and limits are automatically set to match\n the positions.\n\n *widths* is either a scalar or a vector and sets the width of\n each box. The default is 0.5, or ``0.15*(distance between extreme\n positions)`` if that is smaller.\n\n *x* is an array or a sequence of vectors.\n\n Returns a dictionary mapping each component of the boxplot\n to a list of the :class:`matplotlib.lines.Line2D`\n instances created.\n\n **Example:**\n\n .. plot:: pyplots/boxplot_demo.py\n \"\"\"\n if not self._hold: self.cla()\n holdStatus = self._hold\n whiskers, caps, boxes, medians, fliers = [], [], [], [], []\n\n # convert x to a list of vectors\n if hasattr(x, 'shape'):\n if len(x.shape) == 1:\n if hasattr(x[0], 'shape'):\n x = list(x)\n else:\n x = [x,]\n elif len(x.shape) == 2:\n nr, nc = x.shape\n if nr == 1:\n x = [x]\n elif nc == 1:\n x = [x.ravel()]\n else:\n x = [x[:,i] for i in xrange(nc)]\n else:\n raise ValueError, \"input x can have no more than 2 dimensions\"\n if not hasattr(x[0], '__len__'):\n x = [x]\n col = len(x)\n\n # get some plot info\n if positions is None:\n positions = range(1, col + 1)\n if widths is None:\n distance = max(positions) - min(positions)\n widths = min(0.15*max(distance,1.0), 0.5)\n if isinstance(widths, float) or isinstance(widths, int):\n widths = np.ones((col,), float) * widths\n\n # loop through columns, adding each to plot\n self.hold(True)\n for i,pos in enumerate(positions):\n d = np.ravel(x[i])\n row = len(d)\n # get median and quartiles\n q1, med, q3 = mlab.prctile(d,[25,50,75])\n # get high extreme\n iq = q3 - q1\n hi_val = q3 + whis*iq\n wisk_hi = np.compress( d <= hi_val , d )\n if len(wisk_hi) == 0:\n wisk_hi = q3\n else:\n wisk_hi = max(wisk_hi)\n # get low extreme\n lo_val = q1 - whis*iq\n wisk_lo = np.compress( d >= lo_val, d )\n if len(wisk_lo) == 0:\n wisk_lo = q1\n else:\n wisk_lo = min(wisk_lo)\n # get fliers - if we are showing them\n flier_hi = []\n flier_lo = []\n flier_hi_x = []\n flier_lo_x = []\n if len(sym) != 0:\n flier_hi = np.compress( d > wisk_hi, d )\n flier_lo = np.compress( d < wisk_lo, d )\n flier_hi_x = np.ones(flier_hi.shape[0]) * pos\n flier_lo_x = np.ones(flier_lo.shape[0]) * pos\n\n # get x locations for fliers, whisker, whisker cap and box sides\n box_x_min = pos - widths[i] * 0.5\n box_x_max = pos + widths[i] * 0.5\n\n wisk_x = np.ones(2) * pos\n\n cap_x_min = pos - widths[i] * 0.25\n cap_x_max = pos + widths[i] * 0.25\n cap_x = [cap_x_min, cap_x_max]\n\n # get y location for median\n med_y = [med, med]\n\n # calculate 'regular' plot\n if notch == 0:\n # make our box vectors\n box_x = [box_x_min, box_x_max, box_x_max, 
box_x_min, box_x_min ]\n box_y = [q1, q1, q3, q3, q1 ]\n # make our median line vectors\n med_x = [box_x_min, box_x_max]\n # calculate 'notch' plot\n else:\n notch_max = med + 1.57*iq/np.sqrt(row)\n notch_min = med - 1.57*iq/np.sqrt(row)\n if notch_max > q3:\n notch_max = q3\n if notch_min < q1:\n notch_min = q1\n # make our notched box vectors\n box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,\n box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,\n box_x_min ]\n box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,\n med, notch_min, q1]\n # make our median line vectors\n med_x = [cap_x_min, cap_x_max]\n med_y = [med, med]\n\n # vertical or horizontal plot?\n if vert:\n def doplot(*args):\n return self.plot(*args)\n else:\n def doplot(*args):\n shuffled = []\n for i in xrange(0, len(args), 3):\n shuffled.extend([args[i+1], args[i], args[i+2]])\n return self.plot(*shuffled)\n\n whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',\n wisk_x, [q3, wisk_hi], 'b--'))\n caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',\n cap_x, [wisk_lo, wisk_lo], 'k-'))\n boxes.extend(doplot(box_x, box_y, 'b-'))\n medians.extend(doplot(med_x, med_y, 'r-'))\n fliers.extend(doplot(flier_hi_x, flier_hi, sym,\n flier_lo_x, flier_lo, sym))\n\n # fix our axes/ticks up a little\n if 1 == vert:\n setticks, setlim = self.set_xticks, self.set_xlim\n else:\n setticks, setlim = self.set_yticks, self.set_ylim\n\n newlimits = min(positions)-0.5, max(positions)+0.5\n setlim(newlimits)\n setticks(positions)\n\n # reset hold status\n self.hold(holdStatus)\n\n return dict(whiskers=whiskers, caps=caps, boxes=boxes,\n medians=medians, fliers=fliers)\n\n def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,\n vmin=None, vmax=None, alpha=1.0, linewidths=None,\n faceted=True, verts=None,\n **kwargs):\n \"\"\"\n call signatures::\n\n scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,\n vmin=None, vmax=None, alpha=1.0, linewidths=None,\n verts=None, **kwargs)\n\n Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D\n sequences of the same length, *N*.\n\n Keyword arguments:\n\n *s*:\n size in points^2. It is a scalar or an array of the same\n length as *x* and *y*.\n\n *c*:\n a color. *c* can be a single color format string, or a\n sequence of color specifications of length *N*, or a\n sequence of *N* numbers to be mapped to colors using the\n *cmap* and *norm* specified via kwargs (see below). Note\n that *c* should not be a single numeric RGB or RGBA\n sequence because that is indistinguishable from an array\n of values to be colormapped. 
*c* can be a 2-D array in\n which the rows are RGB or RGBA, however.\n\n *marker*:\n can be one of:\n\n ===== ==============\n Value Description\n ===== ==============\n 's' square\n 'o' circle\n '^' triangle up\n '>' triangle right\n 'v' triangle down\n '<' triangle left\n 'd' diamond\n 'p' pentagram\n 'h' hexagon\n '8' octagon\n '+' plus\n 'x' cross\n ===== ==============\n\n The marker can also be a tuple (*numsides*, *style*,\n *angle*), which will create a custom, regular symbol.\n\n *numsides*:\n the number of sides\n\n *style*:\n the style of the regular symbol:\n\n ===== =============================================\n Value Description\n ===== =============================================\n 0 a regular polygon\n 1 a star-like symbol\n 2 an asterisk\n 3 a circle (*numsides* and *angle* is ignored)\n ===== =============================================\n\n *angle*:\n the angle of rotation of the symbol\n\n Finally, *marker* can be (*verts*, 0): *verts* is a\n sequence of (*x*, *y*) vertices for a custom scatter\n symbol. Alternatively, use the kwarg combination\n *marker* = *None*, *verts* = *verts*.\n\n Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in\n which case all masks will be combined and only unmasked points\n will be plotted.\n\n Other keyword arguments: the color mapping and normalization\n arguments will be used only if *c* is an array of floats.\n\n *cmap*: [ None | Colormap ]\n A :class:`matplotlib.colors.Colormap` instance. If *None*,\n defaults to rc ``image.cmap``. *cmap* is only used if *c*\n is an array of floats.\n\n *norm*: [ None | Normalize ]\n A :class:`matplotlib.colors.Normalize` instance is used to\n scale luminance data to 0, 1. If *None*, use the default\n :func:`normalize`. *norm* is only used if *c* is an array\n of floats.\n\n *vmin*/*vmax*:\n *vmin* and *vmax* are used in conjunction with norm to\n normalize luminance data. If either are None, the min and\n max of the color array *C* is used. Note if you pass a\n *norm* instance, your settings for *vmin* and *vmax* will\n be ignored.\n\n *alpha*: 0 <= scalar <= 1\n The alpha value for the patches\n\n *linewidths*: [ None | scalar | sequence ]\n If *None*, defaults to (lines.linewidth,). 
Note that this\n is a tuple, and if you set the linewidths argument you\n must set it as a sequence of floats, as required by\n :class:`~matplotlib.collections.RegularPolyCollection`.\n\n Optional kwargs control the\n :class:`~matplotlib.collections.Collection` properties; in\n particular:\n\n *edgecolors*:\n 'none' to plot faces with no outlines\n\n *facecolors*:\n 'none' to plot unfilled outlines\n\n Here are the standard descriptions of all the\n :class:`~matplotlib.collections.Collection` kwargs:\n\n %(Collection)s\n\n A :class:`~matplotlib.collections.Collection` instance is\n returned.\n \"\"\"\n\n if not self._hold: self.cla()\n\n syms = { # a dict from symbol to (numsides, angle)\n 's' : (4,math.pi/4.0,0), # square\n 'o' : (20,3,0), # circle\n '^' : (3,0,0), # triangle up\n '>' : (3,math.pi/2.0,0), # triangle right\n 'v' : (3,math.pi,0), # triangle down\n '<' : (3,3*math.pi/2.0,0), # triangle left\n 'd' : (4,0,0), # diamond\n 'p' : (5,0,0), # pentagram\n 'h' : (6,0,0), # hexagon\n '8' : (8,0,0), # octagon\n '+' : (4,0,2), # plus\n 'x' : (4,math.pi/4.0,2) # cross\n }\n\n self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)\n\n x, y, s, c = cbook.delete_masked_points(x, y, s, c)\n\n\n if is_string_like(c) or cbook.is_sequence_of_strings(c):\n colors = mcolors.colorConverter.to_rgba_array(c, alpha)\n else:\n sh = np.shape(c)\n # The inherent ambiguity is resolved in favor of color\n # mapping, not interpretation as rgb or rgba:\n if len(sh) == 1 and sh[0] == len(x):\n colors = None # use cmap, norm after collection is created\n else:\n colors = mcolors.colorConverter.to_rgba_array(c, alpha)\n\n if not iterable(s):\n scales = (s,)\n else:\n scales = s\n\n if faceted:\n edgecolors = None\n else:\n edgecolors = 'none'\n warnings.warn(\n '''replace \"faceted=False\" with \"edgecolors='none'\"''',\n DeprecationWarning) #2008/04/18\n\n sym = None\n symstyle = 0\n\n # to be API compatible\n if marker is None and not (verts is None):\n marker = (verts, 0)\n verts = None\n\n if is_string_like(marker):\n # the standard way to define symbols using a string character\n sym = syms.get(marker)\n if sym is None and verts is None:\n raise ValueError('Unknown marker symbol to scatter')\n numsides, rotation, symstyle = syms[marker]\n\n elif iterable(marker):\n # accept marker to be:\n # (numsides, style, [angle])\n # or\n # (verts[], style, [angle])\n\n if len(marker)<2 or len(marker)>3:\n raise ValueError('Cannot create markersymbol from marker')\n\n if cbook.is_numlike(marker[0]):\n # (numsides, style, [angle])\n\n if len(marker)==2:\n numsides, rotation = marker[0], 0.\n elif len(marker)==3:\n numsides, rotation = marker[0], marker[2]\n sym = True\n\n if marker[1] in (1,2):\n symstyle = marker[1]\n\n else:\n verts = np.asarray(marker[0])\n\n if sym is not None:\n if symstyle==0:\n collection = mcoll.RegularPolyCollection(\n numsides, rotation, scales,\n facecolors = colors,\n edgecolors = edgecolors,\n linewidths = linewidths,\n offsets = zip(x,y),\n transOffset = self.transData,\n )\n elif symstyle==1:\n collection = mcoll.StarPolygonCollection(\n numsides, rotation, scales,\n facecolors = colors,\n edgecolors = edgecolors,\n linewidths = linewidths,\n offsets = zip(x,y),\n transOffset = self.transData,\n )\n elif symstyle==2:\n collection = mcoll.AsteriskPolygonCollection(\n numsides, rotation, scales,\n facecolors = colors,\n edgecolors = edgecolors,\n linewidths = linewidths,\n offsets = zip(x,y),\n transOffset = self.transData,\n )\n elif symstyle==3:\n collection = 
mcoll.CircleCollection(\n scales,\n facecolors = colors,\n edgecolors = edgecolors,\n linewidths = linewidths,\n offsets = zip(x,y),\n transOffset = self.transData,\n )\n else:\n rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))\n verts /= rescale\n\n collection = mcoll.PolyCollection(\n (verts,), scales,\n facecolors = colors,\n edgecolors = edgecolors,\n linewidths = linewidths,\n offsets = zip(x,y),\n transOffset = self.transData,\n )\n collection.set_transform(mtransforms.IdentityTransform())\n collection.set_alpha(alpha)\n collection.update(kwargs)\n\n if colors is None:\n if norm is not None: assert(isinstance(norm, mcolors.Normalize))\n if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))\n collection.set_array(np.asarray(c))\n collection.set_cmap(cmap)\n collection.set_norm(norm)\n\n if vmin is not None or vmax is not None:\n collection.set_clim(vmin, vmax)\n else:\n collection.autoscale_None()\n\n temp_x = x\n temp_y = y\n\n minx = np.amin(temp_x)\n maxx = np.amax(temp_x)\n miny = np.amin(temp_y)\n maxy = np.amax(temp_y)\n\n w = maxx-minx\n h = maxy-miny\n\n # the pad is a little hack to deal with the fact that we don't\n # want to transform all the symbols whose scales are in points\n # to data coords to get the exact bounding box for efficiency\n # reasons. It can be done right if this is deemed important\n padx, pady = 0.05*w, 0.05*h\n corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)\n self.update_datalim( corners)\n self.autoscale_view()\n\n # add the collection last\n self.add_collection(collection)\n return collection\n\n scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd\n\n def hexbin(self, x, y, C = None, gridsize = 100, bins = None,\n xscale = 'linear', yscale = 'linear',\n cmap=None, norm=None, vmin=None, vmax=None,\n alpha=1.0, linewidths=None, edgecolors='none',\n reduce_C_function = np.mean,\n **kwargs):\n \"\"\"\n call signature::\n\n hexbin(x, y, C = None, gridsize = 100, bins = None,\n xscale = 'linear', yscale = 'linear',\n cmap=None, norm=None, vmin=None, vmax=None,\n alpha=1.0, linewidths=None, edgecolors='none'\n reduce_C_function = np.mean,\n **kwargs)\n\n Make a hexagonal binning plot of *x* versus *y*, where *x*,\n *y* are 1-D sequences of the same length, *N*. If *C* is None\n (the default), this is a histogram of the number of occurences\n of the observations at (x[i],y[i]).\n\n If *C* is specified, it specifies values at the coordinate\n (x[i],y[i]). These values are accumulated for each hexagonal\n bin and then reduced according to *reduce_C_function*, which\n defaults to numpy's mean function (np.mean). (If *C* is\n specified, it must also be a 1-D sequence of the same length\n as *x* and *y*.)\n\n *x*, *y* and/or *C* may be masked arrays, in which case only\n unmasked points will be plotted.\n\n Optional keyword arguments:\n\n *gridsize*: [ 100 | integer ]\n The number of hexagons in the *x*-direction, default is\n 100. The corresponding number of hexagons in the\n *y*-direction is chosen such that the hexagons are\n approximately regular. Alternatively, gridsize can be a\n tuple with two elements specifying the number of hexagons\n in the *x*-direction and the *y*-direction.\n\n *bins*: [ None | 'log' | integer | sequence ]\n If *None*, no binning is applied; the color of each hexagon\n directly corresponds to its count value.\n\n If 'log', use a logarithmic scale for the color\n map. 
Internally, :math:`log_{10}(i+1)` is used to\n determine the hexagon color.\n\n If an integer, divide the counts in the specified number\n of bins, and color the hexagons accordingly.\n\n If a sequence of values, the values of the lower bound of\n the bins to be used.\n\n *xscale*: [ 'linear' | 'log' ]\n Use a linear or log10 scale on the horizontal axis.\n\n *scale*: [ 'linear' | 'log' ]\n Use a linear or log10 scale on the vertical axis.\n\n Other keyword arguments controlling color mapping and normalization\n arguments:\n\n *cmap*: [ None | Colormap ]\n a :class:`matplotlib.cm.Colormap` instance. If *None*,\n defaults to rc ``image.cmap``.\n\n *norm*: [ None | Normalize ]\n :class:`matplotlib.colors.Normalize` instance is used to\n scale luminance data to 0,1.\n\n *vmin*/*vmax*: scalar\n *vmin* and *vmax* are used in conjunction with *norm* to normalize\n luminance data. If either are *None*, the min and max of the color\n array *C* is used. Note if you pass a norm instance, your settings\n for *vmin* and *vmax* will be ignored.\n\n *alpha*: scalar\n the alpha value for the patches\n\n *linewidths*: [ None | scalar ]\n If *None*, defaults to rc lines.linewidth. Note that this\n is a tuple, and if you set the linewidths argument you\n must set it as a sequence of floats, as required by\n :class:`~matplotlib.collections.RegularPolyCollection`.\n\n Other keyword arguments controlling the Collection properties:\n\n *edgecolors*: [ None | mpl color | color sequence ]\n If 'none', draws the edges in the same color as the fill color.\n This is the default, as it avoids unsightly unpainted pixels\n between the hexagons.\n\n If *None*, draws the outlines in the default color.\n\n If a matplotlib color arg or sequence of rgba tuples, draws the\n outlines in the specified color.\n\n Here are the standard descriptions of all the\n :class:`~matplotlib.collections.Collection` kwargs:\n\n %(Collection)s\n\n The return value is a\n :class:`~matplotlib.collections.PolyCollection` instance; use\n :meth:`~matplotlib.collection.PolyCollection.get_array` on\n this :class:`~matplotlib.collections.PolyCollection` to get\n the counts in each hexagon.\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/hexbin_demo.py\n \"\"\"\n\n if not self._hold: self.cla()\n\n self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)\n\n x, y, C = cbook.delete_masked_points(x, y, C)\n\n # Set the size of the hexagon grid\n if iterable(gridsize):\n nx, ny = gridsize\n else:\n nx = gridsize\n ny = int(nx/math.sqrt(3))\n # Count the number of data in each hexagon\n x = np.array(x, float)\n y = np.array(y, float)\n if xscale=='log':\n x = np.log10(x)\n if yscale=='log':\n y = np.log10(y)\n xmin = np.amin(x)\n xmax = np.amax(x)\n ymin = np.amin(y)\n ymax = np.amax(y)\n # In the x-direction, the hexagons exactly cover the region from\n # xmin to xmax. Need some padding to avoid roundoff errors.\n padding = 1.e-9 * (xmax - xmin)\n xmin -= padding\n xmax += padding\n sx = (xmax-xmin) / nx\n sy = (ymax-ymin) / ny\n x = (x-xmin)/sx\n y = (y-ymin)/sy\n ix1 = np.round(x).astype(int)\n iy1 = np.round(y).astype(int)\n ix2 = np.floor(x).astype(int)\n iy2 = np.floor(y).astype(int)\n\n nx1 = nx + 1\n ny1 = ny + 1\n nx2 = nx\n ny2 = ny\n n = nx1*ny1+nx2*ny2\n\n d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2\n d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2\n bdist = (d1 0-1\n\n *norm* is only used for an MxN float array.\n\n *vmin*/*vmax*: [ None | scalar ]\n Used to scale a luminance image to 0-1. 
If either is\n *None*, the min and max of the luminance values will be\n used. Note if *norm* is not *None*, the settings for\n *vmin* and *vmax* will be ignored.\n\n *alpha*: scalar\n The alpha blending value, between 0 (transparent) and 1 (opaque)\n\n *origin*: [ None | 'upper' | 'lower' ]\n Place the [0,0] index of the array in the upper left or lower left\n corner of the axes. If *None*, defaults to rc ``image.origin``.\n\n *extent*: [ None | scalars (left, right, bottom, top) ]\n Data values of the axes. The default assigns zero-based row,\n column indices to the *x*, *y* centers of the pixels.\n\n *shape*: [ None | scalars (columns, rows) ]\n For raw buffer images\n\n *filternorm*:\n A parameter for the antigrain image resize filter. From the\n antigrain documentation, if *filternorm* = 1, the filter normalizes\n integer values and corrects the rounding errors. It doesn't do\n anything with the source floating point values; it corrects only\n integers according to the rule of 1.0, which means that any sum of\n pixel weights must be equal to 1.0. So, the filter function must\n produce a graph of the proper shape.\n\n *filterrad*:\n The filter radius for filters that have a radius\n parameter, i.e. when interpolation is one of: 'sinc',\n 'lanczos' or 'blackman'\n\n Additional kwargs are :class:`~matplotlib.artist.Artist` properties:\n\n %(Artist)s\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/image_demo.py\n \"\"\"\n\n if not self._hold: self.cla()\n\n if norm is not None: assert(isinstance(norm, mcolors.Normalize))\n if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))\n if aspect is None: aspect = rcParams['image.aspect']\n self.set_aspect(aspect)\n im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,\n filternorm=filternorm,\n filterrad=filterrad, resample=resample, **kwargs)\n\n im.set_data(X)\n im.set_alpha(alpha)\n self._set_artist_props(im)\n im.set_clip_path(self.patch)\n #if norm is None and shape is None:\n # im.set_clim(vmin, vmax)\n if vmin is not None or vmax is not None:\n im.set_clim(vmin, vmax)\n else:\n im.autoscale_None()\n im.set_url(url)\n\n xmin, xmax, ymin, ymax = im.get_extent()\n\n corners = (xmin, ymin), (xmax, ymax)\n self.update_datalim(corners)\n if self._autoscaleon:\n self.set_xlim((xmin, xmax))\n self.set_ylim((ymin, ymax))\n self.images.append(im)\n\n return im\n imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd\n\n\n def _pcolorargs(self, funcname, *args):\n if len(args)==1:\n C = args[0]\n numRows, numCols = C.shape\n X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )\n elif len(args)==3:\n X, Y, C = args\n else:\n raise TypeError(\n 'Illegal arguments to %s; see help(%s)' % (funcname, funcname))\n\n Nx = X.shape[-1]\n Ny = Y.shape[0]\n if len(X.shape) <> 2 or X.shape[0] == 1:\n x = X.reshape(1,Nx)\n X = x.repeat(Ny, axis=0)\n if len(Y.shape) <> 2 or Y.shape[1] == 1:\n y = Y.reshape(Ny, 1)\n Y = y.repeat(Nx, axis=1)\n if X.shape != Y.shape:\n raise TypeError(\n 'Incompatible X, Y inputs to %s; see help(%s)' % (\n funcname, funcname))\n return X, Y, C\n\n def pcolor(self, *args, **kwargs):\n \"\"\"\n call signatures::\n\n pcolor(C, **kwargs)\n pcolor(X, Y, C, **kwargs)\n\n Create a pseudocolor plot of a 2-D array.\n\n *C* is the array of color values.\n\n *X* and *Y*, if given, specify the (*x*, *y*) coordinates of\n the colored quadrilaterals; the quadrilateral for C[i,j] has\n corners at::\n\n (X[i, j], Y[i, j]),\n (X[i, j+1], Y[i, j+1]),\n (X[i+1, j], Y[i+1, j]),\n (X[i+1, j+1], Y[i+1, 
j+1]).\n\n Ideally the dimensions of *X* and *Y* should be one greater\n than those of *C*; if the dimensions are the same, then the\n last row and column of *C* will be ignored.\n\n Note that the column index corresponds to the\n *x*-coordinate, and the row index corresponds to *y*; for\n details, see the :ref:`Grid Orientation\n <axes-pcolor-grid-orientation>` section below.\n\n If either or both of *X* and *Y* are 1-D arrays or column vectors,\n they will be expanded as needed into the appropriate 2-D arrays,\n making a rectangular grid.\n\n *X*, *Y* and *C* may be masked arrays. If either C[i, j], or one\n of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],\n [i, j+1],[i+1, j+1]) is masked, nothing is plotted.\n\n Keyword arguments:\n\n *cmap*: [ None | Colormap ]\n A :class:`matplotlib.cm.Colormap` instance. If *None*, use\n rc settings.\n\n *norm*: [ None | Normalize ]\n An :class:`matplotlib.colors.Normalize` instance is used\n to scale luminance data to 0,1. If *None*, defaults to\n :func:`normalize`.\n\n *vmin*/*vmax*: [ None | scalar ]\n *vmin* and *vmax* are used in conjunction with *norm* to\n normalize luminance data. If either are *None*, the min\n and max of the color array *C* is used. If you pass a\n *norm* instance, *vmin* and *vmax* will be ignored.\n\n *shading*: [ 'flat' | 'faceted' ]\n If 'faceted', a black grid is drawn around each rectangle; if\n 'flat', edges are not drawn. Default is 'flat', contrary to\n Matlab(TM).\n\n This kwarg is deprecated; please use 'edgecolors' instead:\n * shading='flat' -- edgecolors='None'\n * shading='faceted' -- edgecolors='k'\n\n *edgecolors*: [ None | 'None' | color | color sequence ]\n If *None*, the rc setting is used by default.\n\n If 'None', edges will not be visible.\n\n An mpl color or sequence of colors will set the edge color\n\n *alpha*: 0 <= scalar <= 1\n the alpha blending value\n\n Return value is a :class:`matplotlib.collections.Collection`\n instance.\n\n .. _axes-pcolor-grid-orientation:\n\n The grid orientation follows the Matlab(TM) convention: an\n array *C* with shape (*nrows*, *ncolumns*) is plotted with\n the column number as *X* and the row number as *Y*, increasing\n up; hence it is plotted the way the array would be printed,\n except that the *Y* axis is reversed. 
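For example, a minimal sketch with arbitrary values (pylab-style, *np* assumed imported)::

    C = np.array([[1, 2, 3],
                  [4, 5, 6]])
    pcolor(C)

draws row 0 of *C* (the values 1, 2, 3) as the bottom row of quadrilaterals and row 1 (4, 5, 6) above it, with the column index running along *x*.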
That is, *C* is taken\n as *C*(*y*, *x*).\n\n Similarly for :func:`~matplotlib.pyplot.meshgrid`::\n\n x = np.arange(5)\n y = np.arange(3)\n X, Y = meshgrid(x,y)\n\n is equivalent to:\n\n X = array([[0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4]])\n\n Y = array([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2]])\n\n so if you have::\n\n C = rand( len(x), len(y))\n\n then you need::\n\n pcolor(X, Y, C.T)\n\n or::\n\n pcolor(C.T)\n\n Matlab :func:`pcolor` always discards the last row and column\n of *C*, but matplotlib displays the last row and column if *X* and\n *Y* are not specified, or if *X* and *Y* have one more row and\n column than *C*.\n\n kwargs can be used to control the\n :class:`~matplotlib.collection.PolyCollection` properties:\n\n %(PolyCollection)s\n \"\"\"\n\n if not self._hold: self.cla()\n\n alpha = kwargs.pop('alpha', 1.0)\n norm = kwargs.pop('norm', None)\n cmap = kwargs.pop('cmap', None)\n vmin = kwargs.pop('vmin', None)\n vmax = kwargs.pop('vmax', None)\n shading = kwargs.pop('shading', 'flat')\n\n X, Y, C = self._pcolorargs('pcolor', *args)\n Ny, Nx = X.shape\n\n # convert to MA, if necessary.\n C = ma.asarray(C)\n X = ma.asarray(X)\n Y = ma.asarray(Y)\n mask = ma.getmaskarray(X)+ma.getmaskarray(Y)\n xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]\n # don't plot if C or any of the surrounding vertices are masked.\n mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask\n\n newaxis = np.newaxis\n compress = np.compress\n\n ravelmask = (mask==0).ravel()\n X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())\n Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())\n X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())\n Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())\n X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())\n Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())\n X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())\n Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())\n npoly = len(X1)\n\n xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],\n X2[:,newaxis], Y2[:,newaxis],\n X3[:,newaxis], Y3[:,newaxis],\n X4[:,newaxis], Y4[:,newaxis],\n X1[:,newaxis], Y1[:,newaxis]),\n axis=1)\n verts = xy.reshape((npoly, 5, 2))\n\n #verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))\n\n C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())\n\n\n if shading == 'faceted':\n edgecolors = (0,0,0,1),\n linewidths = (0.25,)\n else:\n edgecolors = 'face'\n linewidths = (1.0,)\n kwargs.setdefault('edgecolors', edgecolors)\n kwargs.setdefault('antialiaseds', (0,))\n kwargs.setdefault('linewidths', linewidths)\n\n collection = mcoll.PolyCollection(verts, **kwargs)\n\n collection.set_alpha(alpha)\n collection.set_array(C)\n if norm is not None: assert(isinstance(norm, mcolors.Normalize))\n if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))\n collection.set_cmap(cmap)\n collection.set_norm(norm)\n if vmin is not None or vmax is not None:\n collection.set_clim(vmin, vmax)\n else:\n collection.autoscale_None()\n self.grid(False)\n\n x = X.compressed()\n y = Y.compressed()\n minx = np.amin(x)\n maxx = np.amax(x)\n miny = np.amin(y)\n maxy = np.amax(y)\n\n corners = (minx, miny), (maxx, maxy)\n self.update_datalim( corners)\n self.autoscale_view()\n self.add_collection(collection)\n return collection\n pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd\n\n def pcolormesh(self, *args, **kwargs):\n \"\"\"\n call signatures::\n\n pcolormesh(C)\n pcolormesh(X, Y, C)\n pcolormesh(C, **kwargs)\n\n *C* may be a 
masked array, but *X* and *Y* may not. Masked\n array support is implemented via *cmap* and *norm*; in\n contrast, :func:`~matplotlib.pyplot.pcolor` simply does not\n draw quadrilaterals with masked colors or vertices.\n\n Keyword arguments:\n\n *cmap*: [ None | Colormap ]\n A :class:`matplotlib.cm.Colormap` instance. If None, use\n rc settings.\n\n *norm*: [ None | Normalize ]\n A :class:`matplotlib.colors.Normalize` instance is used to\n scale luminance data to 0,1. If None, defaults to\n :func:`normalize`.\n\n *vmin*/*vmax*: [ None | scalar ]\n *vmin* and *vmax* are used in conjunction with *norm* to\n normalize luminance data. If either are *None*, the min\n and max of the color array *C* is used. If you pass a\n *norm* instance, *vmin* and *vmax* will be ignored.\n\n *shading*: [ 'flat' | 'faceted' ]\n If 'faceted', a black grid is drawn around each rectangle; if\n 'flat', edges are not drawn. Default is 'flat', contrary to\n Matlab(TM).\n\n This kwarg is deprecated; please use 'edgecolors' instead:\n * shading='flat' -- edgecolors='None'\n * shading='faceted -- edgecolors='k'\n\n *edgecolors*: [ None | 'None' | color | color sequence]\n If None, the rc setting is used by default.\n\n If 'None', edges will not be visible.\n\n An mpl color or sequence of colors will set the edge color\n\n *alpha*: 0 <= scalar <= 1\n the alpha blending value\n\n Return value is a :class:`matplotlib.collection.QuadMesh`\n object.\n\n kwargs can be used to control the\n :class:`matplotlib.collections.QuadMesh`\n properties:\n\n %(QuadMesh)s\n\n .. seealso::\n :func:`~matplotlib.pyplot.pcolor`:\n For an explanation of the grid orientation and the\n expansion of 1-D *X* and/or *Y* to 2-D arrays.\n \"\"\"\n if not self._hold: self.cla()\n\n alpha = kwargs.pop('alpha', 1.0)\n norm = kwargs.pop('norm', None)\n cmap = kwargs.pop('cmap', None)\n vmin = kwargs.pop('vmin', None)\n vmax = kwargs.pop('vmax', None)\n shading = kwargs.pop('shading', 'flat')\n edgecolors = kwargs.pop('edgecolors', 'None')\n antialiased = kwargs.pop('antialiased', False)\n\n X, Y, C = self._pcolorargs('pcolormesh', *args)\n Ny, Nx = X.shape\n\n # convert to one dimensional arrays\n C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at\n # lower left corner\n X = X.ravel()\n Y = Y.ravel()\n\n coords = np.zeros(((Nx * Ny), 2), dtype=float)\n coords[:, 0] = X\n coords[:, 1] = Y\n\n if shading == 'faceted' or edgecolors != 'None':\n showedges = 1\n else:\n showedges = 0\n\n collection = mcoll.QuadMesh(\n Nx - 1, Ny - 1, coords, showedges,\n antialiased=antialiased) # kwargs are not used\n collection.set_alpha(alpha)\n collection.set_array(C)\n if norm is not None: assert(isinstance(norm, mcolors.Normalize))\n if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))\n collection.set_cmap(cmap)\n collection.set_norm(norm)\n if vmin is not None or vmax is not None:\n collection.set_clim(vmin, vmax)\n else:\n collection.autoscale_None()\n\n self.grid(False)\n\n minx = np.amin(X)\n maxx = np.amax(X)\n miny = np.amin(Y)\n maxy = np.amax(Y)\n\n corners = (minx, miny), (maxx, maxy)\n self.update_datalim( corners)\n self.autoscale_view()\n self.add_collection(collection)\n return collection\n pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd\n\n def pcolorfast(self, *args, **kwargs):\n \"\"\"\n pseudocolor plot of a 2-D array\n\n Experimental; this is a version of pcolor that\n does not draw lines, that provides the fastest\n possible rendering with the Agg backend, and that\n can handle any quadrilateral 
grid.\n\n Call signatures::\n\n pcolor(C, **kwargs)\n pcolor(xr, yr, C, **kwargs)\n pcolor(x, y, C, **kwargs)\n pcolor(X, Y, C, **kwargs)\n\n C is the 2D array of color values corresponding to quadrilateral\n cells. Let (nr, nc) be its shape. C may be a masked array.\n\n ``pcolor(C, **kwargs)`` is equivalent to\n ``pcolor([0,nc], [0,nr], C, **kwargs)``\n\n *xr*, *yr* specify the ranges of *x* and *y* corresponding to the\n rectangular region bounding *C*. If::\n\n xr = [x0, x1]\n\n and::\n\n yr = [y0,y1]\n\n then *x* goes from *x0* to *x1* as the second index of *C* goes\n from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of\n cell (0,0), and (*x1*, *y1*) is the outermost corner of cell\n (*nr*-1, *nc*-1). All cells are rectangles of the same size.\n This is the fastest version.\n\n *x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,\n giving the x and y boundaries of the cells. Hence the cells are\n rectangular but the grid may be nonuniform. The speed is\n intermediate. (The grid is checked, and if found to be\n uniform the fast version is used.)\n\n *X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify\n the (x,y) coordinates of the corners of the colored\n quadrilaterals; the quadrilateral for C[i,j] has corners at\n (X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),\n (X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.\n This is the most general, but the slowest to render. It may\n produce faster and more compact output using ps, pdf, and\n svg backends, however.\n\n Note that the the column index corresponds to the x-coordinate,\n and the row index corresponds to y; for details, see\n the \"Grid Orientation\" section below.\n\n Optional keyword arguments:\n\n *cmap*: [ None | Colormap ]\n A cm Colormap instance from cm. If None, use rc settings.\n *norm*: [ None | Normalize ]\n An mcolors.Normalize instance is used to scale luminance data to\n 0,1. If None, defaults to normalize()\n *vmin*/*vmax*: [ None | scalar ]\n *vmin* and *vmax* are used in conjunction with norm to normalize\n luminance data. If either are *None*, the min and max of the color\n array *C* is used. 
If you pass a norm instance, *vmin* and *vmax*\n will be *None*.\n *alpha*: 0 <= scalar <= 1\n the alpha blending value\n\n Return value is an image if a regular or rectangular grid\n is specified, and a QuadMesh collection in the general\n quadrilateral case.\n\n \"\"\"\n\n if not self._hold: self.cla()\n\n alpha = kwargs.pop('alpha', 1.0)\n norm = kwargs.pop('norm', None)\n cmap = kwargs.pop('cmap', None)\n vmin = kwargs.pop('vmin', None)\n vmax = kwargs.pop('vmax', None)\n if norm is not None: assert(isinstance(norm, mcolors.Normalize))\n if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))\n\n C = args[-1]\n nr, nc = C.shape\n if len(args) == 1:\n style = \"image\"\n x = [0, nc]\n y = [0, nr]\n elif len(args) == 3:\n x, y = args[:2]\n x = np.asarray(x)\n y = np.asarray(y)\n if x.ndim == 1 and y.ndim == 1:\n if x.size == 2 and y.size == 2:\n style = \"image\"\n else:\n dx = np.diff(x)\n dy = np.diff(y)\n if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and\n np.ptp(dy) < 0.01*np.abs(dy.mean())):\n style = \"image\"\n else:\n style = \"pcolorimage\"\n elif x.ndim == 2 and y.ndim == 2:\n style = \"quadmesh\"\n else:\n raise TypeError(\"arguments do not match valid signatures\")\n else:\n raise TypeError(\"need 1 argument or 3 arguments\")\n\n if style == \"quadmesh\":\n\n # convert to one dimensional arrays\n # This should also be moved to the QuadMesh class\n C = ma.ravel(C) # data point in each cell is value\n # at lower left corner\n X = x.ravel()\n Y = y.ravel()\n Nx = nc+1\n Ny = nr+1\n\n # The following needs to be cleaned up; the renderer\n # requires separate contiguous arrays for X and Y,\n # but the QuadMesh class requires the 2D array.\n coords = np.empty(((Nx * Ny), 2), np.float64)\n coords[:, 0] = X\n coords[:, 1] = Y\n\n # The QuadMesh class can also be changed to\n # handle relevant superclass kwargs; the initializer\n # should do much more than it does now.\n collection = mcoll.QuadMesh(nc, nr, coords, 0)\n collection.set_alpha(alpha)\n collection.set_array(C)\n collection.set_cmap(cmap)\n collection.set_norm(norm)\n self.add_collection(collection)\n xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()\n ret = collection\n\n else:\n # One of the image styles:\n xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]\n if style == \"image\":\n\n im = mimage.AxesImage(self, cmap, norm,\n interpolation='nearest',\n origin='lower',\n extent=(xl, xr, yb, yt),\n **kwargs)\n im.set_data(C)\n im.set_alpha(alpha)\n self.images.append(im)\n ret = im\n\n if style == \"pcolorimage\":\n im = mimage.PcolorImage(self, x, y, C,\n cmap=cmap,\n norm=norm,\n alpha=alpha,\n **kwargs)\n self.images.append(im)\n ret = im\n\n self._set_artist_props(ret)\n if vmin is not None or vmax is not None:\n ret.set_clim(vmin, vmax)\n else:\n ret.autoscale_None()\n self.update_datalim(np.array([[xl, yb], [xr, yt]]))\n self.autoscale_view(tight=True)\n return ret\n\n def contour(self, *args, **kwargs):\n if not self._hold: self.cla()\n kwargs['filled'] = False\n return mcontour.ContourSet(self, *args, **kwargs)\n contour.__doc__ = mcontour.ContourSet.contour_doc\n\n def contourf(self, *args, **kwargs):\n if not self._hold: self.cla()\n kwargs['filled'] = True\n return mcontour.ContourSet(self, *args, **kwargs)\n contourf.__doc__ = mcontour.ContourSet.contour_doc\n\n def clabel(self, CS, *args, **kwargs):\n return CS.clabel(*args, **kwargs)\n clabel.__doc__ = mcontour.ContourSet.clabel.__doc__\n\n def table(self, **kwargs):\n \"\"\"\n call signature::\n\n table(cellText=None, cellColours=None,\n cellLoc='right', 
colWidths=None,\n rowLabels=None, rowColours=None, rowLoc='left',\n colLabels=None, colColours=None, colLoc='center',\n loc='bottom', bbox=None):\n\n Add a table to the current axes. Returns a\n :class:`matplotlib.table.Table` instance. For finer grained\n control over tables, use the :class:`~matplotlib.table.Table`\n class and add it to the axes with\n :meth:`~matplotlib.axes.Axes.add_table`.\n\n Thanks to John Gill for providing the class and table.\n\n kwargs control the :class:`~matplotlib.table.Table`\n properties:\n\n %(Table)s\n \"\"\"\n return mtable.table(self, **kwargs)\n table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd\n\n def twinx(self):\n \"\"\"\n call signature::\n\n ax = twinx()\n\n create a twin of Axes for generating a plot with a sharex\n x-axis but independent y axis. The y-axis of self will have\n ticks on left and the returned axes will have ticks on the\n right\n \"\"\"\n\n ax2 = self.figure.add_axes(self.get_position(True), sharex=self,\n frameon=False)\n ax2.yaxis.tick_right()\n ax2.yaxis.set_label_position('right')\n self.yaxis.tick_left()\n return ax2\n\n def twiny(self):\n \"\"\"\n call signature::\n\n ax = twiny()\n\n create a twin of Axes for generating a plot with a shared\n y-axis but independent x axis. The x-axis of self will have\n ticks on bottom and the returned axes will have ticks on the\n top\n \"\"\"\n\n ax2 = self.figure.add_axes(self.get_position(True), sharey=self,\n frameon=False)\n ax2.xaxis.tick_top()\n ax2.xaxis.set_label_position('top')\n self.xaxis.tick_bottom()\n return ax2\n\n def get_shared_x_axes(self):\n 'Return a copy of the shared axes Grouper object for x axes'\n return self._shared_x_axes\n\n def get_shared_y_axes(self):\n 'Return a copy of the shared axes Grouper object for y axes'\n return self._shared_y_axes\n\n #### Data analysis\n\n def hist(self, x, bins=10, range=None, normed=False, cumulative=False,\n bottom=None, histtype='bar', align='mid',\n orientation='vertical', rwidth=None, log=False, **kwargs):\n \"\"\"\n call signature::\n\n hist(x, bins=10, range=None, normed=False, cumulative=False,\n bottom=None, histtype='bar', align='mid',\n orientation='vertical', rwidth=None, log=False, **kwargs)\n\n Compute and draw the histogram of *x*. The return value is a\n tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,\n [*patches0*, *patches1*,...]) if the input contains multiple\n data.\n\n Keyword arguments:\n\n *bins*:\n Either an integer number of bins or a sequence giving the\n bins. *x* are the data to be binned. *x* can be an array,\n a 2D array with multiple data in its columns, or a list of\n arrays with data of different length. Note, if *bins*\n is an integer input argument=numbins, *bins* + 1 bin edges\n will be returned, compatible with the semantics of\n :func:`numpy.histogram` with the *new* = True argument.\n Unequally spaced bins are supported if *bins* is a sequence.\n\n *range*:\n The lower and upper range of the bins. Lower and upper outliers\n are ignored. If not provided, *range* is (x.min(), x.max()).\n Range has no effect if *bins* is a sequence.\n\n If *bins* is a sequence or *range* is specified, autoscaling is\n set off (*autoscale_on* is set to *False*) and the xaxis limits\n are set to encompass the full specified bin range.\n\n *normed*:\n If *True*, the first element of the return tuple will\n be the counts normalized to form a probability density, i.e.,\n ``n/(len(x)*dbin)``. 
In a probability density, the integral of\n the histogram should be 1; you can verify that with a\n trapezoidal integration of the probability density function::\n\n pdf, bins, patches = ax.hist(...)\n print np.sum(pdf * np.diff(bins))\n\n *cumulative*:\n If *True*, then a histogram is computed where each bin\n gives the counts in that bin plus all bins for smaller values.\n The last bin gives the total number of datapoints. If *normed*\n is also *True* then the histogram is normalized such that the\n last bin equals 1. If *cumulative* evaluates to less than 0\n (e.g. -1), the direction of accumulation is reversed. In this\n case, if *normed* is also *True*, then the histogram is normalized\n such that the first bin equals 1.\n\n *histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]\n The type of histogram to draw.\n\n - 'bar' is a traditional bar-type histogram. If multiple data\n are given the bars are arranged side by side.\n\n - 'barstacked' is a bar-type histogram where multiple\n data are stacked on top of each other.\n\n - 'step' generates a lineplot that is by default\n unfilled.\n\n - 'stepfilled' generates a lineplot that is by default\n filled.\n\n *align*: [ 'left' | 'mid' | 'right' ]\n Controls how the histogram is plotted.\n\n - 'left': bars are centered on the left bin edges.\n\n - 'mid': bars are centered between the bin edges.\n\n - 'right': bars are centered on the right bin edges.\n\n *orientation*: [ 'horizontal' | 'vertical' ]\n If 'horizontal', :func:`~matplotlib.pyplot.barh` will be\n used for bar-type histograms and the *bottom* kwarg will be\n the left edges.\n\n *rwidth*:\n The relative width of the bars as a fraction of the bin\n width. If *None*, automatically compute the width. Ignored\n if *histtype* = 'step' or 'stepfilled'.\n\n *log*:\n If *True*, the histogram axis will be set to a log scale.\n If *log* is *True* and *x* is a 1D array, empty bins will\n be filtered out and only the non-empty (*n*, *bins*,\n *patches*) will be returned.\n\n kwargs are used to update the properties of the hist\n :class:`~matplotlib.patches.Rectangle` instances:\n\n %(Rectangle)s\n\n You can use labels for your histogram, and only the first\n :class:`~matplotlib.patches.Rectangle` gets the label (the\n others get the magic string '_nolegend_'). This will make the\n histograms work in the intuitive way for bar charts::\n\n ax.hist(10+2*np.random.randn(1000), label='men')\n ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)\n ax.legend()\n\n **Example:**\n\n .. 
plot:: mpl_examples/pylab_examples/histogram_demo.py\n \"\"\"\n if not self._hold: self.cla()\n\n # NOTE: the range keyword overwrites the built-in func range !!!\n # needs to be fixed in with numpy !!!\n\n if kwargs.get('width') is not None:\n raise DeprecationWarning(\n 'hist now uses the rwidth to give relative width '\n 'and not absolute width')\n\n try:\n # make sure a copy is created: don't use asarray\n x = np.transpose(np.array(x))\n if len(x.shape)==1:\n x.shape = (1,x.shape[0])\n elif len(x.shape)==2 and x.shape[1]1: dr = 0.8\n else: dr = 1.0\n\n if histtype=='bar':\n width = dr*totwidth/len(n)\n dw = width\n\n if len(n)>1:\n boffset = -0.5*dr*totwidth*(1.-1./len(n))\n else:\n boffset = 0.0\n elif histtype=='barstacked':\n width = dr*totwidth\n boffset, dw = 0.0, 0.0\n\n stacked = True\n else:\n raise ValueError, 'invalid histtype: %s' % histtype\n\n if align == 'mid' or align == 'edge':\n boffset += 0.5*totwidth\n elif align == 'right':\n boffset += totwidth\n elif align != 'left' and align != 'center':\n raise ValueError, 'invalid align: %s' % align\n\n if orientation == 'horizontal':\n for m in n:\n color = self._get_lines._get_next_cycle_color()\n patch = self.barh(bins[:-1]+boffset, m, height=width,\n left=bottom, align='center', log=log,\n color=color)\n patches.append(patch)\n if stacked:\n if bottom is None: bottom = 0.0\n bottom += m\n boffset += dw\n elif orientation == 'vertical':\n for m in n:\n color = self._get_lines._get_next_cycle_color()\n patch = self.bar(bins[:-1]+boffset, m, width=width,\n bottom=bottom, align='center', log=log,\n color=color)\n patches.append(patch)\n if stacked:\n if bottom is None: bottom = 0.0\n bottom += m\n boffset += dw\n else:\n raise ValueError, 'invalid orientation: %s' % orientation\n\n elif histtype.startswith('step'):\n x = np.zeros( 2*len(bins), np.float )\n y = np.zeros( 2*len(bins), np.float )\n\n x[0::2], x[1::2] = bins, bins\n\n if align == 'left' or align == 'center':\n x -= 0.5*(bins[1]-bins[0])\n elif align == 'right':\n x += 0.5*(bins[1]-bins[0])\n elif align != 'mid' and align != 'edge':\n raise ValueError, 'invalid align: %s' % align\n\n if log:\n y[0],y[-1] = 1e-100, 1e-100\n if orientation == 'horizontal':\n self.set_xscale('log')\n elif orientation == 'vertical':\n self.set_yscale('log')\n\n fill = False\n if histtype == 'stepfilled':\n fill = True\n elif histtype != 'step':\n raise ValueError, 'invalid histtype: %s' % histtype\n\n for m in n:\n y[1:-1:2], y[2::2] = m, m\n if orientation == 'horizontal':\n x,y = y,x\n elif orientation != 'vertical':\n raise ValueError, 'invalid orientation: %s' % orientation\n\n color = self._get_lines._get_next_cycle_color()\n if fill:\n patches.append( self.fill(x, y,\n closed=False, facecolor=color) )\n else:\n patches.append( self.fill(x, y,\n closed=False, edgecolor=color, fill=False) )\n\n # adopted from adjust_x/ylim part of the bar method\n if orientation == 'horizontal':\n xmin, xmax = 0, self.dataLim.intervalx[1]\n for m in n:\n xmin = np.amin(m[m!=0]) # filter out the 0 height bins\n xmin = max(xmin*0.9, 1e-100)\n self.dataLim.intervalx = (xmin, xmax)\n elif orientation == 'vertical':\n ymin, ymax = 0, self.dataLim.intervaly[1]\n for m in n:\n ymin = np.amin(m[m!=0]) # filter out the 0 height bins\n ymin = max(ymin*0.9, 1e-100)\n self.dataLim.intervaly = (ymin, ymax)\n self.autoscale_view()\n\n else:\n raise ValueError, 'invalid histtype: %s' % histtype\n\n label = kwargs.pop('label', '')\n\n for patch in patches:\n for p in patch:\n p.update(kwargs)\n p.set_label(label)\n 
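# Only the first Rectangle created by this call keeps the caller's label;
# every later patch is relabelled '_nolegend_' (next line), so legend()
# shows one entry per hist() call rather than one per bar.  Rough sketch
# with arbitrary data (pylab-style):
#     ax.hist(np.random.randn(1000), label='sample')
#     ax.legend()    # a single legend entry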
label = '_nolegend_'\n\n if binsgiven:\n self.set_autoscale_on(False)\n if orientation == 'vertical':\n self.autoscale_view(scalex=False, scaley=True)\n XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])\n self.set_xbound(XL)\n else:\n self.autoscale_view(scalex=True, scaley=False)\n YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])\n self.set_ybound(YL)\n\n if len(n)==1:\n return n[0], bins, cbook.silent_list('Patch', patches[0])\n else:\n return n, bins, cbook.silent_list('Lists of Patches', patches)\n hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd\n\n def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs):\n \"\"\"\n call signature::\n\n psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs)\n\n The power spectral density by Welch's average periodogram\n method. The vector *x* is divided into *NFFT* length\n segments. Each segment is detrended by function *detrend* and\n windowed by function *window*. *noverlap* gives the length of\n the overlap between segments. The :math:`|\\mathrm{fft}(i)|^2`\n of each segment :math:`i` are averaged to compute *Pxx*, with a\n scaling to correct for power loss due to windowing. *Fs* is the\n sampling frequency.\n\n %(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the x extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\n Returns the tuple (*Pxx*, *freqs*).\n\n For plotting, the power is plotted as\n :math:`10\\log_{10}(P_{xx})` for decibels, though *Pxx* itself\n is returned.\n\n References:\n Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)\n\n kwargs control the :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/psd_demo.py\n \"\"\"\n if not self._hold: self.cla()\n pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,\n sides, scale_by_freq)\n pxx.shape = len(freqs),\n freqs += Fc\n\n if scale_by_freq in (None, True):\n psd_units = 'dB/Hz'\n else:\n psd_units = 'dB'\n\n self.plot(freqs, 10*np.log10(pxx), **kwargs)\n self.set_xlabel('Frequency')\n self.set_ylabel('Power Spectral Density (%s)' % psd_units)\n self.grid(True)\n vmin, vmax = self.viewLim.intervaly\n intv = vmax-vmin\n logi = int(np.log10(intv))\n if logi==0: logi=.1\n step = 10*logi\n #print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1\n ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)\n self.set_yticks(ticks)\n\n return pxx, freqs\n\n psd_doc_dict = dict()\n psd_doc_dict.update(martist.kwdocd)\n psd_doc_dict.update(mlab.kwdocd)\n psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])\n psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict\n\n def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs):\n \"\"\"\n call signature::\n\n csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs)\n\n The cross spectral density :math:`P_{xy}` by Welch's average\n periodogram method. 
The vectors *x* and *y* are divided into\n *NFFT* length segments. Each segment is detrended by function\n *detrend* and windowed by function *window*. The product of\n the direct FFTs of *x* and *y* are averaged over each segment\n to compute :math:`P_{xy}`, with a scaling to correct for power\n loss due to windowing.\n\n Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum\n (complex valued), and :math:`10\\log_{10}|P_{xy}|` is\n plotted.\n\n %(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the x extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\n References:\n Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)\n\n kwargs control the Line2D properties:\n\n %(Line2D)s\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/csd_demo.py\n\n .. seealso:\n :meth:`psd`\n For a description of the optional parameters.\n \"\"\"\n if not self._hold: self.cla()\n pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,\n pad_to, sides, scale_by_freq)\n pxy.shape = len(freqs),\n # pxy is complex\n freqs += Fc\n\n self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)\n self.set_xlabel('Frequency')\n self.set_ylabel('Cross Spectrum Magnitude (dB)')\n self.grid(True)\n vmin, vmax = self.viewLim.intervaly\n\n intv = vmax-vmin\n step = 10*int(np.log10(intv))\n\n ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)\n self.set_yticks(ticks)\n\n return pxy, freqs\n csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict\n\n def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs):\n \"\"\"\n call signature::\n\n cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,\n window = mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs)\n\n cohere the coherence between *x* and *y*. Coherence is the normalized\n cross spectral density:\n\n .. math::\n\n C_{xy} = \\\\frac{|P_{xy}|^2}{P_{xx}P_{yy}}\n\n %(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the x extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\n The return value is a tuple (*Cxy*, *f*), where *f* are the\n frequencies of the coherence vector.\n\n kwargs are applied to the lines.\n\n References:\n\n * Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)\n\n kwargs control the :class:`~matplotlib.lines.Line2D`\n properties of the coherence plot:\n\n %(Line2D)s\n\n **Example:**\n\n .. 
plot:: mpl_examples/pylab_examples/cohere_demo.py\n \"\"\"\n if not self._hold: self.cla()\n cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,\n scale_by_freq)\n freqs += Fc\n\n self.plot(freqs, cxy, **kwargs)\n self.set_xlabel('Frequency')\n self.set_ylabel('Coherence')\n self.grid(True)\n\n return cxy, freqs\n cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict\n\n def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=128,\n cmap=None, xextent=None, pad_to=None, sides='default',\n scale_by_freq=None):\n \"\"\"\n call signature::\n\n specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=128,\n cmap=None, xextent=None, pad_to=None, sides='default',\n scale_by_freq=None)\n\n Compute a spectrogram of data in *x*. Data are split into\n *NFFT* length segments and the PSD of each section is\n computed. The windowing function *window* is applied to each\n segment, and the amount of overlap of each segment is\n specified with *noverlap*.\n\n %(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the y extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\n *cmap*:\n A :class:`matplotlib.cm.Colormap` instance; if *None* use\n default determined by rc\n\n *xextent*:\n The image extent along the x-axis. xextent = (xmin,xmax)\n The default is (0,max(bins)), where bins is the return\n value from :func:`mlab.specgram`\n\n Return value is (*Pxx*, *freqs*, *bins*, *im*):\n\n - *bins* are the time points the spectrogram is calculated over\n - *freqs* is an array of frequencies\n - *Pxx* is a len(times) x len(freqs) array of power\n - *im* is a :class:`matplotlib.image.AxesImage` instance\n\n Note: If *x* is real (i.e. non-complex), only the positive\n spectrum is shown. If *x* is complex, both positive and\n negative parts of the spectrum are shown. This can be\n overridden using the *sides* keyword argument.\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/specgram_demo.py\n \"\"\"\n if not self._hold: self.cla()\n\n Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,\n window, noverlap, pad_to, sides, scale_by_freq)\n\n Z = 10. 
* np.log10(Pxx)\n Z = np.flipud(Z)\n\n if xextent is None: xextent = 0, np.amax(bins)\n xmin, xmax = xextent\n freqs += Fc\n extent = xmin, xmax, freqs[0], freqs[-1]\n im = self.imshow(Z, cmap, extent=extent)\n self.axis('auto')\n\n return Pxx, freqs, bins, im\n specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict\n del psd_doc_dict #So that this does not become an Axes attribute\n\n def spy(self, Z, precision=0, marker=None, markersize=None,\n aspect='equal', **kwargs):\n \"\"\"\n call signature::\n\n spy(Z, precision=0, marker=None, markersize=None,\n aspect='equal', **kwargs)\n\n ``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.\n\n If *precision* is 0, any non-zero value will be plotted;\n else, values of :math:`|Z| > precision` will be plotted.\n\n For :class:`scipy.sparse.spmatrix` instances, there is a\n special case: if *precision* is 'present', any value present in\n the array will be plotted, even if it is identically zero.\n\n The array will be plotted as it would be printed, with\n the first index (row) increasing down and the second\n index (column) increasing to the right.\n\n By default aspect is 'equal', so that each array element\n occupies a square space; set the aspect kwarg to 'auto'\n to allow the plot to fill the plot box, or to any scalar\n number to specify the aspect ratio of an array element\n directly.\n\n Two plotting styles are available: image or marker. Both\n are available for full arrays, but only the marker style\n works for :class:`scipy.sparse.spmatrix` instances.\n\n If *marker* and *markersize* are *None*, an image will be\n returned and any remaining kwargs are passed to\n :func:`~matplotlib.pyplot.imshow`; else, a\n :class:`~matplotlib.lines.Line2D` object will be returned with\n the value of marker determining the marker type, and any\n remaining kwargs passed to the\n :meth:`~matplotlib.axes.Axes.plot` method.\n\n If *marker* and *markersize* are *None*, useful kwargs include:\n\n * *cmap*\n * *alpha*\n\n .. seealso::\n :func:`~matplotlib.pyplot.imshow`\n\n For controlling colors, e.g. cyan background and red marks,\n use::\n\n cmap = mcolors.ListedColormap(['c','r'])\n\n If *marker* or *markersize* is not *None*, useful kwargs include:\n\n * *marker*\n * *markersize*\n * *color*\n\n Useful values for *marker* include:\n\n * 's' square (default)\n * 'o' circle\n * '.' point\n * ',' pixel\n\n .. 
seealso::\n :func:`~matplotlib.pyplot.plot`\n \"\"\"\n if precision is None:\n precision = 0\n warnings.DeprecationWarning(\"Use precision=0 instead of None\")\n # 2008/10/03\n if marker is None and markersize is None and hasattr(Z, 'tocoo'):\n marker = 's'\n if marker is None and markersize is None:\n Z = np.asarray(Z)\n mask = np.absolute(Z)>precision\n\n if 'cmap' not in kwargs:\n kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],\n name='binary')\n nr, nc = Z.shape\n extent = [-0.5, nc-0.5, nr-0.5, -0.5]\n ret = self.imshow(mask, interpolation='nearest', aspect=aspect,\n extent=extent, origin='upper', **kwargs)\n else:\n if hasattr(Z, 'tocoo'):\n c = Z.tocoo()\n if precision == 'present':\n y = c.row\n x = c.col\n else:\n nonzero = np.absolute(c.data) > precision\n y = c.row[nonzero]\n x = c.col[nonzero]\n else:\n Z = np.asarray(Z)\n nonzero = np.absolute(Z)>precision\n y, x = np.nonzero(nonzero)\n if marker is None: marker = 's'\n if markersize is None: markersize = 10\n marks = mlines.Line2D(x, y, linestyle='None',\n marker=marker, markersize=markersize, **kwargs)\n self.add_line(marks)\n nr, nc = Z.shape\n self.set_xlim(xmin=-0.5, xmax=nc-0.5)\n self.set_ylim(ymin=nr-0.5, ymax=-0.5)\n self.set_aspect(aspect)\n ret = marks\n self.title.set_y(1.05)\n self.xaxis.tick_top()\n self.xaxis.set_ticks_position('both')\n self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,\n steps=[1, 2, 5, 10],\n integer=True))\n self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,\n steps=[1, 2, 5, 10],\n integer=True))\n return ret\n\n def matshow(self, Z, **kwargs):\n '''\n Plot a matrix or array as an image.\n\n The matrix will be shown the way it would be printed,\n with the first row at the top. Row and column numbering\n is zero-based.\n\n Argument:\n *Z* anything that can be interpreted as a 2-D array\n\n kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.\n :meth:`matshow` sets defaults for *extent*, *origin*,\n *interpolation*, and *aspect*; use care in overriding the\n *extent* and *origin* kwargs, because they interact. (Also,\n if you want to change them, you probably should be using\n imshow directly in your own version of matshow.)\n\n Returns: an :class:`matplotlib.image.AxesImage` instance.\n '''\n Z = np.asarray(Z)\n nr, nc = Z.shape\n extent = [-0.5, nc-0.5, nr-0.5, -0.5]\n kw = {'extent': extent,\n 'origin': 'upper',\n 'interpolation': 'nearest',\n 'aspect': 'equal'} # (already the imshow default)\n kw.update(kwargs)\n im = self.imshow(Z, **kw)\n self.title.set_y(1.05)\n self.xaxis.tick_top()\n self.xaxis.set_ticks_position('both')\n self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,\n steps=[1, 2, 5, 10],\n integer=True))\n self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,\n steps=[1, 2, 5, 10],\n integer=True))\n return im\n\nclass SubplotBase:\n \"\"\"\n Base class for subplots, which are :class:`Axes` instances with\n additional methods to facilitate generating and manipulating a set\n of :class:`Axes` within a figure.\n \"\"\"\n\n def __init__(self, fig, *args, **kwargs):\n \"\"\"\n *fig* is a :class:`matplotlib.figure.Figure` instance.\n\n *args* is the tuple (*numRows*, *numCols*, *plotNum*), where\n the array of subplots in the figure has dimensions *numRows*,\n *numCols*, and where *plotNum* is the number of the subplot\n being created. 
*plotNum* starts at 1 in the upper left\n corner and increases to the right.\n\n If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the\n decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.\n \"\"\"\n\n self.figure = fig\n\n if len(args)==1:\n s = str(args[0])\n if len(s) != 3:\n raise ValueError('Argument to subplot must be a 3 digits long')\n rows, cols, num = map(int, s)\n elif len(args)==3:\n rows, cols, num = args\n else:\n raise ValueError( 'Illegal argument to subplot')\n\n\n total = rows*cols\n num -= 1 # convert from matlab to python indexing\n # ie num in range(0,total)\n if num >= total:\n raise ValueError( 'Subplot number exceeds total subplots')\n self._rows = rows\n self._cols = cols\n self._num = num\n\n self.update_params()\n\n # _axes_class is set in the subplot_class_factory\n self._axes_class.__init__(self, fig, self.figbox, **kwargs)\n\n def get_geometry(self):\n 'get the subplot geometry, eg 2,2,3'\n return self._rows, self._cols, self._num+1\n\n # COVERAGE NOTE: Never used internally or from examples\n def change_geometry(self, numrows, numcols, num):\n 'change subplot geometry, eg. from 1,1,1 to 2,2,3'\n self._rows = numrows\n self._cols = numcols\n self._num = num-1\n self.update_params()\n self.set_position(self.figbox)\n\n def update_params(self):\n 'update the subplot position from fig.subplotpars'\n\n rows = self._rows\n cols = self._cols\n num = self._num\n\n pars = self.figure.subplotpars\n left = pars.left\n right = pars.right\n bottom = pars.bottom\n top = pars.top\n wspace = pars.wspace\n hspace = pars.hspace\n totWidth = right-left\n totHeight = top-bottom\n\n figH = totHeight/(rows + hspace*(rows-1))\n sepH = hspace*figH\n\n figW = totWidth/(cols + wspace*(cols-1))\n sepW = wspace*figW\n\n rowNum, colNum = divmod(num, cols)\n\n figBottom = top - (rowNum+1)*figH - rowNum*sepH\n figLeft = left + colNum*(figW + sepW)\n\n self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,\n figW, figH)\n self.rowNum = rowNum\n self.colNum = colNum\n self.numRows = rows\n self.numCols = cols\n\n if 0:\n print 'rcn', rows, cols, num\n print 'lbrt', left, bottom, right, top\n print 'self.figBottom', self.figBottom\n print 'self.figLeft', self.figLeft\n print 'self.figW', self.figW\n print 'self.figH', self.figH\n print 'self.rowNum', self.rowNum\n print 'self.colNum', self.colNum\n print 'self.numRows', self.numRows\n print 'self.numCols', self.numCols\n\n\n def is_first_col(self):\n return self.colNum==0\n\n def is_first_row(self):\n return self.rowNum==0\n\n def is_last_row(self):\n return self.rowNum==self.numRows-1\n\n\n def is_last_col(self):\n return self.colNum==self.numCols-1\n\n # COVERAGE NOTE: Never used internally or from examples\n def label_outer(self):\n \"\"\"\n set the visible property on ticklabels so xticklabels are\n visible only if the subplot is in the last row and yticklabels\n are visible only if the subplot is in the first column\n \"\"\"\n lastrow = self.is_last_row()\n firstcol = self.is_first_col()\n for label in self.get_xticklabels():\n label.set_visible(lastrow)\n\n for label in self.get_yticklabels():\n label.set_visible(firstcol)\n\n_subplot_classes = {}\ndef subplot_class_factory(axes_class=None):\n # This makes a new class that inherits from SubclassBase and the\n # given axes_class (which is assumed to be a subclass of Axes).\n # This is perhaps a little bit roundabout to make a new class on\n # the fly like this, but it means that a new Subplot class does\n # not have to be created for every type of Axes.\n if 
axes_class is None:\n axes_class = Axes\n\n new_class = _subplot_classes.get(axes_class)\n if new_class is None:\n new_class = new.classobj(\"%sSubplot\" % (axes_class.__name__),\n (SubplotBase, axes_class),\n {'_axes_class': axes_class})\n _subplot_classes[axes_class] = new_class\n\n return new_class\n\n# This is provided for backward compatibility\nSubplot = subplot_class_factory()\n\nmartist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)\n\n\"\"\"\n# this is some discarded code I was using to find the minimum positive\n# data point for some log scaling fixes. I realized there was a\n# cleaner way to do it, but am keeping this around as an example for\n# how to get the data out of the axes. Might want to make something\n# like this a method one day, or better yet make get_verts an Artist\n# method\n\n minx, maxx = self.get_xlim()\n if minx<=0 or maxx<=0:\n # find the min pos value in the data\n xs = []\n for line in self.lines:\n xs.extend(line.get_xdata(orig=False))\n for patch in self.patches:\n xs.extend([x for x,y in patch.get_verts()])\n for collection in self.collections:\n xs.extend([x for x,y in collection.get_verts()])\n posx = [x for x in xs if x>0]\n if len(posx):\n\n minx = min(posx)\n maxx = max(posx)\n # warning, probably breaks inverted axis\n self.set_xlim((0.1*minx, maxx))\n\n\"\"\"\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":382696,"cells":{"repo_name":{"kind":"string","value":"nvoron23/scikit-learn"},"path":{"kind":"string","value":"doc/sphinxext/numpy_ext/docscrape_sphinx.py"},"copies":{"kind":"string","value":"408"},"size":{"kind":"string","value":"8061"},"content":{"kind":"string","value":"import re\nimport inspect\nimport textwrap\nimport pydoc\nfrom .docscrape import NumpyDocString\nfrom .docscrape import FunctionDoc\nfrom .docscrape import ClassDoc\n\n\nclass SphinxDocString(NumpyDocString):\n def __init__(self, docstring, config=None):\n config = {} if config is None else config\n self.use_plots = config.get('use_plots', False)\n NumpyDocString.__init__(self, docstring, config=config)\n\n # string conversion routines\n def _str_header(self, name, symbol='`'):\n return ['.. rubric:: ' + name, '']\n\n def _str_field_list(self, name):\n return [':' + name + ':']\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' ' * indent + line]\n return out\n\n def _str_signature(self):\n return ['']\n if self['Signature']:\n return ['``%s``' % self['Signature']] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n return self['Summary'] + ['']\n\n def _str_extended_summary(self):\n return self['Extended Summary'] + ['']\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_field_list(name)\n out += ['']\n for param, param_type, desc in self[name]:\n out += self._str_indent(['**%s** : %s' % (param.strip(),\n param_type)])\n out += ['']\n out += self._str_indent(desc, 8)\n out += ['']\n return out\n\n @property\n def _obj(self):\n if hasattr(self, '_cls'):\n return self._cls\n elif hasattr(self, '_f'):\n return self._f\n return None\n\n def _str_member_list(self, name):\n \"\"\"\n Generate a member listing, autosummary:: table where possible,\n and a table where not.\n\n \"\"\"\n out = []\n if self[name]:\n out += ['.. rubric:: %s' % name, '']\n prefix = getattr(self, '_name', '')\n\n if prefix:\n prefix = '~%s.' 
% prefix\n\n autosum = []\n others = []\n for param, param_type, desc in self[name]:\n param = param.strip()\n if not self._obj or hasattr(self._obj, param):\n autosum += [\" %s%s\" % (prefix, param)]\n else:\n others.append((param, param_type, desc))\n\n if autosum:\n # GAEL: Toctree commented out below because it creates\n # hundreds of sphinx warnings\n # out += ['.. autosummary::', ' :toctree:', '']\n out += ['.. autosummary::', '']\n out += autosum\n\n if others:\n maxlen_0 = max([len(x[0]) for x in others])\n maxlen_1 = max([len(x[1]) for x in others])\n hdr = \"=\" * maxlen_0 + \" \" + \"=\" * maxlen_1 + \" \" + \"=\" * 10\n fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)\n n_indent = maxlen_0 + maxlen_1 + 4\n out += [hdr]\n for param, param_type, desc in others:\n out += [fmt % (param.strip(), param_type)]\n out += self._str_indent(desc, n_indent)\n out += [hdr]\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += ['']\n content = textwrap.dedent(\"\\n\".join(self[name])).split(\"\\n\")\n out += content\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n out = []\n if self['See Also']:\n see_also = super(SphinxDocString, self)._str_see_also(func_role)\n out = ['.. seealso::', '']\n out += self._str_indent(see_also[2:])\n return out\n\n def _str_warnings(self):\n out = []\n if self['Warnings']:\n out = ['.. warning::', '']\n out += self._str_indent(self['Warnings'])\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n if len(idx) == 0:\n return out\n\n out += ['.. index:: %s' % idx.get('default', '')]\n for section, references in idx.iteritems():\n if section == 'default':\n continue\n elif section == 'refguide':\n out += [' single: %s' % (', '.join(references))]\n else:\n out += [' %s: %s' % (section, ','.join(references))]\n return out\n\n def _str_references(self):\n out = []\n if self['References']:\n out += self._str_header('References')\n if isinstance(self['References'], str):\n self['References'] = [self['References']]\n out.extend(self['References'])\n out += ['']\n # Latex collects all references to a separate bibliography,\n # so we need to insert links to it\n import sphinx # local import to avoid test dependency\n if sphinx.__version__ >= \"0.6\":\n out += ['.. only:: latex', '']\n else:\n out += ['.. latexonly::', '']\n items = []\n for line in self['References']:\n m = re.match(r'.. \\[([a-z0-9._-]+)\\]', line, re.I)\n if m:\n items.append(m.group(1))\n out += [' ' + \", \".join([\"[%s]_\" % item for item in items]), '']\n return out\n\n def _str_examples(self):\n examples_str = \"\\n\".join(self['Examples'])\n\n if (self.use_plots and 'import matplotlib' in examples_str\n and 'plot::' not in examples_str):\n out = []\n out += self._str_header('Examples')\n out += ['.. 
plot::', '']\n out += self._str_indent(self['Examples'])\n out += ['']\n return out\n else:\n return self._str_section('Examples')\n\n def __str__(self, indent=0, func_role=\"obj\"):\n out = []\n out += self._str_signature()\n out += self._str_index() + ['']\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):\n out += self._str_param_list(param_list)\n out += self._str_warnings()\n out += self._str_see_also(func_role)\n out += self._str_section('Notes')\n out += self._str_references()\n out += self._str_examples()\n for param_list in ('Methods',):\n out += self._str_member_list(param_list)\n out = self._str_indent(out, indent)\n return '\\n'.join(out)\n\n\nclass SphinxFunctionDoc(SphinxDocString, FunctionDoc):\n def __init__(self, obj, doc=None, config={}):\n self.use_plots = config.get('use_plots', False)\n FunctionDoc.__init__(self, obj, doc=doc, config=config)\n\n\nclass SphinxClassDoc(SphinxDocString, ClassDoc):\n def __init__(self, obj, doc=None, func_doc=None, config={}):\n self.use_plots = config.get('use_plots', False)\n ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)\n\n\nclass SphinxObjDoc(SphinxDocString):\n def __init__(self, obj, doc=None, config=None):\n self._f = obj\n SphinxDocString.__init__(self, doc, config=config)\n\n\ndef get_doc_object(obj, what=None, doc=None, config={}):\n if what is None:\n if inspect.isclass(obj):\n what = 'class'\n elif inspect.ismodule(obj):\n what = 'module'\n elif callable(obj):\n what = 'function'\n else:\n what = 'object'\n if what == 'class':\n return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,\n config=config)\n elif what in ('function', 'method'):\n return SphinxFunctionDoc(obj, doc=doc, config=config)\n else:\n if doc is None:\n doc = pydoc.getdoc(obj)\n return SphinxObjDoc(obj, doc, config=config)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382697,"cells":{"repo_name":{"kind":"string","value":"MJuddBooth/pandas"},"path":{"kind":"string","value":"pandas/tests/frame/test_rank.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"11364"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Series\nfrom pandas.tests.frame.common import TestData\nimport pandas.util.testing as tm\nfrom pandas.util.testing import assert_frame_equal\n\n\nclass TestRank(TestData):\n s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])\n df = DataFrame({'A': s, 'B': s})\n\n results = {\n 'average': np.array([1.5, 5.5, 7.0, 3.5, np.nan,\n 3.5, 1.5, 8.0, np.nan, 5.5]),\n 'min': np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),\n 'max': np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),\n 'first': np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),\n 'dense': np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),\n }\n\n @pytest.fixture(params=['average', 'min', 'max', 'first', 'dense'])\n def method(self, request):\n \"\"\"\n Fixture for trying all rank methods\n \"\"\"\n return request.param\n\n def test_rank(self):\n rankdata = pytest.importorskip('scipy.stats.rankdata')\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n ranks0 = self.frame.rank()\n ranks1 = self.frame.rank(1)\n mask = np.isnan(self.frame.values)\n\n fvals = 
self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp0[mask] = np.nan\n\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n exp1[mask] = np.nan\n\n tm.assert_almost_equal(ranks0.values, exp0)\n tm.assert_almost_equal(ranks1.values, exp1)\n\n # integers\n df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))\n\n result = df.rank()\n exp = df.astype(float).rank()\n tm.assert_frame_equal(result, exp)\n\n result = df.rank(1)\n exp = df.astype(float).rank(1)\n tm.assert_frame_equal(result, exp)\n\n def test_rank2(self):\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0\n result = df.rank(1, pct=True)\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = df.rank(0) / 2.0\n result = df.rank(0, pct=True)\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])\n result = df.rank(1, numeric_only=False)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])\n result = df.rank(0, numeric_only=False)\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]])\n result = df.rank(1, numeric_only=False)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]])\n result = df.rank(0, numeric_only=False)\n tm.assert_frame_equal(result, expected)\n\n # f7u12, this does not work without extensive workaround\n data = [[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],\n [datetime(2000, 1, 2), datetime(2000, 1, 3),\n datetime(2000, 1, 1)]]\n df = DataFrame(data)\n\n # check the rank\n expected = DataFrame([[2., np.nan, 1.],\n [2., 3., 1.]])\n result = df.rank(1, numeric_only=False, ascending=True)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame([[1., np.nan, 2.],\n [2., 1., 3.]])\n result = df.rank(1, numeric_only=False, ascending=False)\n tm.assert_frame_equal(result, expected)\n\n # mixed-type frames\n self.mixed_frame['datetime'] = datetime.now()\n self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)\n\n result = self.mixed_frame.rank(1)\n expected = self.mixed_frame.rank(1, numeric_only=True)\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({\"a\": [1e-20, -5, 1e-20 + 1e-40, 10,\n 1e60, 1e80, 1e-30]})\n exp = DataFrame({\"a\": [3.5, 1., 3.5, 5., 6., 7., 2.]})\n tm.assert_frame_equal(df.rank(), exp)\n\n def test_rank_na_option(self):\n rankdata = pytest.importorskip('scipy.stats.rankdata')\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n # bottom\n ranks0 = self.frame.rank(na_option='bottom')\n ranks1 = self.frame.rank(1, na_option='bottom')\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n\n tm.assert_almost_equal(ranks0.values, exp0)\n tm.assert_almost_equal(ranks1.values, exp1)\n\n # top\n ranks0 = self.frame.rank(na_option='top')\n ranks1 = self.frame.rank(1, na_option='top')\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fval0)\n exp1 = np.apply_along_axis(rankdata, 1, fval1)\n\n 
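# A small usage sketch (not part of the original test module) of the
# DataFrame.rank() options these tests exercise (`method`, `na_option`,
# `ascending`, `pct`), run on the same fixture series used by TestRank above.
# Assumes only pandas and numpy are installed.
import numpy as np
import pandas as pd

s = pd.Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
df = pd.DataFrame({'A': s, 'B': s})

print(df.rank())                           # default: method='average', NaN stays NaN
print(df.rank(method='min'))               # ties all get the lowest rank of the group
print(df.rank(na_option='top'))            # NaNs are ranked ahead of every real value
print(df.rank(ascending=False, pct=True))  # descending ranks, scaled into (0, 1]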
tm.assert_almost_equal(ranks0.values, exp0)\n tm.assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # bottom\n ranks0 = self.frame.rank(na_option='top', ascending=False)\n ranks1 = self.frame.rank(1, na_option='top', ascending=False)\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fvals)\n exp1 = np.apply_along_axis(rankdata, 1, -fvals)\n\n tm.assert_almost_equal(ranks0.values, exp0)\n tm.assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # top\n ranks0 = self.frame.rank(na_option='bottom', ascending=False)\n ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fval0)\n exp1 = np.apply_along_axis(rankdata, 1, -fval1)\n\n tm.assert_numpy_array_equal(ranks0.values, exp0)\n tm.assert_numpy_array_equal(ranks1.values, exp1)\n\n # bad values throw error\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n\n with pytest.raises(ValueError, match=msg):\n self.frame.rank(na_option='bad', ascending=False)\n\n # invalid type\n with pytest.raises(ValueError, match=msg):\n self.frame.rank(na_option=True, ascending=False)\n\n def test_rank_axis(self):\n # check if using axes' names gives the same result\n df = DataFrame([[2, 1], [4, 3]])\n tm.assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))\n tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))\n\n def test_rank_methods_frame(self):\n pytest.importorskip('scipy.stats.special')\n rankdata = pytest.importorskip('scipy.stats.rankdata')\n import scipy\n\n xs = np.random.randint(0, 21, (100, 26))\n xs = (xs - 10.0) / 10.0\n cols = [chr(ord('z') - i) for i in range(xs.shape[1])]\n\n for vals in [xs, xs + 1e6, xs * 1e-6]:\n df = DataFrame(vals, columns=cols)\n\n for ax in [0, 1]:\n for m in ['average', 'min', 'max', 'first', 'dense']:\n result = df.rank(axis=ax, method=m)\n sprank = np.apply_along_axis(\n rankdata, ax, vals,\n m if m != 'first' else 'ordinal')\n sprank = sprank.astype(np.float64)\n expected = DataFrame(sprank, columns=cols)\n\n if (LooseVersion(scipy.__version__) >=\n LooseVersion('0.17.0')):\n expected = expected.astype('float64')\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])\n def test_rank_descending(self, method, dtype):\n\n if 'i' in dtype:\n df = self.df.dropna()\n else:\n df = self.df.astype(dtype)\n\n res = df.rank(ascending=False)\n expected = (df.max() - df).rank()\n assert_frame_equal(res, expected)\n\n if method == 'first' and dtype == 'O':\n return\n\n expected = (df.max() - df).rank(method=method)\n\n if dtype != 'O':\n res2 = df.rank(method=method, ascending=False,\n numeric_only=True)\n assert_frame_equal(res2, expected)\n\n res3 = df.rank(method=method, ascending=False,\n numeric_only=False)\n assert_frame_equal(res3, expected)\n\n @pytest.mark.parametrize('axis', [0, 1])\n @pytest.mark.parametrize('dtype', [None, object])\n def test_rank_2d_tie_methods(self, method, axis, dtype):\n df = self.df\n\n def _check2d(df, expected, method='average', axis=0):\n exp_df = DataFrame({'A': expected, 'B': expected})\n\n if axis == 1:\n df = df.T\n exp_df = exp_df.T\n\n result = df.rank(method=method, axis=axis)\n assert_frame_equal(result, exp_df)\n\n disabled = {(object, 'first')}\n if (dtype, method) in disabled:\n return\n frame = df if dtype is None 
else df.astype(dtype)\n _check2d(frame, self.results[method], method=method, axis=axis)\n\n @pytest.mark.parametrize(\n \"method,exp\", [(\"dense\",\n [[1., 1., 1.],\n [1., 0.5, 2. / 3],\n [1., 0.5, 1. / 3]]),\n (\"min\",\n [[1. / 3, 1., 1.],\n [1. / 3, 1. / 3, 2. / 3],\n [1. / 3, 1. / 3, 1. / 3]]),\n (\"max\",\n [[1., 1., 1.],\n [1., 2. / 3, 2. / 3],\n [1., 2. / 3, 1. / 3]]),\n (\"average\",\n [[2. / 3, 1., 1.],\n [2. / 3, 0.5, 2. / 3],\n [2. / 3, 0.5, 1. / 3]]),\n (\"first\",\n [[1. / 3, 1., 1.],\n [2. / 3, 1. / 3, 2. / 3],\n [3. / 3, 2. / 3, 1. / 3]])])\n def test_rank_pct_true(self, method, exp):\n # see gh-15630.\n\n df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]])\n result = df.rank(method=method, pct=True)\n\n expected = DataFrame(exp)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.single\n @pytest.mark.high_memory\n def test_pct_max_many_rows(self):\n # GH 18271\n df = DataFrame({'A': np.arange(2**24 + 1),\n 'B': np.arange(2**24 + 1, 0, -1)})\n result = df.rank(pct=True).max()\n assert (result == 1).all()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382698,"cells":{"repo_name":{"kind":"string","value":"frank-tancf/scikit-learn"},"path":{"kind":"string","value":"sklearn/linear_model/tests/test_logistic.py"},"copies":{"kind":"string","value":"24"},"size":{"kind":"string","value":"39507"},"content":{"kind":"string","value":"import numpy as np\nimport scipy.sparse as sp\nfrom scipy import linalg, optimize, sparse\n\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_warns_message\nfrom sklearn.utils.testing import raises\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.utils import compute_class_weight\nfrom sklearn.utils.fixes import sp_version\n\nfrom sklearn.linear_model.logistic import (\n LogisticRegression,\n logistic_regression_path, LogisticRegressionCV,\n _logistic_loss_and_grad, _logistic_grad_hess,\n _multinomial_grad_hess, _logistic_loss,\n )\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.datasets import load_iris, make_classification\nfrom sklearn.metrics import log_loss\n\nX = [[-1, 0], [0, 1], [1, 1]]\nX_sp = sp.csr_matrix(X)\nY1 = [0, 1, 1]\nY2 = [2, 1, 0]\niris = load_iris()\n\n\ndef check_predictions(clf, X, y):\n \"\"\"Check that the model is able to fit the classification data\"\"\"\n n_samples = len(y)\n classes = np.unique(y)\n n_classes = classes.shape[0]\n\n predicted = clf.fit(X, y).predict(X)\n assert_array_equal(clf.classes_, classes)\n\n assert_equal(predicted.shape, (n_samples,))\n assert_array_equal(predicted, y)\n\n probabilities = clf.predict_proba(X)\n assert_equal(probabilities.shape, (n_samples, n_classes))\n assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))\n assert_array_equal(probabilities.argmax(axis=1), y)\n\n\ndef test_predict_2_classes():\n # Simple sanity check on a 2 classes dataset\n # Make sure it predicts the correct result on simple datasets.\n check_predictions(LogisticRegression(random_state=0), X, Y1)\n 
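# A plain usage sketch (not part of the original test module) of the invariants
# that check_predictions() above asserts, on the same toy data as X and Y1.
# Assumes only scikit-learn and numpy are installed.
import numpy as np
from sklearn.linear_model import LogisticRegression

X_toy = [[-1, 0], [0, 1], [1, 1]]
y_toy = [0, 1, 1]

clf = LogisticRegression(random_state=0).fit(X_toy, y_toy)
proba = clf.predict_proba(X_toy)

print(clf.classes_)          # sorted unique labels, here [0 1]
print(clf.predict(X_toy))    # expected to reproduce y_toy on this tiny separable set
print(proba.shape)           # (n_samples, n_classes) == (3, 2)
print(proba.sum(axis=1))     # each row of predict_proba sums to 1
print(proba.argmax(axis=1))  # the most probable column agrees with predict()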
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)\n\n check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)\n check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)\n\n check_predictions(LogisticRegression(fit_intercept=False,\n random_state=0), X, Y1)\n check_predictions(LogisticRegression(fit_intercept=False,\n random_state=0), X_sp, Y1)\n\n\ndef test_error():\n # Test for appropriate exception on errors\n msg = \"Penalty term must be positive\"\n assert_raise_message(ValueError, msg,\n LogisticRegression(C=-1).fit, X, Y1)\n assert_raise_message(ValueError, msg,\n LogisticRegression(C=\"test\").fit, X, Y1)\n\n for LR in [LogisticRegression, LogisticRegressionCV]:\n msg = \"Tolerance for stopping criteria must be positive\"\n assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)\n assert_raise_message(ValueError, msg, LR(tol=\"test\").fit, X, Y1)\n\n msg = \"Maximum number of iteration must be positive\"\n assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)\n assert_raise_message(ValueError, msg, LR(max_iter=\"test\").fit, X, Y1)\n\n\ndef test_predict_3_classes():\n check_predictions(LogisticRegression(C=10), X, Y2)\n check_predictions(LogisticRegression(C=10), X_sp, Y2)\n\n\ndef test_predict_iris():\n # Test logistic regression with the iris dataset\n n_samples, n_features = iris.data.shape\n\n target = iris.target_names[iris.target]\n\n # Test that both multinomial and OvR solvers handle\n # multiclass data correctly and give good accuracy\n # score (>0.95) for the training data.\n for clf in [LogisticRegression(C=len(iris.data)),\n LogisticRegression(C=len(iris.data), solver='lbfgs',\n multi_class='multinomial'),\n LogisticRegression(C=len(iris.data), solver='newton-cg',\n multi_class='multinomial'),\n LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,\n multi_class='ovr', random_state=42)]:\n clf.fit(iris.data, target)\n assert_array_equal(np.unique(target), clf.classes_)\n\n pred = clf.predict(iris.data)\n assert_greater(np.mean(pred == target), .95)\n\n probabilities = clf.predict_proba(iris.data)\n assert_array_almost_equal(probabilities.sum(axis=1),\n np.ones(n_samples))\n\n pred = iris.target_names[probabilities.argmax(axis=1)]\n assert_greater(np.mean(pred == target), .95)\n\n\ndef test_multinomial_validation():\n for solver in ['lbfgs', 'newton-cg', 'sag']:\n lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')\n assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])\n\n\ndef test_check_solver_option():\n X, y = iris.data, iris.target\n for LR in [LogisticRegression, LogisticRegressionCV]:\n\n msg = (\"Logistic Regression supports only liblinear, newton-cg, lbfgs\"\n \" and sag solvers, got wrong_name\")\n lr = LR(solver=\"wrong_name\")\n assert_raise_message(ValueError, msg, lr.fit, X, y)\n\n msg = \"multi_class should be either multinomial or ovr, got wrong_name\"\n lr = LR(solver='newton-cg', multi_class=\"wrong_name\")\n assert_raise_message(ValueError, msg, lr.fit, X, y)\n\n # only 'liblinear' solver\n msg = \"Solver liblinear does not support a multinomial backend.\"\n lr = LR(solver='liblinear', multi_class='multinomial')\n assert_raise_message(ValueError, msg, lr.fit, X, y)\n\n # all solvers except 'liblinear'\n for solver in ['newton-cg', 'lbfgs', 'sag']:\n msg = (\"Solver %s supports only l2 penalties, got l1 penalty.\" %\n solver)\n lr = LR(solver=solver, penalty='l1')\n assert_raise_message(ValueError, msg, lr.fit, X, y)\n\n msg = (\"Solver %s supports only dual=False, 
got dual=True\" %\n solver)\n lr = LR(solver=solver, dual=True)\n assert_raise_message(ValueError, msg, lr.fit, X, y)\n\n\ndef test_multinomial_binary():\n # Test multinomial LR on a binary problem.\n target = (iris.target > 0).astype(np.intp)\n target = np.array([\"setosa\", \"not-setosa\"])[target]\n\n for solver in ['lbfgs', 'newton-cg', 'sag']:\n clf = LogisticRegression(solver=solver, multi_class='multinomial',\n random_state=42, max_iter=2000)\n clf.fit(iris.data, target)\n\n assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))\n assert_equal(clf.intercept_.shape, (1,))\n assert_array_equal(clf.predict(iris.data), target)\n\n mlr = LogisticRegression(solver=solver, multi_class='multinomial',\n random_state=42, fit_intercept=False)\n mlr.fit(iris.data, target)\n pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),\n axis=1)]\n assert_greater(np.mean(pred == target), .9)\n\n\ndef test_sparsify():\n # Test sparsify and densify members.\n n_samples, n_features = iris.data.shape\n target = iris.target_names[iris.target]\n clf = LogisticRegression(random_state=0).fit(iris.data, target)\n\n pred_d_d = clf.decision_function(iris.data)\n\n clf.sparsify()\n assert_true(sp.issparse(clf.coef_))\n pred_s_d = clf.decision_function(iris.data)\n\n sp_data = sp.coo_matrix(iris.data)\n pred_s_s = clf.decision_function(sp_data)\n\n clf.densify()\n pred_d_s = clf.decision_function(sp_data)\n\n assert_array_almost_equal(pred_d_d, pred_s_d)\n assert_array_almost_equal(pred_d_d, pred_s_s)\n assert_array_almost_equal(pred_d_d, pred_d_s)\n\n\ndef test_inconsistent_input():\n # Test that an exception is raised on inconsistent input\n rng = np.random.RandomState(0)\n X_ = rng.random_sample((5, 10))\n y_ = np.ones(X_.shape[0])\n y_[0] = 0\n\n clf = LogisticRegression(random_state=0)\n\n # Wrong dimensions for training data\n y_wrong = y_[:-1]\n assert_raises(ValueError, clf.fit, X, y_wrong)\n\n # Wrong dimensions for test data\n assert_raises(ValueError, clf.fit(X_, y_).predict,\n rng.random_sample((3, 12)))\n\n\ndef test_write_parameters():\n # Test that we can write to coef_ and intercept_\n clf = LogisticRegression(random_state=0)\n clf.fit(X, Y1)\n clf.coef_[:] = 0\n clf.intercept_[:] = 0\n assert_array_almost_equal(clf.decision_function(X), 0)\n\n\n@raises(ValueError)\ndef test_nan():\n # Test proper NaN handling.\n # Regression test for Issue #252: fit used to go into an infinite loop.\n Xnan = np.array(X, dtype=np.float64)\n Xnan[0, 1] = np.nan\n LogisticRegression(random_state=0).fit(Xnan, Y1)\n\n\ndef test_consistency_path():\n # Test that the path algorithm is consistent\n rng = np.random.RandomState(0)\n X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))\n y = [1] * 100 + [-1] * 100\n Cs = np.logspace(0, 4, 10)\n\n f = ignore_warnings\n # can't test with fit_intercept=True since LIBLINEAR\n # penalizes the intercept\n for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):\n coefs, Cs, _ = f(logistic_regression_path)(\n X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,\n random_state=0)\n for i, C in enumerate(Cs):\n lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,\n random_state=0)\n lr.fit(X, y)\n lr_coef = lr.coef_.ravel()\n assert_array_almost_equal(lr_coef, coefs[i], decimal=4,\n err_msg=\"with solver = %s\" % solver)\n\n # test for fit_intercept=True\n for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):\n Cs = [1e3]\n coefs, Cs, _ = f(logistic_regression_path)(\n X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,\n 
intercept_scaling=10000., random_state=0)\n lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,\n intercept_scaling=10000., random_state=0)\n lr.fit(X, y)\n lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])\n assert_array_almost_equal(lr_coef, coefs[0], decimal=4,\n err_msg=\"with solver = %s\" % solver)\n\n\ndef test_liblinear_dual_random_state():\n # random_state is relevant for liblinear solver only if dual=True\n X, y = make_classification(n_samples=20)\n lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)\n lr1.fit(X, y)\n lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)\n lr2.fit(X, y)\n lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)\n lr3.fit(X, y)\n\n # same result for same random state\n assert_array_almost_equal(lr1.coef_, lr2.coef_)\n # different results for different random states\n msg = \"Arrays are not almost equal to 6 decimals\"\n assert_raise_message(AssertionError, msg,\n assert_array_almost_equal, lr1.coef_, lr3.coef_)\n\n\ndef test_logistic_loss_and_grad():\n X_ref, y = make_classification(n_samples=20)\n n_features = X_ref.shape[1]\n\n X_sp = X_ref.copy()\n X_sp[X_sp < .1] = 0\n X_sp = sp.csr_matrix(X_sp)\n for X in (X_ref, X_sp):\n w = np.zeros(n_features)\n\n # First check that our derivation of the grad is correct\n loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)\n approx_grad = optimize.approx_fprime(\n w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3\n )\n assert_array_almost_equal(grad, approx_grad, decimal=2)\n\n # Second check that our intercept implementation is good\n w = np.zeros(n_features + 1)\n loss_interp, grad_interp = _logistic_loss_and_grad(\n w, X, y, alpha=1.\n )\n assert_array_almost_equal(loss, loss_interp)\n\n approx_grad = optimize.approx_fprime(\n w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3\n )\n assert_array_almost_equal(grad_interp, approx_grad, decimal=2)\n\n\ndef test_logistic_grad_hess():\n rng = np.random.RandomState(0)\n n_samples, n_features = 50, 5\n X_ref = rng.randn(n_samples, n_features)\n y = np.sign(X_ref.dot(5 * rng.randn(n_features)))\n X_ref -= X_ref.mean()\n X_ref /= X_ref.std()\n X_sp = X_ref.copy()\n X_sp[X_sp < .1] = 0\n X_sp = sp.csr_matrix(X_sp)\n for X in (X_ref, X_sp):\n w = .1 * np.ones(n_features)\n\n # First check that _logistic_grad_hess is consistent\n # with _logistic_loss_and_grad\n loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)\n grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)\n assert_array_almost_equal(grad, grad_2)\n\n # Now check our hessian along the second direction of the grad\n vector = np.zeros_like(grad)\n vector[1] = 1\n hess_col = hess(vector)\n\n # Computation of the Hessian is particularly fragile to numerical\n # errors when doing simple finite differences. 
Here we compute the\n # grad along a path in the direction of the vector and then use a\n # least-square regression to estimate the slope\n e = 1e-3\n d_x = np.linspace(-e, e, 30)\n d_grad = np.array([\n _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]\n for t in d_x\n ])\n\n d_grad -= d_grad.mean(axis=0)\n approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()\n\n assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)\n\n # Second check that our intercept implementation is good\n w = np.zeros(n_features + 1)\n loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)\n loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)\n grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)\n assert_array_almost_equal(loss_interp, loss_interp_2)\n assert_array_almost_equal(grad_interp, grad_interp_2)\n\n\ndef test_logistic_cv():\n # test for LogisticRegressionCV object\n n_samples, n_features = 50, 5\n rng = np.random.RandomState(0)\n X_ref = rng.randn(n_samples, n_features)\n y = np.sign(X_ref.dot(5 * rng.randn(n_features)))\n X_ref -= X_ref.mean()\n X_ref /= X_ref.std()\n lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,\n solver='liblinear')\n lr_cv.fit(X_ref, y)\n lr = LogisticRegression(C=1., fit_intercept=False)\n lr.fit(X_ref, y)\n assert_array_almost_equal(lr.coef_, lr_cv.coef_)\n\n assert_array_equal(lr_cv.coef_.shape, (1, n_features))\n assert_array_equal(lr_cv.classes_, [-1, 1])\n assert_equal(len(lr_cv.classes_), 2)\n\n coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))\n assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))\n assert_array_equal(lr_cv.Cs_.shape, (1, ))\n scores = np.asarray(list(lr_cv.scores_.values()))\n assert_array_equal(scores.shape, (1, 3, 1))\n\n\ndef test_logistic_cv_sparse():\n X, y = make_classification(n_samples=50, n_features=5,\n random_state=0)\n X[X < 1.0] = 0.0\n csr = sp.csr_matrix(X)\n\n clf = LogisticRegressionCV(fit_intercept=True)\n clf.fit(X, y)\n clfs = LogisticRegressionCV(fit_intercept=True)\n clfs.fit(csr, y)\n assert_array_almost_equal(clfs.coef_, clf.coef_)\n assert_array_almost_equal(clfs.intercept_, clf.intercept_)\n assert_equal(clfs.C_, clf.C_)\n\n\ndef test_intercept_logistic_helper():\n n_samples, n_features = 10, 5\n X, y = make_classification(n_samples=n_samples, n_features=n_features,\n random_state=0)\n\n # Fit intercept case.\n alpha = 1.\n w = np.ones(n_features + 1)\n grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)\n loss_interp = _logistic_loss(w, X, y, alpha)\n\n # Do not fit intercept. This can be considered equivalent to adding\n # a feature vector of ones, i.e column of one vectors.\n X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))\n grad, hess = _logistic_grad_hess(w, X_, y, alpha)\n loss = _logistic_loss(w, X_, y, alpha)\n\n # In the fit_intercept=False case, the feature vector of ones is\n # penalized. 
This should be taken care of.\n assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)\n\n # Check gradient.\n assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])\n assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])\n\n rng = np.random.RandomState(0)\n grad = rng.rand(n_features + 1)\n hess_interp = hess_interp(grad)\n hess = hess(grad)\n assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])\n assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])\n\n\ndef test_ovr_multinomial_iris():\n # Test that OvR and multinomial are correct using the iris dataset.\n train, target = iris.data, iris.target\n n_samples, n_features = train.shape\n\n # The cv indices from stratified kfold (where stratification is done based\n # on the fine-grained iris classes, i.e, before the classes 0 and 1 are\n # conflated) is used for both clf and clf1\n n_cv = 2\n cv = StratifiedKFold(n_cv)\n precomputed_folds = list(cv.split(train, target))\n\n # Train clf on the original dataset where classes 0 and 1 are separated\n clf = LogisticRegressionCV(cv=precomputed_folds)\n clf.fit(train, target)\n\n # Conflate classes 0 and 1 and train clf1 on this modified dataset\n clf1 = LogisticRegressionCV(cv=precomputed_folds)\n target_copy = target.copy()\n target_copy[target_copy == 0] = 1\n clf1.fit(train, target_copy)\n\n # Ensure that what OvR learns for class2 is same regardless of whether\n # classes 0 and 1 are separated or not\n assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])\n assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)\n assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)\n\n # Test the shape of various attributes.\n assert_equal(clf.coef_.shape, (3, n_features))\n assert_array_equal(clf.classes_, [0, 1, 2])\n coefs_paths = np.asarray(list(clf.coefs_paths_.values()))\n assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))\n assert_equal(clf.Cs_.shape, (10, ))\n scores = np.asarray(list(clf.scores_.values()))\n assert_equal(scores.shape, (3, n_cv, 10))\n\n # Test that for the iris data multinomial gives a better accuracy than OvR\n for solver in ['lbfgs', 'newton-cg', 'sag']:\n max_iter = 100 if solver == 'sag' else 15\n clf_multi = LogisticRegressionCV(\n solver=solver, multi_class='multinomial', max_iter=max_iter,\n random_state=42, tol=1e-2, cv=2)\n clf_multi.fit(train, target)\n multi_score = clf_multi.score(train, target)\n ovr_score = clf.score(train, target)\n assert_greater(multi_score, ovr_score)\n\n # Test attributes of LogisticRegressionCV\n assert_equal(clf.coef_.shape, clf_multi.coef_.shape)\n assert_array_equal(clf_multi.classes_, [0, 1, 2])\n coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))\n assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,\n n_features + 1))\n assert_equal(clf_multi.Cs_.shape, (10, ))\n scores = np.asarray(list(clf_multi.scores_.values()))\n assert_equal(scores.shape, (3, n_cv, 10))\n\n\ndef test_logistic_regression_solvers():\n X, y = make_classification(n_features=10, n_informative=5, random_state=0)\n\n ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)\n lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)\n lib = LogisticRegression(fit_intercept=False)\n sag = LogisticRegression(solver='sag', fit_intercept=False,\n random_state=42)\n ncg.fit(X, y)\n lbf.fit(X, y)\n sag.fit(X, y)\n lib.fit(X, y)\n assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)\n assert_array_almost_equal(lib.coef_, lbf.coef_, 
decimal=3)\n assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)\n assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)\n assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)\n assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)\n\n\ndef test_logistic_regression_solvers_multiclass():\n X, y = make_classification(n_samples=20, n_features=20, n_informative=10,\n n_classes=3, random_state=0)\n tol = 1e-6\n ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)\n lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)\n lib = LogisticRegression(fit_intercept=False, tol=tol)\n sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,\n max_iter=1000, random_state=42)\n ncg.fit(X, y)\n lbf.fit(X, y)\n sag.fit(X, y)\n lib.fit(X, y)\n assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)\n assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)\n assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)\n assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)\n assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)\n assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)\n\n\ndef test_logistic_regressioncv_class_weights():\n X, y = make_classification(n_samples=20, n_features=20, n_informative=10,\n n_classes=3, random_state=0)\n\n msg = (\"In LogisticRegressionCV the liblinear solver cannot handle \"\n \"multiclass with class_weight of type dict. Use the lbfgs, \"\n \"newton-cg or sag solvers or set class_weight='balanced'\")\n clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},\n solver='liblinear')\n assert_raise_message(ValueError, msg, clf_lib.fit, X, y)\n y_ = y.copy()\n y_[y == 2] = 1\n clf_lib.fit(X, y_)\n assert_array_equal(clf_lib.classes_, [0, 1])\n\n # Test for class_weight=balanced\n X, y = make_classification(n_samples=20, n_features=20, n_informative=10,\n random_state=0)\n clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,\n class_weight='balanced')\n clf_lbf.fit(X, y)\n clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,\n class_weight='balanced')\n clf_lib.fit(X, y)\n clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,\n class_weight='balanced', max_iter=2000)\n clf_sag.fit(X, y)\n assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)\n assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)\n assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)\n\n\ndef test_logistic_regression_sample_weights():\n X, y = make_classification(n_samples=20, n_features=5, n_informative=3,\n n_classes=2, random_state=0)\n sample_weight = y + 1\n\n for LR in [LogisticRegression, LogisticRegressionCV]:\n\n # Test that passing sample_weight as ones is the same as\n # not passing them at all (default None)\n for solver in ['lbfgs', 'liblinear']:\n clf_sw_none = LR(solver=solver, fit_intercept=False)\n clf_sw_none.fit(X, y)\n clf_sw_ones = LR(solver=solver, fit_intercept=False)\n clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))\n assert_array_almost_equal(\n clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)\n\n # Test that sample weights work the same with the lbfgs,\n # newton-cg, and 'sag' solvers\n clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)\n clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)\n clf_sw_n = LR(solver='newton-cg', fit_intercept=False)\n clf_sw_n.fit(X, y, sample_weight=sample_weight)\n clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10)\n # ignore convergence warning due 
to small dataset\n with ignore_warnings():\n clf_sw_sag.fit(X, y, sample_weight=sample_weight)\n clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False)\n clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)\n assert_array_almost_equal(\n clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)\n assert_array_almost_equal(\n clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)\n assert_array_almost_equal(\n clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)\n\n # Test that passing class_weight as [1,2] is the same as\n # passing class weight = [1,1] but adjusting sample weights\n # to be 2 for all instances of class 2\n for solver in ['lbfgs', 'liblinear']:\n clf_cw_12 = LR(solver=solver, fit_intercept=False,\n class_weight={0: 1, 1: 2})\n clf_cw_12.fit(X, y)\n clf_sw_12 = LR(solver=solver, fit_intercept=False)\n clf_sw_12.fit(X, y, sample_weight=sample_weight)\n assert_array_almost_equal(\n clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)\n\n # Test the above for l1 penalty and l2 penalty with dual=True.\n # since the patched liblinear code is different.\n clf_cw = LogisticRegression(\n solver=\"liblinear\", fit_intercept=False, class_weight={0: 1, 1: 2},\n penalty=\"l1\")\n clf_cw.fit(X, y)\n clf_sw = LogisticRegression(\n solver=\"liblinear\", fit_intercept=False, penalty=\"l1\")\n clf_sw.fit(X, y, sample_weight)\n assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)\n\n clf_cw = LogisticRegression(\n solver=\"liblinear\", fit_intercept=False, class_weight={0: 1, 1: 2},\n penalty=\"l2\", dual=True)\n clf_cw.fit(X, y)\n clf_sw = LogisticRegression(\n solver=\"liblinear\", fit_intercept=False, penalty=\"l2\", dual=True)\n clf_sw.fit(X, y, sample_weight)\n assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)\n\n\ndef _compute_class_weight_dictionary(y):\n # helper for returning a dictionary instead of an array\n classes = np.unique(y)\n class_weight = compute_class_weight(\"balanced\", classes, y)\n class_weight_dict = dict(zip(classes, class_weight))\n return class_weight_dict\n\n\ndef test_logistic_regression_class_weights():\n # Multinomial case: remove 90% of class 0\n X = iris.data[45:, :]\n y = iris.target[45:]\n solvers = (\"lbfgs\", \"newton-cg\")\n class_weight_dict = _compute_class_weight_dictionary(y)\n\n for solver in solvers:\n clf1 = LogisticRegression(solver=solver, multi_class=\"multinomial\",\n class_weight=\"balanced\")\n clf2 = LogisticRegression(solver=solver, multi_class=\"multinomial\",\n class_weight=class_weight_dict)\n clf1.fit(X, y)\n clf2.fit(X, y)\n assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)\n\n # Binary case: remove 90% of class 0 and 100% of class 2\n X = iris.data[45:100, :]\n y = iris.target[45:100]\n solvers = (\"lbfgs\", \"newton-cg\", \"liblinear\")\n class_weight_dict = _compute_class_weight_dictionary(y)\n\n for solver in solvers:\n clf1 = LogisticRegression(solver=solver, multi_class=\"ovr\",\n class_weight=\"balanced\")\n clf2 = LogisticRegression(solver=solver, multi_class=\"ovr\",\n class_weight=class_weight_dict)\n clf1.fit(X, y)\n clf2.fit(X, y)\n assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)\n\n\ndef test_multinomial_logistic_regression_with_classweight_auto():\n X, y = iris.data, iris.target\n model = LogisticRegression(multi_class='multinomial',\n class_weight='auto', solver='lbfgs')\n # 'auto' is deprecated and will be removed in 0.19\n assert_warns_message(DeprecationWarning,\n \"class_weight='auto' heuristic is deprecated\",\n model.fit, X, y)\n\n\ndef 
test_logistic_regression_convergence_warnings():\n # Test that warnings are raised if model does not converge\n\n X, y = make_classification(n_samples=20, n_features=20)\n clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)\n assert_warns(ConvergenceWarning, clf_lib.fit, X, y)\n assert_equal(clf_lib.n_iter_, 2)\n\n\ndef test_logistic_regression_multinomial():\n # Tests for the multinomial option in logistic regression\n\n # Some basic attributes of Logistic Regression\n n_samples, n_features, n_classes = 50, 20, 3\n X, y = make_classification(n_samples=n_samples,\n n_features=n_features,\n n_informative=10,\n n_classes=n_classes, random_state=0)\n\n # 'lbfgs' is used as a referenced\n solver = 'lbfgs'\n ref_i = LogisticRegression(solver=solver, multi_class='multinomial')\n ref_w = LogisticRegression(solver=solver, multi_class='multinomial',\n fit_intercept=False)\n ref_i.fit(X, y)\n ref_w.fit(X, y)\n assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))\n assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))\n for solver in ['sag', 'newton-cg']:\n clf_i = LogisticRegression(solver=solver, multi_class='multinomial',\n random_state=42, max_iter=1000, tol=1e-6)\n clf_w = LogisticRegression(solver=solver, multi_class='multinomial',\n random_state=42, max_iter=1000, tol=1e-6,\n fit_intercept=False)\n clf_i.fit(X, y)\n clf_w.fit(X, y)\n assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))\n assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))\n\n # Compare solutions between lbfgs and the other solvers\n assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)\n assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)\n assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)\n\n # Test that the path give almost the same results. However since in this\n # case we take the average of the coefs after fitting across all the\n # folds, it need not be exactly the same.\n for solver in ['lbfgs', 'newton-cg', 'sag']:\n clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,\n multi_class='multinomial', Cs=[1.])\n clf_path.fit(X, y)\n assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)\n assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)\n\n\ndef test_multinomial_grad_hess():\n rng = np.random.RandomState(0)\n n_samples, n_features, n_classes = 100, 5, 3\n X = rng.randn(n_samples, n_features)\n w = rng.rand(n_classes, n_features)\n Y = np.zeros((n_samples, n_classes))\n ind = np.argmax(np.dot(X, w.T), axis=1)\n Y[range(0, n_samples), ind] = 1\n w = w.ravel()\n sample_weights = np.ones(X.shape[0])\n grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,\n sample_weight=sample_weights)\n # extract first column of hessian matrix\n vec = np.zeros(n_features * n_classes)\n vec[0] = 1\n hess_col = hessp(vec)\n\n # Estimate hessian using least squares as done in\n # test_logistic_grad_hess\n e = 1e-3\n d_x = np.linspace(-e, e, 30)\n d_grad = np.array([\n _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,\n sample_weight=sample_weights)[0]\n for t in d_x\n ])\n d_grad -= d_grad.mean(axis=0)\n approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()\n assert_array_almost_equal(hess_col, approx_hess_col)\n\n\ndef test_liblinear_decision_function_zero():\n # Test negative prediction when decision_function values are zero.\n # Liblinear predicts the positive class when decision_function values\n # are zero. 
This is a test to verify that we do not do the same.\n # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600\n # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623\n X, y = make_classification(n_samples=5, n_features=5)\n clf = LogisticRegression(fit_intercept=False)\n clf.fit(X, y)\n\n # Dummy data such that the decision function becomes zero.\n X = np.zeros((5, 5))\n assert_array_equal(clf.predict(X), np.zeros(5))\n\n\ndef test_liblinear_logregcv_sparse():\n # Test LogRegCV with solver='liblinear' works for sparse matrices\n\n X, y = make_classification(n_samples=10, n_features=5)\n clf = LogisticRegressionCV(solver='liblinear')\n clf.fit(sparse.csr_matrix(X), y)\n\n\ndef test_logreg_intercept_scaling():\n # Test that the right error message is thrown when intercept_scaling <= 0\n\n for i in [-1, 0]:\n clf = LogisticRegression(intercept_scaling=i)\n msg = ('Intercept scaling is %r but needs to be greater than 0.'\n ' To disable fitting an intercept,'\n ' set fit_intercept=False.' % clf.intercept_scaling)\n assert_raise_message(ValueError, msg, clf.fit, X, Y1)\n\n\ndef test_logreg_intercept_scaling_zero():\n # Test that intercept_scaling is ignored when fit_intercept is False\n\n clf = LogisticRegression(fit_intercept=False)\n clf.fit(X, Y1)\n assert_equal(clf.intercept_, 0.)\n\n\ndef test_logreg_cv_penalty():\n # Test that the correct penalty is passed to the final fit.\n X, y = make_classification(n_samples=50, n_features=20, random_state=0)\n lr_cv = LogisticRegressionCV(penalty=\"l1\", Cs=[1.0], solver='liblinear')\n lr_cv.fit(X, y)\n lr = LogisticRegression(penalty=\"l1\", C=1.0, solver='liblinear')\n lr.fit(X, y)\n assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))\n\n\ndef test_logreg_predict_proba_multinomial():\n X, y = make_classification(n_samples=10, n_features=20, random_state=0,\n n_classes=3, n_informative=10)\n\n # Predicted probabilites using the true-entropy loss should give a\n # smaller loss than those using the ovr method.\n clf_multi = LogisticRegression(multi_class=\"multinomial\", solver=\"lbfgs\")\n clf_multi.fit(X, y)\n clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))\n clf_ovr = LogisticRegression(multi_class=\"ovr\", solver=\"lbfgs\")\n clf_ovr.fit(X, y)\n clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))\n assert_greater(clf_ovr_loss, clf_multi_loss)\n\n # Predicted probabilites using the soft-max function should give a\n # smaller loss than those using the logistic function.\n clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))\n clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))\n assert_greater(clf_wrong_loss, clf_multi_loss)\n\n\n@ignore_warnings\ndef test_max_iter():\n # Test that the maximum number of iteration is reached\n X, y_bin = iris.data, iris.target.copy()\n y_bin[y_bin == 2] = 0\n\n solvers = ['newton-cg', 'liblinear', 'sag']\n # old scipy doesn't have maxiter\n if sp_version >= (0, 12):\n solvers.append('lbfgs')\n\n for max_iter in range(1, 5):\n for solver in solvers:\n for multi_class in ['ovr', 'multinomial']:\n if solver == 'liblinear' and multi_class == 'multinomial':\n continue\n lr = LogisticRegression(max_iter=max_iter, tol=1e-15,\n multi_class=multi_class,\n random_state=0, solver=solver)\n lr.fit(X, y_bin)\n assert_equal(lr.n_iter_[0], max_iter)\n\n\ndef test_n_iter():\n # Test that self.n_iter_ has the correct format.\n X, y = iris.data, iris.target\n y_bin = y.copy()\n y_bin[y_bin == 2] = 0\n\n n_Cs = 4\n n_cv_fold = 2\n\n for solver in ['newton-cg', 
'liblinear', 'sag', 'lbfgs']:\n # OvR case\n n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]\n clf = LogisticRegression(tol=1e-2, multi_class='ovr',\n solver=solver, C=1.,\n random_state=42, max_iter=100)\n clf.fit(X, y)\n assert_equal(clf.n_iter_.shape, (n_classes,))\n\n n_classes = np.unique(y).shape[0]\n clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',\n solver=solver, Cs=n_Cs, cv=n_cv_fold,\n random_state=42, max_iter=100)\n clf.fit(X, y)\n assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))\n clf.fit(X, y_bin)\n assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))\n\n # multinomial case\n n_classes = 1\n if solver in ('liblinear', 'sag'):\n break\n\n clf = LogisticRegression(tol=1e-2, multi_class='multinomial',\n solver=solver, C=1.,\n random_state=42, max_iter=100)\n clf.fit(X, y)\n assert_equal(clf.n_iter_.shape, (n_classes,))\n\n clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',\n solver=solver, Cs=n_Cs, cv=n_cv_fold,\n random_state=42, max_iter=100)\n clf.fit(X, y)\n assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))\n clf.fit(X, y_bin)\n assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))\n\n\n@ignore_warnings\ndef test_warm_start():\n # A 1-iteration second fit on same data should give almost same result\n # with warm starting, and quite different result without warm starting.\n # Warm starting does not work with liblinear solver.\n X, y = iris.data, iris.target\n\n solvers = ['newton-cg', 'sag']\n # old scipy doesn't have maxiter\n if sp_version >= (0, 12):\n solvers.append('lbfgs')\n\n for warm_start in [True, False]:\n for fit_intercept in [True, False]:\n for solver in solvers:\n for multi_class in ['ovr', 'multinomial']:\n clf = LogisticRegression(tol=1e-4, multi_class=multi_class,\n warm_start=warm_start,\n solver=solver,\n random_state=42, max_iter=100,\n fit_intercept=fit_intercept)\n clf.fit(X, y)\n coef_1 = clf.coef_\n\n clf.max_iter = 1\n with ignore_warnings():\n clf.fit(X, y)\n cum_diff = np.sum(np.abs(coef_1 - clf.coef_))\n msg = (\"Warm starting issue with %s solver in %s mode \"\n \"with fit_intercept=%s and warm_start=%s\"\n % (solver, multi_class, str(fit_intercept),\n str(warm_start)))\n if warm_start:\n assert_greater(2.0, cum_diff, msg)\n else:\n assert_greater(cum_diff, 2.0, msg)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":382699,"cells":{"repo_name":{"kind":"string","value":"sanghack81/SDCIT"},"path":{"kind":"string","value":"experiments/draw_figures.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"18832"},"content":{"kind":"string","value":"import collections\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport scipy.stats\nimport seaborn as sns\nfrom os.path import exists\n\nfrom experiments.exp_setup import SDCIT_RESULT_DIR, SDCIT_FIGURE_DIR\nfrom sdcit.utils import AUPC\n\nnames_chsic_chaotic = ['independent', 'gamma', 'noise', 'trial', 'N', 'runtime', 'statistic', 'pvalue']\nnames_chsic_postnonlinear = ['independent', 'noise', 'trial', 'N', 'runtime', 'statistic', 'pvalue']\nnames_kcit_chaotic = ['independent', 'gamma', 'noise', 'trial', 'N', 'runtime', 'statistic', 'boot_p_value', 'appr_p_value']\nnames_kcit_postnonlinear = ['independent', 'noise', 'trial', 'N', 'runtime', 'statistic', 'boot_p_value', 'appr_p_value']\nnames_sdcit_chaotic = ['independent', 'gamma', 'trial', 'N', 'statistic', 'pvalue']\nnames_sdcit_postnonlinear = ['independent', 'noise', 'trial', 'N', 'statistic', 
'pvalue']\nnames_kcipt_chaotic = ['independent', 'gamma', 'trial', 'N', 'statistic', 'pvalue', 'B']\nnames_kcipt_postnonlinear = ['independent', 'noise', 'trial', 'N', 'statistic', 'pvalue', 'B']\n\nnames = {('CHSIC', 'chaotic'): names_chsic_chaotic,\n ('CHSIC', 'postnonlinear'): names_chsic_postnonlinear,\n ('KCIT', 'chaotic'): names_kcit_chaotic,\n ('KCIT', 'postnonlinear'): names_kcit_postnonlinear,\n ('KCIT2', 'chaotic'): names_kcit_chaotic,\n ('KCIT2', 'postnonlinear'): names_kcit_postnonlinear,\n ('SDCIT', 'chaotic'): names_sdcit_chaotic,\n ('SDCIT', 'postnonlinear'): names_sdcit_postnonlinear,\n ('KCIPT', 'chaotic'): names_kcipt_chaotic,\n ('KCIPT', 'postnonlinear'): names_kcipt_postnonlinear,\n }\n\npvalue_column = collections.defaultdict(lambda: 'pvalue')\npvalue_column['KCIT'] = 'boot_p_value'\npvalue_column['KCIT2'] = 'boot_p_value'\n\ncolor_palettes = sns.color_palette('Paired', 10)\nmethod_color_codes = {'KCIT': 3, 'SDCIT': 5, 'KCIPT': 1, 'CHSIC': 9, 'KCIT2': 2}\nmarkers = collections.defaultdict(lambda: 'o')\nmarkers.update({'KCIT': 'o', 'SDCIT': 's', 'KCIPT': '*', 'CHSIC': '^', 'KCIT2': 'o'})\nall_algos = ['KCIT', 'SDCIT', 'KCIPT', 'CHSIC', 'KCIT2']\n\n\ndef algo_name(org_name):\n map = {'KCIT2': 'KCIT', 'KCIT': 'KCIT (org.)'}\n if org_name in map:\n return map[org_name]\n else:\n return org_name\n\n\ndef draw_aupc_chaotic():\n data = 'chaotic'\n\n aupc_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])\n\n for group_key, group_df in df.groupby(by=['gamma', 'independent', 'N']):\n group_key = (int(group_key[0] * 10) / 10, *group_key[1:])\n if group_key[1] == 0:\n aupc_data.append([algo, *group_key, AUPC(group_df[pvalue_column[algo]])])\n\n print(draw_aupc_chaotic.__name__)\n [print(xx) for xx in aupc_data]\n\n aupc_data = np.array(aupc_data)\n aupc_df = pd.DataFrame({'algorithm': aupc_data[:, 0],\n 'gamma': aupc_data[:, 1],\n 'independent': aupc_data[:, 2],\n 'N': aupc_data[:, 3],\n 'AUPC': aupc_data[:, 4]})\n aupc_df['gamma'] = aupc_df['gamma'].astype(float)\n aupc_df['independent'] = aupc_df['independent'].astype(int)\n aupc_df['N'] = aupc_df['N'].map(int)\n aupc_df['AUPC'] = aupc_df['AUPC'].astype(float)\n\n aupc_df = aupc_df[aupc_df['independent'] == 0]\n aupc_df[\"algo-N\"] = aupc_df[\"algorithm\"].map(str) + aupc_df[\"N\"].map(lambda xxx: ' (' + str(xxx) + ')')\n sns_setting()\n for k, gdf in aupc_df.groupby(['algorithm', 'N']):\n print('chaotic', k, gdf['AUPC'])\n if k[1] == 400:\n plt.plot(gdf['gamma'], gdf['AUPC'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))\n else:\n plt.plot(gdf['gamma'], gdf['AUPC'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label='_nolegend_')\n\n plt.axes().set_xlabel(r'$\\gamma$')\n plt.axes().set_ylabel('Area Under Power Curve')\n plt.axes().set_ylim([0.45, 1.05])\n\n handles, labels = plt.axes().get_legend_handles_labels()\n # plt.axes().legend(handles[::-1], labels[::-1])\n\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/{}_aupc.pdf'.format(data), transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\ndef draw_calib_chaotic():\n data = 'chaotic'\n calib_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', 
names=names[(algo, data)])\n for k, gdf in df.groupby(by=['independent', 'gamma', 'N']):\n if float(k[0]) == 1:\n D, _ = scipy.stats.kstest(gdf[pvalue_column[algo]], 'uniform')\n calib_data.append([algo, float(k[1]), int(k[2]), D])\n\n print(draw_calib_chaotic.__name__)\n [print(xx) for xx in calib_data]\n\n df = pd.DataFrame(calib_data, columns=['algo', 'gamma', 'N', 'D'])\n df['gamma'] = df['gamma'].astype(float)\n df['N'] = df['N'].map(int)\n df['D'] = df['D'].astype(float)\n sns_setting()\n for k, gdf in df.groupby(['algo', 'N']):\n if k[1] == 400:\n plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))\n else:\n plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label='_nolegend_')\n handles, labels = plt.axes().get_legend_handles_labels()\n plt.axes().legend(handles[::-1], labels[::-1], ncol=2)\n plt.axes().set_xlabel(r'$\\gamma$')\n plt.axes().set_ylabel('KS test statistic')\n plt.axes().set_ylim([0.0, 0.5])\n plt.axes().invert_yaxis()\n plt.axes().set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])\n\n handles, labels = plt.axes().get_legend_handles_labels()\n # plt.axes().legend(handles[::-1], labels[::-1])\n\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/chaotic_calib.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\ndef draw_type_I_error_chaotic():\n data = 'chaotic'\n calib_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])\n for k, gdf in df.groupby(by=['independent', 'gamma', 'N']):\n if float(k[0]) == 1:\n calib_data.append([algo, float(k[1]), int(k[2]), np.mean(gdf[pvalue_column[algo]] <= 0.05)])\n\n print(draw_type_I_error_chaotic.__name__)\n [print(xx) for xx in calib_data]\n\n df = pd.DataFrame(calib_data, columns=['algo', 'gamma', 'N', 'D'])\n df['gamma'] = df['gamma'].astype(float)\n df['N'] = df['N'].map(int)\n df['D'] = df['D'].astype(float)\n sns_setting()\n for k, gdf in df.groupby(['algo', 'N']):\n if k[1] == 400:\n plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))\n else:\n plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label='_nolegend_')\n plt.axes().set_xlabel(r'$\\gamma$')\n plt.axes().set_xticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])\n plt.axes().set_ylabel('Type I error')\n plt.axes().set_ylim([0.0, 0.2])\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/chaotic_type_I.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\ndef draw_aupc_postnonlinear():\n data = 'postnonlinear'\n aupc_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])\n\n for group_key, group_df in df.groupby(by=['noise', 'independent', 'N']):\n group_key = (int(group_key[0] * 10) / 10, int(group_key[1]), int(group_key[2]))\n aupc_data.append([algo, *group_key, AUPC(group_df[pvalue_column[algo]])])\n\n print(draw_aupc_postnonlinear.__name__)\n 
[print(xx) for xx in aupc_data]\n\n aupc_data = np.array(aupc_data)\n aupc_df = pd.DataFrame({'algorithm': [str(v) for v in aupc_data[:, 0]],\n 'noise': [int(float(v)) for v in aupc_data[:, 1]],\n 'independent': [int(v) for v in aupc_data[:, 2]],\n 'N': [int(v) for v in aupc_data[:, 3]],\n 'AUPC': [float(v) for v in aupc_data[:, 4]]})\n aupc_df['dimension'] = (aupc_df['noise'] + 1).astype(int)\n\n aupc_df = aupc_df[aupc_df['independent'] == 0]\n aupc_df[\"algo-N\"] = aupc_df[\"algorithm\"].map(str) + aupc_df[\"N\"].map(lambda xxx: ' (' + str(xxx) + ')')\n sns_setting()\n for k, gdf in aupc_df.groupby(['algorithm', 'N']):\n gdf = gdf[gdf['dimension'] <= 5]\n if k[1] == 400:\n plt.plot(gdf['dimension'], gdf['AUPC'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))\n else:\n plt.plot(gdf['dimension'], gdf['AUPC'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label='_nolegend_')\n plt.axes().set_xlabel('dimension')\n plt.axes().set_ylabel('Area Under Power Curve')\n plt.axes().set_ylim([0.45, 1.05])\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_aupc.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\ndef draw_aupc_postnonlinear_highdim():\n data = 'postnonlinear'\n aupc_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])\n\n for group_key, group_df in df.groupby(by=['noise', 'independent', 'N']):\n group_key = (int(group_key[0] * 10) / 10, int(group_key[1]), int(group_key[2]))\n aupc_data.append([algo, *group_key, AUPC(group_df[pvalue_column[algo]])])\n\n print(draw_aupc_postnonlinear_highdim.__name__)\n [print(xx) for xx in aupc_data]\n\n aupc_data = np.array(aupc_data)\n aupc_df = pd.DataFrame({'algorithm': [str(v) for v in aupc_data[:, 0]],\n 'noise': [int(float(v)) for v in aupc_data[:, 1]],\n 'independent': [int(v) for v in aupc_data[:, 2]],\n 'N': [int(v) for v in aupc_data[:, 3]],\n 'AUPC': [float(v) for v in aupc_data[:, 4]]})\n aupc_df['dimension'] = (aupc_df['noise'] + 1).astype(int)\n\n aupc_df = aupc_df[aupc_df['independent'] == 0]\n aupc_df[\"algo-N\"] = aupc_df[\"algorithm\"].map(str) + aupc_df[\"N\"].map(lambda xxx: ' (' + str(xxx) + ')')\n sns_setting()\n for k, gdf in aupc_df.groupby(['algorithm', 'N']):\n if k[1] == 400:\n plt.plot([int(v) for v in gdf['dimension']], gdf['AUPC'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':',\n label=algo_name(str(k[0])))\n\n plt.axes().set_xlabel('dimension')\n plt.axes().set_ylabel('Area Under Power Curve')\n plt.axes().set_ylim([0.95, 1.01])\n plt.axes().set_xscale('log')\n plt.xticks([1, 5, 10, 20, 50], [1, 5, 10, 20, 50])\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_aupc_highdim.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\ndef draw_calib_postnonlinear():\n data = 'postnonlinear'\n calib_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])\n for k, gdf in df.groupby(by=['independent', 'noise', 'N']):\n if float(k[0]) == 1:\n D, _ = scipy.stats.kstest(gdf[pvalue_column[algo]], 'uniform')\n 
calib_data.append([algo, float(k[1]), int(k[2]), D])\n\n print(draw_calib_postnonlinear.__name__)\n [print(xx) for xx in calib_data]\n\n df = pd.DataFrame(calib_data, columns=['algo', 'noise', 'N', 'D'])\n df['noise'] = df['noise'].map(int)\n df['dimension'] = (df['noise'] + 1).astype(int)\n df['N'] = df['N'].map(int)\n df['D'] = df['D'].astype(float)\n sns_setting()\n for k, gdf in df.groupby(['algo', 'N']):\n gdf = gdf[gdf['dimension'] <= 5]\n if k[1] == 400:\n plt.plot([int(v) for v in gdf['dimension']], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':',\n label=algo_name(str(k[0])))\n else:\n plt.plot([int(v) for v in gdf['dimension']], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':',\n label='_nolegend_')\n plt.axes().set_xlabel('dimension')\n plt.axes().set_ylabel('KS test statistic')\n plt.axes().set_ylim([0.0, 0.5])\n plt.axes().invert_yaxis()\n plt.axes().set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])\n\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_calib.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\ndef sns_setting():\n paper_rc = {'lines.linewidth': 1, 'lines.markersize': 2}\n sns.set_context(\"paper\", rc=paper_rc)\n sns.set(style='white', font_scale=1.4)\n plt.figure(figsize=[4, 3])\n plt.rc('text', usetex=True)\n plt.rc('text.latex', preamble=r'\\usepackage{cmbright}')\n\n\ndef draw_calib_postnonlinear_highdim():\n data = 'postnonlinear'\n calib_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])\n for k, gdf in df.groupby(by=['independent', 'noise', 'N']):\n if float(k[0]) == 1 and k[2] == 400:\n dd, _ = scipy.stats.kstest(gdf[pvalue_column[algo]], 'uniform')\n calib_data.append([algo, float(k[1]), int(k[2]), dd])\n\n print(draw_calib_postnonlinear_highdim.__name__)\n [print(xx) for xx in calib_data]\n\n df = pd.DataFrame(calib_data, columns=['algo', 'noise', 'N', 'D'])\n df['noise'] = df['noise'].map(int)\n df['dimension'] = (df['noise'] + 1).astype(int)\n df['N'] = df['N'].map(int)\n df['D'] = df['D'].astype(float)\n sns_setting()\n for k, gdf in df.groupby(['algo', 'N']):\n print('postnonlinear', k, gdf['D'])\n if k[1] == 400:\n plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))\n else:\n plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label='_nolegend_')\n plt.axes().set_xlabel('dimension')\n plt.axes().set_ylabel('KS test statistic')\n plt.axes().set_xscale('log')\n plt.axes().set_ylim([0.0, 0.5])\n plt.axes().invert_yaxis()\n plt.xticks([1, 5, 10, 20, 50], [1, 5, 10, 20, 50])\n plt.axes().set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])\n\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_calib_highdim.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\ndef draw_type_I_postnonlinear_highdim():\n data = 'postnonlinear'\n calib_data = []\n for algo in all_algos:\n df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', 
names=names[(algo, data)])\n for k, gdf in df.groupby(by=['independent', 'noise', 'N']):\n if float(k[0]) == 1 and k[2] == 400:\n dd = np.mean(gdf[pvalue_column[algo]] <= 0.05)\n calib_data.append([algo, float(k[1]), int(k[2]), dd])\n\n print(draw_type_I_postnonlinear_highdim.__name__)\n [print(xx) for xx in calib_data]\n\n df = pd.DataFrame(calib_data, columns=['algo', 'noise', 'N', 'D'])\n df['noise'] = df['noise'].map(int)\n df['dimension'] = (df['noise'] + 1).astype(int)\n df['N'] = df['N'].map(int)\n df['D'] = df['D'].astype(float)\n sns_setting()\n for k, gdf in df.groupby(['algo', 'N']):\n if k[1] == 400:\n plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))\n else:\n plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])], c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],\n ls='-' if k[1] == 400 else ':', label='_nolegend_')\n plt.axes().set_xlabel('dimension')\n plt.axes().set_xscale('log')\n plt.xticks([1, 5, 10, 20, 50], [1, 5, 10, 20, 50])\n plt.axes().set_ylim([0.0, 0.2])\n handles, labels = plt.axes().get_legend_handles_labels()\n plt.axes().legend(handles[::-1], labels[::-1])\n sns.despine()\n plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_type_I_highdim.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)\n plt.close()\n\n\nif __name__ == '__main__':\n for data in ['chaotic', 'postnonlinear']:\n for algo in all_algos:\n assert exists(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv'), 'run tests first -- missing {}'.format(algo.lower() + '_' + data + '.csv')\n if True:\n # chaotic series\n draw_aupc_chaotic()\n draw_calib_chaotic()\n\n # postnonlinear-noise\n draw_aupc_postnonlinear()\n draw_calib_postnonlinear()\n draw_aupc_postnonlinear_highdim()\n draw_calib_postnonlinear_highdim()\n\n # type I for both\n draw_type_I_error_chaotic()\n draw_type_I_postnonlinear_highdim()\n"},"license":{"kind":"string","value":"mit"}}}],"truncated":false,"partial":true},"paginationData":{"pageIndex":3826,"numItemsPerPage":100,"numTotalItems":383795,"offset":382600,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NTk5NTI4OCwic3ViIjoiL2RhdGFzZXRzL2h1Z2dpbmdmYWNlLWNvdXJzZS9jb2RlcGFycm90LWRzLXRyYWluIiwiZXhwIjoxNzU1OTk4ODg4LCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.dwqh_J54l5y1xaM-1yoyBG4YZ4vjEzhcji-dYBAACBkBw74iH0ok_p0gC8GiAVg7ND-VWwd1w8Del7gAz0w5Bw","displayUrls":true},"discussionsStats":{"closed":0,"open":3,"total":3},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset columns: repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
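If this listing corresponds to the huggingface-course/codeparrot-ds-train dataset referenced in the page metadata, rows with the schema above can be read with the datasets library roughly as sketched below; the split name and the streaming flag are assumptions, not something stated on this page.

# Minimal sketch: iterate over a few rows and inspect the columns listed above.
# Dataset id taken from the page metadata; split="train" and streaming=True are assumptions.
from datasets import load_dataset

ds = load_dataset("huggingface-course/codeparrot-ds-train", split="train", streaming=True)
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))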
repo_name: abimannans/scikit-learn | path: examples/calibration/plot_calibration_curve.py | copies: 225 | size: 5903
""" ============================== Probability Calibration curves ============================== When performing classification one often wants to predict not only the class label, but also the associated probability. This probability gives some kind of confidence on the prediction. This example demonstrates how to display how well calibrated the predicted probabilities are and how to calibrate an uncalibrated classifier. The experiment is performed on an artificial dataset for binary classification with 100.000 samples (1.000 of them are used for model fitting) with 20 features. Of the 20 features, only 2 are informative and 10 are redundant. The first figure shows the estimated probabilities obtained with logistic regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic calibration and sigmoid calibration. The calibration performance is evaluated with Brier score, reported in the legend (the smaller the better). One can observe here that logistic regression is well calibrated while raw Gaussian naive Bayes performs very badly. This is because of the redundant features which violate the assumption of feature-independence and result in an overly confident classifier, which is indicated by the typical transposed-sigmoid curve. Calibration of the probabilities of Gaussian naive Bayes with isotonic regression can fix this issue as can be seen from the nearly diagonal calibration curve. Sigmoid calibration also improves the brier score slightly, albeit not as strongly as the non-parametric isotonic regression. This can be attributed to the fact that we have plenty of calibration data such that the greater flexibility of the non-parametric model can be exploited. The second figure shows the calibration curve of a linear support-vector classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian naive Bayes: the calibration curve has a sigmoid curve, which is typical for an under-confident classifier. In the case of LinearSVC, this is caused by the margin property of the hinge loss, which lets the model focus on hard samples that are close to the decision boundary (the support vectors). Both kinds of calibration can fix this issue and yield nearly identical results. This shows that sigmoid calibration can deal with situations where the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC) but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes). """ print(__doc__) # Author: Alexandre Gramfort <[email protected]> # Jan Hendrik Metzen <[email protected]> # License: BSD Style. import matplotlib.pyplot as plt from sklearn import datasets from sklearn.naive_bayes import GaussianNB from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression from sklearn.metrics import (brier_score_loss, precision_score, recall_score, f1_score) from sklearn.calibration import CalibratedClassifierCV, calibration_curve from sklearn.cross_validation import train_test_split # Create dataset of classification task with many redundant and few # informative features X, y = datasets.make_classification(n_samples=100000, n_features=20, n_informative=2, n_redundant=10, random_state=42) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99, random_state=42) def plot_calibration_curve(est, name, fig_index): """Plot calibration curve for est w/o and with calibration. 
""" # Calibrated with isotonic calibration isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic') # Calibrated with sigmoid calibration sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid') # Logistic regression with no calibration as baseline lr = LogisticRegression(C=1., solver='lbfgs') fig = plt.figure(fig_index, figsize=(10, 10)) ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax2 = plt.subplot2grid((3, 1), (2, 0)) ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated") for clf, name in [(lr, 'Logistic'), (est, name), (isotonic, name + ' + Isotonic'), (sigmoid, name + ' + Sigmoid')]: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) if hasattr(clf, "predict_proba"): prob_pos = clf.predict_proba(X_test)[:, 1] else: # use decision function prob_pos = clf.decision_function(X_test) prob_pos = \ (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min()) clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max()) print("%s:" % name) print("\tBrier: %1.3f" % (clf_score)) print("\tPrecision: %1.3f" % precision_score(y_test, y_pred)) print("\tRecall: %1.3f" % recall_score(y_test, y_pred)) print("\tF1: %1.3f\n" % f1_score(y_test, y_pred)) fraction_of_positives, mean_predicted_value = \ calibration_curve(y_test, prob_pos, n_bins=10) ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s (%1.3f)" % (name, clf_score)) ax2.hist(prob_pos, range=(0, 1), bins=10, label=name, histtype="step", lw=2) ax1.set_ylabel("Fraction of positives") ax1.set_ylim([-0.05, 1.05]) ax1.legend(loc="lower right") ax1.set_title('Calibration plots (reliability curve)') ax2.set_xlabel("Mean predicted value") ax2.set_ylabel("Count") ax2.legend(loc="upper center", ncol=2) plt.tight_layout() # Plot calibration cuve for Gaussian Naive Bayes plot_calibration_curve(GaussianNB(), "Naive Bayes", 1) # Plot calibration cuve for Linear SVC plot_calibration_curve(LinearSVC(), "SVC", 2) plt.show()
license: bsd-3-clause
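The example in the row above targets an older scikit-learn release (it imports train_test_split from sklearn.cross_validation). A minimal sketch of the same calibration idea against a more recent scikit-learn API follows; the dataset sizes and the choice of isotonic calibration are illustrative, not taken from the row above.

# Sketch: calibrate a Gaussian naive Bayes classifier and compute its
# reliability curve and Brier score, using current module paths
# (sklearn.model_selection instead of sklearn.cross_validation).
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.metrics import brier_score_loss

X, y = make_classification(n_samples=5000, n_features=20, n_informative=2,
                           n_redundant=10, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=42)

clf = CalibratedClassifierCV(GaussianNB(), method="isotonic", cv=3)
clf.fit(X_train, y_train)
prob_pos = clf.predict_proba(X_test)[:, 1]
frac_pos, mean_pred = calibration_curve(y_test, prob_pos, n_bins=10)
print("Brier score:", brier_score_loss(y_test, prob_pos))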
repo_name: suiyuan2009/tensorflow | path: tensorflow/contrib/learn/python/learn/estimators/kmeans.py | copies: 34 | size: 10130
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of k-means clustering on top of tf.learn API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from tensorflow.contrib.factorization.python.ops import clustering_ops from tensorflow.contrib.framework.python.ops import variables from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.summary import summary from tensorflow.python.ops.control_flow_ops import with_dependencies from tensorflow.python.platform import tf_logging as logging from tensorflow.python.summary import summary from tensorflow.python.training import session_run_hook from tensorflow.python.training.session_run_hook import SessionRunArgs class _LossRelativeChangeHook(session_run_hook.SessionRunHook): """Stops when the change in loss goes below a tolerance.""" def __init__(self, tolerance): """Initializes _LossRelativeChangeHook. Args: tolerance: A relative tolerance of change between iterations. 
""" self._tolerance = tolerance self._prev_loss = None def begin(self): self._loss_tensor = ops.get_default_graph().get_tensor_by_name( KMeansClustering.LOSS_OP_NAME + ':0') assert self._loss_tensor is not None def before_run(self, run_context): del run_context return SessionRunArgs( fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor}) def after_run(self, run_context, run_values): loss = run_values.results[KMeansClustering.LOSS_OP_NAME] assert loss is not None if self._prev_loss is not None: relative_change = (abs(loss - self._prev_loss) / (1 + abs(self._prev_loss))) if relative_change < self._tolerance: run_context.request_stop() self._prev_loss = loss class _InitializeClustersHook(session_run_hook.SessionRunHook): """Initializes clusters or waits for cluster initialization.""" def __init__(self, init_op, is_initialized_op, is_chief): self._init_op = init_op self._is_chief = is_chief self._is_initialized_op = is_initialized_op def after_create_session(self, session, _): assert self._init_op.graph == ops.get_default_graph() assert self._is_initialized_op.graph == self._init_op.graph while True: try: if session.run(self._is_initialized_op): break elif self._is_chief: session.run(self._init_op) else: time.sleep(1) except RuntimeError as e: logging.info(e) def _parse_tensor_or_dict(features): """Helper function to parse features.""" if isinstance(features, dict): keys = sorted(features.keys()) with ops.colocate_with(features[keys[0]]): features = array_ops.concat([features[k] for k in keys], 1) return features def _kmeans_clustering_model_fn(features, labels, mode, params, config): """Model function for KMeansClustering estimator.""" assert labels is None, labels (all_scores, model_predictions, losses, is_initialized, init_op, training_op) = clustering_ops.KMeans( _parse_tensor_or_dict(features), params.get('num_clusters'), initial_clusters=params.get('training_initial_clusters'), distance_metric=params.get('distance_metric'), use_mini_batch=params.get('use_mini_batch'), mini_batch_steps_per_iteration=params.get( 'mini_batch_steps_per_iteration'), random_seed=params.get('random_seed'), kmeans_plus_plus_num_retries=params.get( 'kmeans_plus_plus_num_retries')).training_graph() incr_step = state_ops.assign_add(variables.get_global_step(), 1) loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME) summary.scalar('loss/raw', loss) training_op = with_dependencies([training_op, incr_step], loss) predictions = { KMeansClustering.ALL_SCORES: all_scores[0], KMeansClustering.CLUSTER_IDX: model_predictions[0], } eval_metric_ops = {KMeansClustering.SCORES: loss} training_hooks = [_InitializeClustersHook( init_op, is_initialized, config.is_chief)] relative_tolerance = params.get('relative_tolerance') if relative_tolerance is not None: training_hooks.append(_LossRelativeChangeHook(relative_tolerance)) return ModelFnOps( mode=mode, predictions=predictions, eval_metric_ops=eval_metric_ops, loss=loss, train_op=training_op, training_hooks=training_hooks) # TODO(agarwal,ands): support sharded input. 
class KMeansClustering(estimator.Estimator): """An Estimator for K-Means clustering.""" SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE RANDOM_INIT = clustering_ops.RANDOM_INIT KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT SCORES = 'scores' CLUSTER_IDX = 'cluster_idx' CLUSTERS = 'clusters' ALL_SCORES = 'all_scores' LOSS_OP_NAME = 'kmeans_loss' def __init__(self, num_clusters, model_dir=None, initial_clusters=RANDOM_INIT, distance_metric=SQUARED_EUCLIDEAN_DISTANCE, random_seed=0, use_mini_batch=True, mini_batch_steps_per_iteration=1, kmeans_plus_plus_num_retries=2, relative_tolerance=None, config=None): """Creates a model for running KMeans training and inference. Args: num_clusters: number of clusters to train. model_dir: the directory to save the model results and log files. initial_clusters: specifies how to initialize the clusters for training. See clustering_ops.kmeans for the possible values. distance_metric: the distance metric used for clustering. See clustering_ops.kmeans for the possible values. random_seed: Python integer. Seed for PRNG used to initialize centers. use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume full batch. mini_batch_steps_per_iteration: number of steps after which the updated cluster centers are synced back to a master copy. See clustering_ops.py for more details. kmeans_plus_plus_num_retries: For each point that is sampled during kmeans++ initialization, this parameter specifies the number of additional points to draw from the current distribution before selecting the best. If a negative value is specified, a heuristic is used to sample O(log(num_to_sample)) additional points. relative_tolerance: A relative tolerance of change in the loss between iterations. Stops learning if the loss changes less than this amount. Note that this may not work correctly if use_mini_batch=True. config: See Estimator """ params = {} params['num_clusters'] = num_clusters params['training_initial_clusters'] = initial_clusters params['distance_metric'] = distance_metric params['random_seed'] = random_seed params['use_mini_batch'] = use_mini_batch params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries params['relative_tolerance'] = relative_tolerance super(KMeansClustering, self).__init__( model_fn=_kmeans_clustering_model_fn, params=params, model_dir=model_dir, config=config) def predict_cluster_idx(self, input_fn=None): """Yields predicted cluster indices.""" key = KMeansClustering.CLUSTER_IDX results = super(KMeansClustering, self).predict( input_fn=input_fn, outputs=[key]) for result in results: yield result[key] def score(self, input_fn=None, steps=None): """Predict total sum of distances to nearest clusters. Note that this function is different from the corresponding one in sklearn which returns the negative of the sum of distances. Args: input_fn: see predict. steps: see predict. Returns: Total sum of distances to nearest clusters. """ return np.sum( self.evaluate( input_fn=input_fn, steps=steps)[KMeansClustering.SCORES]) def transform(self, input_fn=None, as_iterable=False): """Transforms each element to distances to cluster centers. Note that this function is different from the corresponding one in sklearn. For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN distance. 
Args: input_fn: see predict. as_iterable: see predict Returns: Array with same number of rows as x, and num_clusters columns, containing distances to the cluster centers. """ key = KMeansClustering.ALL_SCORES results = super(KMeansClustering, self).predict( input_fn=input_fn, outputs=[key], as_iterable=as_iterable) if not as_iterable: return results[key] else: return results def clusters(self): """Returns cluster centers.""" return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
license: apache-2.0
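As a rough illustration of how the estimator in the row above was meant to be driven, here is a hypothetical usage sketch. It assumes a TensorFlow 1.x environment where tf.contrib.learn is available and exposes this class as tf.contrib.learn.KMeansClustering; the data, cluster count, and step counts are made up.

# Hypothetical sketch (TF 1.x): fit k-means on random 2-D points, then query
# the per-point cluster assignments and the total distance score.
import numpy as np
import tensorflow as tf

points = np.random.rand(1000, 2).astype(np.float32)

def input_fn():
    # The model_fn above expects labels to be None.
    return tf.constant(points), None

kmeans = tf.contrib.learn.KMeansClustering(num_clusters=5,
                                           relative_tolerance=1e-4)
kmeans.fit(input_fn=input_fn, steps=100)
print("total distance:", kmeans.score(input_fn=input_fn, steps=1))
assignments = list(kmeans.predict_cluster_idx(input_fn=input_fn))
print("first assignments:", assignments[:10])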
repo_name: EliHar/Pattern_recognition | path: openface1/util/tsne.py | copies: 11 | size: 1261
#!/usr/bin/env python2

import numpy as np
import pandas as pd

from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('bmh')

import argparse

print("""
Note: This example assumes that `name i` corresponds to `label i` in `labels.csv`.
""")

parser = argparse.ArgumentParser()
parser.add_argument('workDir', type=str)
parser.add_argument('--names', type=str, nargs='+', required=True)
args = parser.parse_args()

y = pd.read_csv("{}/labels.csv".format(args.workDir)).as_matrix()[:, 0]
X = pd.read_csv("{}/reps.csv".format(args.workDir)).as_matrix()

target_names = np.array(args.names)
colors = cm.Dark2(np.linspace(0, 1, len(target_names)))

X_pca = PCA(n_components=50).fit_transform(X, X)
tsne = TSNE(n_components=2, init='random', random_state=0)
X_r = tsne.fit_transform(X_pca)

for c, i, target_name in zip(colors, list(range(1, len(target_names) + 1)), target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()

out = "{}/tsne.pdf".format(args.workDir)
plt.savefig(out)
print("Saved to: {}".format(out))
license: mit
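The script above reads two CSV files from its positional workDir argument: labels.csv (first column used as integer labels) and reps.csv (one embedding per row). A hypothetical way to prepare such inputs for a quick smoke test is sketched below; the directory name, label values, and embedding dimension are invented for illustration.

# Hypothetical smoke-test inputs for the tsne.py script above.
import os
import numpy as np
import pandas as pd

work_dir = "./tsne-demo"
os.makedirs(work_dir, exist_ok=True)

labels = np.repeat([1, 2], 50)       # two identities, 50 samples each
reps = np.random.randn(100, 128)     # 128-D embeddings, one row per sample

pd.DataFrame({"label": labels}).to_csv(os.path.join(work_dir, "labels.csv"), index=False)
pd.DataFrame(reps).to_csv(os.path.join(work_dir, "reps.csv"), index=False)
# Then run, for example:  ./tsne.py ./tsne-demo --names Alice Bob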
repo_name: ishanic/scikit-learn | path: sklearn/covariance/graph_lasso_.py | copies: 127 | size: 25626
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized estimator. """ # Author: Gael Varoquaux <[email protected]> # License: BSD 3 clause # Copyright: INRIA import warnings import operator import sys import time import numpy as np from scipy import linalg from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance, log_likelihood) from ..utils import ConvergenceWarning from ..utils.extmath import pinvh from ..utils.validation import check_random_state, check_array from ..linear_model import lars_path from ..linear_model import cd_fast from ..cross_validation import check_cv, cross_val_score from ..externals.joblib import Parallel, delayed import collections # Helper functions to compute the objective and dual objective functions # of the l1-penalized estimator def _objective(mle, precision_, alpha): """Evaluation of the graph-lasso objective function the objective function is made of a shifted scaled version of the normalized log-likelihood (i.e. its empirical mean over the samples) and a penalisation term to promote sparsity """ p = precision_.shape[0] cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi) cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) return cost def _dual_gap(emp_cov, precision_, alpha): """Expression of the dual gap convergence criterion The specific definition is given in Duchi "Projected Subgradient Methods for Learning Sparse Gaussians". """ gap = np.sum(emp_cov * precision_) gap -= precision_.shape[0] gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) return gap def alpha_max(emp_cov): """Find the maximum alpha for which there are some non-zeros off-diagonal. Parameters ---------- emp_cov : 2D array, (n_features, n_features) The sample covariance matrix Notes ----- This results from the bound for the all the Lasso that are solved in GraphLasso: each time, the row of cov corresponds to Xy. As the bound for alpha is given by `max(abs(Xy))`, the result follows. """ A = np.copy(emp_cov) A.flat[::A.shape[0] + 1] = 0 return np.max(np.abs(A)) # The g-lasso algorithm def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False, return_costs=False, eps=np.finfo(np.float64).eps, return_n_iter=False): """l1-penalized covariance estimator Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- emp_cov : 2D ndarray, shape (n_features, n_features) Empirical covariance from which to compute the covariance estimate. alpha : positive float The regularization parameter: the higher alpha, the more regularization, the sparser the inverse covariance. cov_init : 2D array (n_features, n_features), optional The initial guess for the covariance. mode : {'cd', 'lars'} The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : positive float, optional The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter : integer, optional The maximum number of iterations. verbose : boolean, optional If verbose is True, the objective function and dual gap are printed at each iteration. 
return_costs : boolean, optional If return_costs is True, the objective function and dual gap at each iteration are returned. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. return_n_iter : bool, optional Whether or not to return the number of iterations. Returns ------- covariance : 2D ndarray, shape (n_features, n_features) The estimated covariance matrix. precision : 2D ndarray, shape (n_features, n_features) The estimated (sparse) precision matrix. costs : list of (objective, dual_gap) pairs The list of values of the objective function and the dual gap at each iteration. Returned only if return_costs is True. n_iter : int Number of iterations. Returned only if `return_n_iter` is set to True. See Also -------- GraphLasso, GraphLassoCV Notes ----- The algorithm employed to solve this problem is the GLasso algorithm, from the Friedman 2008 Biostatistics paper. It is the same algorithm as in the R `glasso` package. One possible difference with the `glasso` R package is that the diagonal coefficients are not penalized. """ _, n_features = emp_cov.shape if alpha == 0: if return_costs: precision_ = linalg.inv(emp_cov) cost = - 2. * log_likelihood(emp_cov, precision_) cost += n_features * np.log(2 * np.pi) d_gap = np.sum(emp_cov * precision_) - n_features if return_n_iter: return emp_cov, precision_, (cost, d_gap), 0 else: return emp_cov, precision_, (cost, d_gap) else: if return_n_iter: return emp_cov, linalg.inv(emp_cov), 0 else: return emp_cov, linalg.inv(emp_cov) if cov_init is None: covariance_ = emp_cov.copy() else: covariance_ = cov_init.copy() # As a trivial regularization (Tikhonov like), we scale down the # off-diagonal coefficients of our starting point: This is needed, as # in the cross-validation the cov_init can easily be # ill-conditioned, and the CV loop blows. Beside, this takes # conservative stand-point on the initial conditions, and it tends to # make the convergence go faster. covariance_ *= 0.95 diagonal = emp_cov.flat[::n_features + 1] covariance_.flat[::n_features + 1] = diagonal precision_ = pinvh(covariance_) indices = np.arange(n_features) costs = list() # The different l1 regression solver have different numerical errors if mode == 'cd': errors = dict(over='raise', invalid='ignore') else: errors = dict(invalid='raise') try: # be robust to the max_iter=0 edge case, see: # https://github.com/scikit-learn/scikit-learn/issues/4134 d_gap = np.inf for i in range(max_iter): for idx in range(n_features): sub_covariance = covariance_[indices != idx].T[indices != idx] row = emp_cov[idx, indices != idx] with np.errstate(**errors): if mode == 'cd': # Use coordinate descent coefs = -(precision_[indices != idx, idx] / (precision_[idx, idx] + 1000 * eps)) coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram( coefs, alpha, 0, sub_covariance, row, row, max_iter, enet_tol, check_random_state(None), False) else: # Use LARS _, _, coefs = lars_path( sub_covariance, row, Xy=row, Gram=sub_covariance, alpha_min=alpha / (n_features - 1), copy_Gram=True, method='lars', return_path=False) # Update the precision matrix precision_[idx, idx] = ( 1. 
/ (covariance_[idx, idx] - np.dot(covariance_[indices != idx, idx], coefs))) precision_[indices != idx, idx] = (- precision_[idx, idx] * coefs) precision_[idx, indices != idx] = (- precision_[idx, idx] * coefs) coefs = np.dot(sub_covariance, coefs) covariance_[idx, indices != idx] = coefs covariance_[indices != idx, idx] = coefs d_gap = _dual_gap(emp_cov, precision_, alpha) cost = _objective(emp_cov, precision_, alpha) if verbose: print( '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e' % (i, cost, d_gap)) if return_costs: costs.append((cost, d_gap)) if np.abs(d_gap) < tol: break if not np.isfinite(cost) and i > 0: raise FloatingPointError('Non SPD result: the system is ' 'too ill-conditioned for this solver') else: warnings.warn('graph_lasso: did not converge after %i iteration:' ' dual gap: %.3e' % (max_iter, d_gap), ConvergenceWarning) except FloatingPointError as e: e.args = (e.args[0] + '. The system is too ill-conditioned for this solver',) raise e if return_costs: if return_n_iter: return covariance_, precision_, costs, i + 1 else: return covariance_, precision_, costs else: if return_n_iter: return covariance_, precision_, i + 1 else: return covariance_, precision_ class GraphLasso(EmpiricalCovariance): """Sparse inverse covariance estimation with an l1-penalized estimator. Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- alpha : positive float, default 0.01 The regularization parameter: the higher alpha, the more regularization, the sparser the inverse covariance. mode : {'cd', 'lars'}, default 'cd' The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : positive float, default 1e-4 The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter : integer, default 100 The maximum number of iterations. verbose : boolean, default False If verbose is True, the objective function and dual gap are plotted at each iteration. assume_centered : boolean, default False If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False, data are centered before computation. Attributes ---------- covariance_ : array-like, shape (n_features, n_features) Estimated covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. n_iter_ : int Number of iterations run. 
See Also -------- graph_lasso, GraphLassoCV """ def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False, assume_centered=False): self.alpha = alpha self.mode = mode self.tol = tol self.enet_tol = enet_tol self.max_iter = max_iter self.verbose = verbose self.assume_centered = assume_centered # The base class needs this for the score method self.store_precision = True def fit(self, X, y=None): X = check_array(X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) emp_cov = empirical_covariance( X, assume_centered=self.assume_centered) self.covariance_, self.precision_, self.n_iter_ = graph_lasso( emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=self.max_iter, verbose=self.verbose, return_n_iter=True) return self # Cross-validation with GraphLasso def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd', tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False): """l1-penalized covariance estimator along a path of decreasing alphas Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- X : 2D ndarray, shape (n_samples, n_features) Data from which to compute the covariance estimate. alphas : list of positive floats The list of regularization parameters, decreasing order. X_test : 2D array, shape (n_test_samples, n_features), optional Optional test matrix to measure generalisation error. mode : {'cd', 'lars'} The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where p > n. Elsewhere prefer cd which is more numerically stable. tol : positive float, optional The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter : integer, optional The maximum number of iterations. verbose : integer, optional The higher the verbosity flag, the more information is printed during the fitting. Returns ------- covariances_ : List of 2D ndarray, shape (n_features, n_features) The estimated covariance matrices. precisions_ : List of 2D ndarray, shape (n_features, n_features) The estimated (sparse) precision matrices. scores_ : List of float The generalisation error (log-likelihood) on the test data. Returned only if test data is passed. 
""" inner_verbose = max(0, verbose - 1) emp_cov = empirical_covariance(X) if cov_init is None: covariance_ = emp_cov.copy() else: covariance_ = cov_init covariances_ = list() precisions_ = list() scores_ = list() if X_test is not None: test_emp_cov = empirical_covariance(X_test) for alpha in alphas: try: # Capture the errors, and move on covariance_, precision_ = graph_lasso( emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol, enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose) covariances_.append(covariance_) precisions_.append(precision_) if X_test is not None: this_score = log_likelihood(test_emp_cov, precision_) except FloatingPointError: this_score = -np.inf covariances_.append(np.nan) precisions_.append(np.nan) if X_test is not None: if not np.isfinite(this_score): this_score = -np.inf scores_.append(this_score) if verbose == 1: sys.stderr.write('.') elif verbose > 1: if X_test is not None: print('[graph_lasso_path] alpha: %.2e, score: %.2e' % (alpha, this_score)) else: print('[graph_lasso_path] alpha: %.2e' % alpha) if X_test is not None: return covariances_, precisions_, scores_ return covariances_, precisions_ class GraphLassoCV(GraphLasso): """Sparse inverse covariance w/ cross-validated choice of the l1 penalty Read more in the :ref:`User Guide <sparse_inverse_covariance>`. Parameters ---------- alphas : integer, or list positive float, optional If an integer is given, it fixes the number of points on the grids of alpha to be used. If a list is given, it gives the grid to be used. See the notes in the class docstring for more details. n_refinements: strictly positive integer The number of times the grid is refined. Not used if explicit values of alphas are passed. cv : cross-validation generator, optional see sklearn.cross_validation module. If None is passed, defaults to a 3-fold strategy tol: positive float, optional The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped. enet_tol : positive float, optional The tolerance for the elastic net solver used to calculate the descent direction. This parameter controls the accuracy of the search direction for a given column update, not of the overall parameter estimate. Only used for mode='cd'. max_iter: integer, optional Maximum number of iterations. mode: {'cd', 'lars'} The Lasso solver to use: coordinate descent or LARS. Use LARS for very sparse underlying graphs, where number of features is greater than number of samples. Elsewhere prefer cd which is more numerically stable. n_jobs: int, optional number of jobs to run in parallel (default 1). verbose: boolean, optional If verbose is True, the objective function and duality gap are printed at each iteration. assume_centered : Boolean If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False, data are centered before computation. Attributes ---------- covariance_ : numpy.ndarray, shape (n_features, n_features) Estimated covariance matrix. precision_ : numpy.ndarray, shape (n_features, n_features) Estimated precision matrix (inverse covariance). alpha_ : float Penalization parameter selected. cv_alphas_ : list of float All penalization parameters explored. `grid_scores`: 2D numpy.ndarray (n_alphas, n_folds) Log-likelihood score on left-out data across folds. n_iter_ : int Number of iterations run for the optimal alpha. 
See Also -------- graph_lasso, GraphLasso Notes ----- The search for the optimal penalization parameter (alpha) is done on an iteratively refined grid: first the cross-validated scores on a grid are computed, then a new refined grid is centered around the maximum, and so on. One of the challenges which is faced here is that the solvers can fail to converge to a well-conditioned estimate. The corresponding values of alpha then come out as missing values, but the optimum may be close to these missing values. """ def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4, enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1, verbose=False, assume_centered=False): self.alphas = alphas self.n_refinements = n_refinements self.mode = mode self.tol = tol self.enet_tol = enet_tol self.max_iter = max_iter self.verbose = verbose self.cv = cv self.n_jobs = n_jobs self.assume_centered = assume_centered # The base class needs this for the score method self.store_precision = True def fit(self, X, y=None): """Fits the GraphLasso covariance model to X. Parameters ---------- X : ndarray, shape (n_samples, n_features) Data from which to compute the covariance estimate """ X = check_array(X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) emp_cov = empirical_covariance( X, assume_centered=self.assume_centered) cv = check_cv(self.cv, X, y, classifier=False) # List of (alpha, scores, covs) path = list() n_alphas = self.alphas inner_verbose = max(0, self.verbose - 1) if isinstance(n_alphas, collections.Sequence): alphas = self.alphas n_refinements = 1 else: n_refinements = self.n_refinements alpha_1 = alpha_max(emp_cov) alpha_0 = 1e-2 * alpha_1 alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1] t0 = time.time() for i in range(n_refinements): with warnings.catch_warnings(): # No need to see the convergence warnings on this grid: # they will always be points that will not converge # during the cross-validation warnings.simplefilter('ignore', ConvergenceWarning) # Compute the cross-validated loss on the current grid # NOTE: Warm-restarting graph_lasso_path has been tried, and # this did not allow to gain anything (same execution time with # or without). 
this_path = Parallel( n_jobs=self.n_jobs, verbose=self.verbose )( delayed(graph_lasso_path)( X[train], alphas=alphas, X_test=X[test], mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=int(.1 * self.max_iter), verbose=inner_verbose) for train, test in cv) # Little danse to transform the list in what we need covs, _, scores = zip(*this_path) covs = zip(*covs) scores = zip(*scores) path.extend(zip(alphas, scores, covs)) path = sorted(path, key=operator.itemgetter(0), reverse=True) # Find the maximum (avoid using built in 'max' function to # have a fully-reproducible selection of the smallest alpha # in case of equality) best_score = -np.inf last_finite_idx = 0 for index, (alpha, scores, _) in enumerate(path): this_score = np.mean(scores) if this_score >= .1 / np.finfo(np.float64).eps: this_score = np.nan if np.isfinite(this_score): last_finite_idx = index if this_score >= best_score: best_score = this_score best_index = index # Refine the grid if best_index == 0: # We do not need to go back: we have chosen # the highest value of alpha for which there are # non-zero coefficients alpha_1 = path[0][0] alpha_0 = path[1][0] elif (best_index == last_finite_idx and not best_index == len(path) - 1): # We have non-converged models on the upper bound of the # grid, we need to refine the grid there alpha_1 = path[best_index][0] alpha_0 = path[best_index + 1][0] elif best_index == len(path) - 1: alpha_1 = path[best_index][0] alpha_0 = 0.01 * path[best_index][0] else: alpha_1 = path[best_index - 1][0] alpha_0 = path[best_index + 1][0] if not isinstance(n_alphas, collections.Sequence): alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2) alphas = alphas[1:-1] if self.verbose and n_refinements > 1: print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is' % (i + 1, n_refinements, time.time() - t0)) path = list(zip(*path)) grid_scores = list(path[1]) alphas = list(path[0]) # Finally, compute the score with alpha = 0 alphas.append(0) grid_scores.append(cross_val_score(EmpiricalCovariance(), X, cv=cv, n_jobs=self.n_jobs, verbose=inner_verbose)) self.grid_scores = np.array(grid_scores) best_alpha = alphas[best_index] self.alpha_ = best_alpha self.cv_alphas_ = alphas # Finally fit the model with the selected alpha self.covariance_, self.precision_, self.n_iter_ = graph_lasso( emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=self.max_iter, verbose=inner_verbose, return_n_iter=True) return self
bsd-3-clause
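The GraphLasso, graph_lasso_path and GraphLassoCV code above follows the historical scikit-learn covariance API. Below is a minimal usage sketch, assuming an older scikit-learn release in which these names are still importable (newer releases rename them to GraphicalLasso/GraphicalLassoCV); the problem size, random data and printed fields are illustrative only.

import numpy as np
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, graph_lasso, empirical_covariance

# Simulate samples from a Gaussian whose precision matrix is sparse.
rng = np.random.RandomState(0)
precision = make_sparse_spd_matrix(20, alpha=0.95, random_state=rng)
covariance = np.linalg.inv(precision)
X = rng.multivariate_normal(np.zeros(20), covariance, size=100)

# Cross-validated choice of the l1 penalty on an iteratively refined grid.
model = GraphLassoCV()
model.fit(X)
print('selected alpha:', model.alpha_)
print('non-zeros in estimated precision:', np.count_nonzero(model.precision_))

# Single fit at a fixed alpha through the function API used internally.
emp_cov = empirical_covariance(X)
cov_, prec_ = graph_lasso(emp_cov, alpha=model.alpha_)

graph_lasso_path can likewise be called directly with an explicit list of alphas in decreasing order when the cross-validated grid refinement of GraphLassoCV is not needed.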
BryanCutler/spark
python/pyspark/pandas/tests/test_frame_spark.py
1
6247
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from distutils.version import LooseVersion
import os

import pandas as pd

import pyspark
from pyspark import pandas as pp
from pyspark.pandas.testing.utils import ReusedSQLTestCase, SQLTestUtils, TestUtils


class SparkFrameMethodsTest(ReusedSQLTestCase, SQLTestUtils, TestUtils):
    def test_frame_apply_negative(self):
        with self.assertRaisesRegex(
            ValueError, "The output of the function.* pyspark.sql.DataFrame.*int"
        ):
            pp.range(10).spark.apply(lambda scol: 1)

    def test_hint(self):
        pdf1 = pd.DataFrame(
            {"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]}
        ).set_index("lkey")
        pdf2 = pd.DataFrame(
            {"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]}
        ).set_index("rkey")
        kdf1 = pp.from_pandas(pdf1)
        kdf2 = pp.from_pandas(pdf2)

        if LooseVersion(pyspark.__version__) >= LooseVersion("3.0"):
            hints = ["broadcast", "merge", "shuffle_hash", "shuffle_replicate_nl"]
        else:
            hints = ["broadcast"]

        for hint in hints:
            self.assert_eq(
                pdf1.merge(pdf2, left_index=True, right_index=True).sort_values(
                    ["value_x", "value_y"]
                ),
                kdf1.merge(kdf2.spark.hint(hint), left_index=True, right_index=True).sort_values(
                    ["value_x", "value_y"]
                ),
                almost=True,
            )
            self.assert_eq(
                pdf1.merge(pdf2 + 1, left_index=True, right_index=True).sort_values(
                    ["value_x", "value_y"]
                ),
                kdf1.merge(
                    (kdf2 + 1).spark.hint(hint), left_index=True, right_index=True
                ).sort_values(["value_x", "value_y"]),
                almost=True,
            )

    def test_repartition(self):
        kdf = pp.DataFrame({"age": [5, 5, 2, 2], "name": ["Bob", "Bob", "Alice", "Alice"]})
        num_partitions = kdf.to_spark().rdd.getNumPartitions() + 1

        num_partitions += 1
        new_kdf = kdf.spark.repartition(num_partitions)
        self.assertEqual(new_kdf.to_spark().rdd.getNumPartitions(), num_partitions)
        self.assert_eq(kdf.sort_index(), new_kdf.sort_index())

        # Reserves Index
        kdf = kdf.set_index("age")

        num_partitions += 1
        new_kdf = kdf.spark.repartition(num_partitions)
        self.assertEqual(new_kdf.to_spark().rdd.getNumPartitions(), num_partitions)
        self.assert_eq(kdf.sort_index(), new_kdf.sort_index())

        # Reflects internal changes
        kdf = kdf.reset_index()
        kdf = kdf.set_index("name")
        kdf2 = kdf + 1

        num_partitions += 1
        self.assert_eq(kdf2.sort_index(), (kdf + 1).spark.repartition(num_partitions).sort_index())

        # Reserves MultiIndex
        kdf = pp.DataFrame({"a": ["a", "b", "c"]}, index=[[1, 2, 3], [4, 5, 6]])
        num_partitions = kdf.to_spark().rdd.getNumPartitions() + 1
        new_kdf = kdf.spark.repartition(num_partitions)
        self.assertEqual(new_kdf.to_spark().rdd.getNumPartitions(), num_partitions)
        self.assert_eq(kdf.sort_index(), new_kdf.sort_index())

    def test_coalesce(self):
        num_partitions = 10
        kdf = pp.DataFrame({"age": [5, 5, 2, 2], "name": ["Bob", "Bob", "Alice", "Alice"]})
        kdf = kdf.spark.repartition(num_partitions)

        num_partitions -= 1
        new_kdf = kdf.spark.coalesce(num_partitions)
        self.assertEqual(new_kdf.to_spark().rdd.getNumPartitions(), num_partitions)
        self.assert_eq(kdf.sort_index(), new_kdf.sort_index())

        # Reserves Index
        kdf = kdf.set_index("age")

        num_partitions -= 1
        new_kdf = kdf.spark.coalesce(num_partitions)
        self.assertEqual(new_kdf.to_spark().rdd.getNumPartitions(), num_partitions)
        self.assert_eq(kdf.sort_index(), new_kdf.sort_index())

        # Reflects internal changes
        kdf = kdf.reset_index()
        kdf = kdf.set_index("name")
        kdf2 = kdf + 1

        num_partitions -= 1
        self.assert_eq(kdf2.sort_index(), (kdf + 1).spark.coalesce(num_partitions).sort_index())

        # Reserves MultiIndex
        kdf = pp.DataFrame({"a": ["a", "b", "c"]}, index=[[1, 2, 3], [4, 5, 6]])
        num_partitions -= 1
        kdf = kdf.spark.repartition(num_partitions)

        num_partitions -= 1
        new_kdf = kdf.spark.coalesce(num_partitions)
        self.assertEqual(new_kdf.to_spark().rdd.getNumPartitions(), num_partitions)
        self.assert_eq(kdf.sort_index(), new_kdf.sort_index())

    def test_checkpoint(self):
        with self.temp_dir() as tmp:
            self.spark.sparkContext.setCheckpointDir(tmp)
            kdf = pp.DataFrame({"a": ["a", "b", "c"]})
            new_kdf = kdf.spark.checkpoint()
            self.assertIsNotNone(os.listdir(tmp))
            self.assert_eq(kdf, new_kdf)

    def test_local_checkpoint(self):
        kdf = pp.DataFrame({"a": ["a", "b", "c"]})
        new_kdf = kdf.spark.local_checkpoint()
        self.assert_eq(kdf, new_kdf)


if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_frame_spark import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
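The tests above exercise the spark accessor of pyspark.pandas DataFrames: apply, hint, repartition, coalesce, checkpoint and local_checkpoint. A standalone sketch of the same calls, assuming a local PySpark installation recent enough to ship pyspark.pandas (3.2 or later); the partition counts, checkpoint directory and toy data are placeholders.

import pyspark.pandas as ps
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()

psdf = ps.DataFrame({"age": [5, 5, 2, 2], "name": ["Bob", "Bob", "Alice", "Alice"]})

# Control the underlying Spark partitioning without leaving the pandas API.
repartitioned = psdf.spark.repartition(4)
print(repartitioned.to_spark().rdd.getNumPartitions())  # 4
coalesced = repartitioned.spark.coalesce(2)
print(coalesced.to_spark().rdd.getNumPartitions())  # 2

# Join hints are forwarded to the underlying Spark plan.
other = ps.DataFrame({"age": [5, 2], "bonus": [1, 0]}).set_index("age")
joined = psdf.set_index("age").merge(
    other.spark.hint("broadcast"), left_index=True, right_index=True
)

# Truncate the query lineage; requires a checkpoint directory to be set first.
spark.sparkContext.setCheckpointDir("/tmp/ps-checkpoint")
checkpointed = joined.spark.checkpoint()
print(checkpointed.sort_index())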
codekansas/spykes
examples/plot_neuropixels_example.py
2
7803
""" =================== Neuropixels Example =================== Use spykes to analyze data from UCL's Neuropixels """ # Authors: Mayank Agrawal <[email protected]> # # License: MIT ######################################################## import numpy as np import pandas as pd from spykes.plot.neurovis import NeuroVis from spykes.plot.popvis import PopVis import matplotlib.pyplot as plt from spykes.io.datasets import load_neuropixels_data plt.style.use('seaborn-ticks') ######################################################## # Neuropixels # ----------------------------- # Neuropixels is a new recording technique by UCL's `Cortex Lab # <http://www.ucl.ac.uk/neuropixels>`__ that is able to measure data from # hundreds of neurons. Below we show how this data can be worked with in Spykes # # 0 Download Data # ----------------------------- # # Download all data `here <http://data.cortexlab.net/dualPhase3/data/>`__. # # 1 Read In Data # ----------------------------- folder_names = ['posterior', 'frontal'] Fs = 30000.0 striatum = list() motor_ctx = list() thalamus = list() hippocampus = list() visual_ctx = list() # a lot of this code is adapted from Cortex Lab's MATLAB script # see here: http://data.cortexlab.net/dualPhase3/data/script_dualPhase3.m data_dict = load_neuropixels_data() for name in folder_names: clusters = np.squeeze(data_dict[name + '/spike_clusters.npy']) spike_times = (np.squeeze(data_dict[(name + '/spike_times.npy')])) / Fs spike_templates = (np.squeeze(data_dict[(name + '/spike_templates.npy')])) temps = (np.squeeze(data_dict[(name + '/templates.npy')])) winv = (np.squeeze(data_dict[(name + '/whitening_mat_inv.npy')])) y_coords = (np.squeeze(data_dict[(name + '/channel_positions.npy')]))[:, 1] # frontal times need to align with posterior if (name == 'frontal'): time_correction = data_dict[('timeCorrection.npy')] spike_times *= time_correction[0] spike_times += time_correction[1] data = data_dict[(name + '/cluster_groups.csv')] cids = np.array([x[0] for x in data]) cfg = np.array([x[1] for x in data]) # find good clusters and only use those spikes good_clusters = cids[cfg == 'good'] good_indices = (np.in1d(clusters, good_clusters)) real_spikes = spike_times[good_indices] real_clusters = clusters[good_indices] real_spike_templates = spike_templates[good_indices] # find how many spikes per cluster and then order spikes by which cluster # they are in counts_per_cluster = np.bincount(real_clusters) sort_idx = np.argsort(real_clusters) sorted_clusters = real_clusters[sort_idx] sorted_spikes = real_spikes[sort_idx] sorted_spike_templates = real_spike_templates[sort_idx] # find depth for each spike # this is translated from Cortex Lab's MATLAB code # for more details, check out the original code here: # https://github.com/cortex-lab/spikes/blob/master/analysis/templatePositionsAmplitudes.m temps_unw = np.zeros(temps.shape) for t in range(temps.shape[0]): temps_unw[t, :, :] = np.dot(temps[t, :, :], winv) temp_chan_amps = np.ptp(temps_unw, axis=1) temps_amps = np.max(temp_chan_amps, axis=1) thresh_vals = temps_amps * 0.3 thresh_vals = [thresh_vals for i in range(temp_chan_amps.shape[1])] thresh_vals = np.stack(thresh_vals, axis=1) temp_chan_amps[temp_chan_amps < thresh_vals] = 0 y_coords = np.reshape(y_coords, (y_coords.shape[0], 1)) temp_depths = np.sum( np.dot(temp_chan_amps, y_coords), axis=1) / (np.sum(temp_chan_amps, axis=1)) sorted_spike_depths = temp_depths[sorted_spike_templates] # create neurons and find region accumulator = 0 for idx, count in enumerate(counts_per_cluster): if 
count > 0: spike_times = sorted_spikes[accumulator:accumulator + count] neuron = NeuroVis(spiketimes=spike_times, name='%d' % (idx)) cluster_depth = np.mean( sorted_spike_depths[accumulator:accumulator + count]) if name == 'frontal': if (cluster_depth > 0 and cluster_depth < 1550): striatum.append(neuron) elif (cluster_depth > 1550 and cluster_depth < 3840): motor_ctx.append(neuron) elif name == 'posterior': if (cluster_depth > 0 and cluster_depth < 1634): thalamus.append(neuron) elif (cluster_depth > 1634 and cluster_depth < 2797): hippocampus.append(neuron) elif (cluster_depth > 2797 and cluster_depth < 3840): visual_ctx.append(neuron) accumulator += count print("Striatum (n = %d)" % len(striatum)) print("Motor Cortex (n = %d)" % len(motor_ctx)) print("Thalamus (n = %d)" % len(thalamus)) print("Hippocampus (n = %d)" % len(hippocampus)) print("Visual Cortex (n = %d)" % len(visual_ctx)) ######################################################## # 2 Create Data Frame # ----------------------------- df = pd.DataFrame() raw_data = data_dict['experiment1stimInfo.mat'] df['start'] = np.squeeze(raw_data['stimStarts']) df['stop'] = np.squeeze(raw_data['stimStops']) df['stimulus'] = np.squeeze(raw_data['stimIDs']) print(df.head()) ######################################################## # 3 Start Plotting # ----------------------------- # 3.1 Striatum # ~~~~~~~~~~~~ pop = PopVis(striatum, name='Striatum') fig = plt.figure(figsize=(30, 20)) all_psth = pop.get_all_psth( event='start', df=df, conditions='stimulus', plot=False, binsize=100, window=[-500, 2000]) pop.plot_heat_map(all_psth, cond_id=[ 2, 7, 13], sortorder='descend', neuron_names=False) ######################################################## pop.plot_population_psth(all_psth=all_psth, cond_id=[1, 7, 12]) ######################################################## # 3.2 Frontal # ~~~~~~~~~~~~ pop = PopVis(striatum + motor_ctx, name='Frontal') fig = plt.figure(figsize=(30, 20)) all_psth = pop.get_all_psth( event='start', df=df, conditions='stimulus', plot=False, binsize=100, window=[-500, 2000]) pop.plot_heat_map( all_psth, cond_id=[2, 7, 13], sortorder='descend', neuron_names=False) ######################################################## pop.plot_population_psth(all_psth=all_psth, cond_id=[1, 7, 12]) ######################################################## # 3.3 All Neurons # ~~~~~~~~~~~~ pop = PopVis(striatum + motor_ctx + thalamus + hippocampus + visual_ctx) fig = plt.figure(figsize=(30, 20)) all_psth = pop.get_all_psth( event='start', df=df, conditions='stimulus', plot=False, binsize=100, window=[-500, 2000]) pop.plot_heat_map( all_psth, cond_id=[2, 7, 13], sortorder='descend', neuron_names=False) ######################################################## pop.plot_population_psth(all_psth=all_psth, cond_id=[1, 7, 12]) ######################################################## # 3.4 Striatum vs. 
Motor Cortex # ~~~~~~~~~~~~ striatum_pop = PopVis(striatum, name='Striatum') motor_ctx_pop = PopVis(motor_ctx, name='Motor Cortex') striatum_psth = striatum_pop.get_all_psth( event='start', df=df, conditions='stimulus', plot=False, binsize=100, window=[-500, 2000]) motor_ctx_psth = motor_ctx_pop.get_all_psth( event='start', df=df, conditions='stimulus', plot=False, binsize=100, window=[-500, 2000]) ######################################################## striatum_pop.plot_population_psth(all_psth=striatum_psth, cond_id=[1, 7, 12]) ######################################################## motor_ctx_pop.plot_population_psth(all_psth=motor_ctx_psth, cond_id=[1, 7, 12])
mit
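The script above wraps Neuropixels spike trains in NeuroVis objects and summarizes them per region with PopVis. A toy, self-contained sketch of the same spykes calls, assuming the NeuroVis/PopVis API used in the script; the synthetic spike trains, the trial table and the cond_id values are illustrative only.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from spykes.plot.neurovis import NeuroVis
from spykes.plot.popvis import PopVis

rng = np.random.RandomState(0)

# Fake population: five neurons firing ~10 Hz over a 100 s session.
neurons = [
    NeuroVis(spiketimes=np.sort(rng.uniform(0, 100, 1000)), name='%d' % i)
    for i in range(5)
]

# Trial table: event times (in seconds) and a condition label per trial.
trials = pd.DataFrame({
    'start': np.arange(5.0, 95.0, 5.0),
    'stimulus': rng.randint(0, 3, 18),
})

pop = PopVis(neurons, name='Toy Population')
all_psth = pop.get_all_psth(
    event='start', df=trials, conditions='stimulus',
    plot=False, binsize=100, window=[-500, 1000])

fig = plt.figure(figsize=(10, 6))
pop.plot_heat_map(all_psth, cond_id=[0, 1], sortorder='descend', neuron_names=False)
plt.show()

pop.plot_population_psth(all_psth=all_psth, cond_id=[0, 1])
plt.show()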
bobwalker99/Pydev
plugins/org.python.pydev/pysrc/pydevd.py
1
61172
''' Entry point module (keep at root): This module starts the debugger. ''' from __future__ import nested_scopes # Jython 2.1 support import atexit import os import sys import traceback from _pydevd_bundle.pydevd_constants import IS_JYTH_LESS25, IS_PY3K, IS_PY34_OLDER, get_thread_id, dict_keys, dict_pop, dict_contains, \ dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame, xrange, \ clear_cached_thread_id from _pydev_bundle import fix_getpass from _pydev_bundle import pydev_imports, pydev_log from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding from _pydev_bundle.pydev_is_thread_alive import is_thread_alive from _pydev_imps._pydev_saved_modules import threading from _pydev_imps._pydev_saved_modules import time from _pydev_imps._pydev_saved_modules import thread from _pydevd_bundle import pydevd_io, pydevd_vm_type, pydevd_tracing from _pydevd_bundle import pydevd_utils from _pydevd_bundle import pydevd_vars from _pydevd_bundle.pydevd_additional_thread_info import PyDBAdditionalThreadInfo from _pydevd_bundle.pydevd_breakpoints import ExceptionBreakpoint, update_exception_hook from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO, CMD_STEP_OVER, \ CMD_STEP_RETURN, CMD_STEP_INTO_MY_CODE, CMD_THREAD_SUSPEND, CMD_RUN_TO_LINE, \ CMD_ADD_EXCEPTION_BREAK, CMD_SMART_STEP_INTO, InternalConsoleExec, NetCommandFactory, \ PyDBDaemonThread, _queue, ReaderThread, GetGlobalDebugger, get_global_debugger, \ set_global_debugger, WriterThread, pydevd_find_thread_by_id, pydevd_log, \ start_client, start_server, InternalGetBreakpointException, InternalSendCurrExceptionTrace, \ InternalSendCurrExceptionTraceProceeded from _pydevd_bundle.pydevd_custom_frames import CustomFramesContainer, custom_frames_container_init from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads from _pydevd_bundle.pydevd_trace_dispatch import trace_dispatch as _trace_dispatch from _pydevd_bundle.pydevd_utils import save_main_module from pydevd_concurrency_analyser.pydevd_concurrency_logger import ThreadingLogger, AsyncioLogger, send_message, cur_time from pydevd_concurrency_analyser.pydevd_thread_wrappers import wrap_threads __version_info__ = (0, 0, 6) __version_info_str__ = [] for v in __version_info__: __version_info_str__.append(str(v)) __version__ = '.'.join(__version_info_str__) #IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe SUPPORT_PLUGINS = not IS_JYTH_LESS25 PluginManager = None if SUPPORT_PLUGINS: from _pydevd_bundle.pydevd_plugin_utils import PluginManager threadingEnumerate = threading.enumerate threadingCurrentThread = threading.currentThread try: 'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread). 
except: pass connected = False bufferStdOutToServer = False bufferStdErrToServer = False remote = False file_system_encoding = getfilesystemencoding() #======================================================================================================================= # PyDBCommandThread #======================================================================================================================= class PyDBCommandThread(PyDBDaemonThread): def __init__(self, py_db): PyDBDaemonThread.__init__(self) self._py_db_command_thread_event = py_db._py_db_command_thread_event self.py_db = py_db self.setName('pydevd.CommandThread') def _on_run(self): for i in xrange(1, 10): time.sleep(0.5) #this one will only start later on (because otherwise we may not have any non-daemon threads if self.killReceived: return if self.dontTraceMe: self.py_db.SetTrace(None) # no debugging on this thread try: while not self.killReceived: try: self.py_db.process_internal_commands() except: pydevd_log(0, 'Finishing debug communication...(2)') self._py_db_command_thread_event.clear() self._py_db_command_thread_event.wait(0.5) except: pydev_log.debug(sys.exc_info()[0]) #only got this error in interpreter shutdown #pydevd_log(0, 'Finishing debug communication...(3)') #======================================================================================================================= # CheckOutputThread # Non-daemonic thread guaranties that all data is written even if program is finished #======================================================================================================================= class CheckOutputThread(PyDBDaemonThread): def __init__(self, py_db): PyDBDaemonThread.__init__(self) self.py_db = py_db self.setName('pydevd.CheckAliveThread') self.daemon = False py_db.output_checker = self def _on_run(self): if self.dontTraceMe: disable_tracing = True if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0: # don't run untraced threads if we're in jython 2.2.1 or lower # jython bug: if we start a thread and another thread changes the tracing facility # it affects other threads (it's not set only for the thread but globally) # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867 disable_tracing = False if disable_tracing: pydevd_tracing.SetTrace(None) # no debugging on this thread while not self.killReceived: time.sleep(0.3) if not self.py_db.has_threads_alive() and self.py_db.writer.empty() \ and not has_data_to_redirect(): try: pydev_log.debug("No alive threads, finishing debug session") self.py_db.finish_debugging_session() kill_all_pydev_threads() except: traceback.print_exc() self.killReceived = True self.py_db.check_output_redirect() def do_kill_pydev_thread(self): self.killReceived = True #======================================================================================================================= # PyDB #======================================================================================================================= class PyDB: """ Main debugging class Lots of stuff going on here: PyDB starts two threads on startup that connect to remote debugger (RDB) The threads continuously read & write commands to RDB. PyDB communicates with these threads through command queues. Every RDB command is processed by calling process_net_command. 
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue Some commands need to be executed on the right thread (suspend/resume & friends) These are placed on the internal command queue. """ def __init__(self): set_global_debugger(self) pydevd_tracing.replace_sys_set_trace_func() self.reader = None self.writer = None self.output_checker = None self.quitting = None self.cmd_factory = NetCommandFactory() self._cmd_queue = {} # the hash of Queues. Key is thread id, value is thread self.breakpoints = {} self.file_to_id_to_line_breakpoint = {} self.file_to_id_to_plugin_breakpoint = {} # Note: breakpoints dict should not be mutated: a copy should be created # and later it should be assigned back (to prevent concurrency issues). self.break_on_uncaught_exceptions = {} self.break_on_caught_exceptions = {} self.ready_to_run = False self._main_lock = thread.allocate_lock() self._lock_running_thread_ids = thread.allocate_lock() self._py_db_command_thread_event = threading.Event() CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event self._finish_debugging_session = False self._termination_event_set = False self.signature_factory = None self.SetTrace = pydevd_tracing.SetTrace self.break_on_exceptions_thrown_in_same_context = False self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True # Suspend debugger even if breakpoint condition raises an exception SUSPEND_ON_BREAKPOINT_EXCEPTION = True self.suspend_on_breakpoint_exception = SUSPEND_ON_BREAKPOINT_EXCEPTION # By default user can step into properties getter/setter/deleter methods self.disable_property_trace = False self.disable_property_getter_trace = False self.disable_property_setter_trace = False self.disable_property_deleter_trace = False #this is a dict of thread ids pointing to thread ids. Whenever a command is passed to the java end that #acknowledges that a thread was created, the thread id should be passed here -- and if at some time we do not #find that thread alive anymore, we must remove it from this list and make the java side know that the thread #was killed. self._running_thread_ids = {} self._set_breakpoints_with_id = False # This attribute holds the file-> lines which have an @IgnoreException. 
self.filename_to_lines_where_exceptions_are_ignored = {} #working with plugins (lazily initialized) self.plugin = None self.has_plugin_line_breaks = False self.has_plugin_exception_breaks = False self.thread_analyser = None self.asyncio_analyser = None # matplotlib support in debugger and debug console self.mpl_in_use = False self.mpl_hooks_in_debug_console = False self.mpl_modules_for_patching = {} self._filename_to_not_in_scope = {} self.first_breakpoint_reached = False self.is_filter_enabled = pydevd_utils.is_filter_enabled() self.is_filter_libraries = pydevd_utils.is_filter_libraries() def get_plugin_lazy_init(self): if self.plugin is None and SUPPORT_PLUGINS: self.plugin = PluginManager(self) return self.plugin def not_in_scope(self, filename): return pydevd_utils.not_in_project_roots(filename) def is_ignored_by_filters(self, filename): return pydevd_utils.is_ignored_by_filter(filename) def first_appearance_in_scope(self, trace): if trace is None or self.not_in_scope(trace.tb_frame.f_code.co_filename): return False else: trace = trace.tb_next while trace is not None: frame = trace.tb_frame if not self.not_in_scope(frame.f_code.co_filename): return False trace = trace.tb_next return True def has_threads_alive(self): for t in threadingEnumerate(): if getattr(t, 'is_pydev_daemon_thread', False): #Important: Jython 2.5rc4 has a bug where a thread created with thread.start_new_thread won't be #set as a daemon thread, so, we also have to check for the 'is_pydev_daemon_thread' flag. #See: https://github.com/fabioz/PyDev.Debugger/issues/11 continue if isinstance(t, PyDBDaemonThread): pydev_log.error_once( 'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n') if is_thread_alive(t): if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"): return True return False def finish_debugging_session(self): self._finish_debugging_session = True def initialize_network(self, sock): try: sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it except: pass self.writer = WriterThread(sock) self.reader = ReaderThread(sock) self.writer.start() self.reader.start() time.sleep(0.1) # give threads time to start def connect(self, host, port): if host: s = start_client(host, port) else: s = start_server(port) self.initialize_network(s) def get_internal_queue(self, thread_id): """ returns internal command queue for a given thread. 
if new queue is created, notify the RDB about it """ if thread_id.startswith('__frame__'): thread_id = thread_id[thread_id.rfind('|') + 1:] try: return self._cmd_queue[thread_id] except KeyError: return self._cmd_queue.setdefault(thread_id, _queue.Queue()) #@UndefinedVariable def post_internal_command(self, int_cmd, thread_id): """ if thread_id is *, post to all """ if thread_id == "*": threads = threadingEnumerate() for t in threads: thread_id = get_thread_id(t) queue = self.get_internal_queue(thread_id) queue.put(int_cmd) else: queue = self.get_internal_queue(thread_id) queue.put(int_cmd) def check_output_redirect(self): global bufferStdOutToServer global bufferStdErrToServer if bufferStdOutToServer: init_stdout_redirect() self.check_output(sys.stdoutBuf, 1) #@UndefinedVariable if bufferStdErrToServer: init_stderr_redirect() self.check_output(sys.stderrBuf, 2) #@UndefinedVariable def check_output(self, out, outCtx): '''Checks the output to see if we have to send some buffered output to the debug server @param out: sys.stdout or sys.stderr @param outCtx: the context indicating: 1=stdout and 2=stderr (to know the colors to write it) ''' try: v = out.getvalue() if v: self.cmd_factory.make_io_message(v, outCtx, self) except: traceback.print_exc() def init_matplotlib_in_debug_console(self): # import hook and patches for matplotlib support in debug console from _pydev_bundle.pydev_import_hook import import_hook_manager for module in dict_keys(self.mpl_modules_for_patching): import_hook_manager.add_module_name(module, dict_pop(self.mpl_modules_for_patching, module)) def init_matplotlib_support(self): # prepare debugger for integration with matplotlib GUI event loop from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui # enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console, # in the debug console we have no interpreter instance with exec_queue, but we run this code in the main # thread and can call it directly. class _MatplotlibHelper: _return_control_osc = False def return_control(): # Some of the input hooks (e.g. 
Qt4Agg) check return control without doing # a single operation, so we don't return True on every # call when the debug hook is in place to allow the GUI to run _MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc return _MatplotlibHelper._return_control_osc from pydev_ipython.inputhook import set_return_control_callback set_return_control_callback(return_control) self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui), "matplotlib.pyplot": activate_pyplot, "pylab": activate_pylab } def process_internal_commands(self): '''This function processes internal commands ''' self._main_lock.acquire() try: self.check_output_redirect() curr_thread_id = get_thread_id(threadingCurrentThread()) program_threads_alive = {} all_threads = threadingEnumerate() program_threads_dead = [] self._lock_running_thread_ids.acquire() try: for t in all_threads: if getattr(t, 'is_pydev_daemon_thread', False): pass # I.e.: skip the DummyThreads created from pydev daemon threads elif isinstance(t, PyDBDaemonThread): pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n') elif is_thread_alive(t): if not self._running_thread_ids: # Fix multiprocessing debug with breakpoints in both main and child processes # (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main # thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't # get new id with its process number and the debugger loses access to both threads. # Therefore we should update thread_id for every main thread in the new process. # TODO: Investigate: should we do this for all threads in threading.enumerate()? # (i.e.: if a fork happens on Linux, this seems likely). old_thread_id = get_thread_id(t) clear_cached_thread_id(t) clear_cached_thread_id(threadingCurrentThread()) thread_id = get_thread_id(t) curr_thread_id = get_thread_id(threadingCurrentThread()) if pydevd_vars.has_additional_frames_by_id(old_thread_id): frames_by_id = pydevd_vars.get_additional_frames_by_id(old_thread_id) pydevd_vars.add_additional_frame_by_id(thread_id, frames_by_id) else: thread_id = get_thread_id(t) program_threads_alive[thread_id] = t if not dict_contains(self._running_thread_ids, thread_id): if not hasattr(t, 'additional_info'): # see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329 # Let's create the additional info right away! t.additional_info = PyDBAdditionalThreadInfo() self._running_thread_ids[thread_id] = t self.writer.add_command(self.cmd_factory.make_thread_created_message(t)) queue = self.get_internal_queue(thread_id) cmdsToReadd = [] # some commands must be processed by the thread itself... if that's the case, # we will re-add the commands to the queue after executing. 
try: while True: int_cmd = queue.get(False) if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec): # add import hooks for matplotlib patches if only debug console was started try: self.init_matplotlib_in_debug_console() self.mpl_in_use = True except: pydevd_log(2, "Matplotlib support in debug console failed", traceback.format_exc()) self.mpl_hooks_in_debug_console = True if int_cmd.can_be_executed_by(curr_thread_id): pydevd_log(2, "processing internal command ", str(int_cmd)) int_cmd.do_it(self) else: pydevd_log(2, "NOT processing internal command ", str(int_cmd)) cmdsToReadd.append(int_cmd) except _queue.Empty: #@UndefinedVariable for int_cmd in cmdsToReadd: queue.put(int_cmd) # this is how we exit thread_ids = list(self._running_thread_ids.keys()) for tId in thread_ids: if not dict_contains(program_threads_alive, tId): program_threads_dead.append(tId) finally: self._lock_running_thread_ids.release() for tId in program_threads_dead: try: self._process_thread_not_alive(tId) except: sys.stderr.write('Error iterating through %s (%s) - %s\n' % ( program_threads_alive, program_threads_alive.__class__, dir(program_threads_alive))) raise if len(program_threads_alive) == 0: self.finish_debugging_session() for t in all_threads: if hasattr(t, 'do_kill_pydev_thread'): t.do_kill_pydev_thread() finally: self._main_lock.release() def set_tracing_for_untraced_contexts(self, ignore_frame=None, overwrite_prev_trace=False): # Enable the tracing for existing threads (because there may be frames being executed that # are currently untraced). threads = threadingEnumerate() try: for t in threads: if getattr(t, 'is_pydev_daemon_thread', False): continue # TODO: optimize so that we only actually add that tracing if it's in # the new breakpoint context. 
additional_info = None try: additional_info = t.additional_info except AttributeError: pass # that's ok, no info currently set if additional_info is not None: for frame in additional_info.iter_frames(t): if frame is not ignore_frame: self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=overwrite_prev_trace) finally: frame = None t = None threads = None additional_info = None def consolidate_breakpoints(self, file, id_to_breakpoint, breakpoints): break_dict = {} for breakpoint_id, pybreakpoint in dict_iter_items(id_to_breakpoint): break_dict[pybreakpoint.line] = pybreakpoint breakpoints[file] = break_dict def add_break_on_exception( self, exception, notify_always, notify_on_terminate, notify_on_first_raise_only, ignore_libraries=False ): try: eb = ExceptionBreakpoint( exception, notify_always, notify_on_terminate, notify_on_first_raise_only, ignore_libraries ) except ImportError: pydev_log.error("Error unable to add break on exception for: %s (exception could not be imported)\n" % (exception,)) return None if eb.notify_on_terminate: cp = self.break_on_uncaught_exceptions.copy() cp[exception] = eb if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0: pydev_log.error("Exceptions to hook on terminate: %s\n" % (cp,)) self.break_on_uncaught_exceptions = cp if eb.notify_always: cp = self.break_on_caught_exceptions.copy() cp[exception] = eb if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0: pydev_log.error("Exceptions to hook always: %s\n" % (cp,)) self.break_on_caught_exceptions = cp return eb def update_after_exceptions_added(self, added): updated_on_caught = False updated_on_uncaught = False for eb in added: if not updated_on_uncaught and eb.notify_on_terminate: updated_on_uncaught = True update_exception_hook(self) if not updated_on_caught and eb.notify_always: updated_on_caught = True self.set_tracing_for_untraced_contexts() def _process_thread_not_alive(self, threadId): """ if thread is not alive, cancel trace_dispatch processing """ self._lock_running_thread_ids.acquire() try: thread = self._running_thread_ids.pop(threadId, None) if thread is None: return wasNotified = thread.additional_info.pydev_notify_kill if not wasNotified: thread.additional_info.pydev_notify_kill = True finally: self._lock_running_thread_ids.release() cmd = self.cmd_factory.make_thread_killed_message(threadId) self.writer.add_command(cmd) def set_suspend(self, thread, stop_reason): thread.additional_info.suspend_type = PYTHON_SUSPEND thread.additional_info.pydev_state = STATE_SUSPEND thread.stop_reason = stop_reason # If conditional breakpoint raises any exception during evaluation send details to Java if stop_reason == CMD_SET_BREAK and self.suspend_on_breakpoint_exception: self._send_breakpoint_condition_exception(thread) def _send_breakpoint_condition_exception(self, thread): """If conditional breakpoint raises an exception during evaluation send exception details to java """ thread_id = get_thread_id(thread) conditional_breakpoint_exception_tuple = thread.additional_info.conditional_breakpoint_exception # conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace) if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2: exc_type, stacktrace = conditional_breakpoint_exception_tuple int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace) # Reset the conditional_breakpoint_exception details to None thread.additional_info.conditional_breakpoint_exception = None self.post_internal_command(int_cmd, thread_id) def 
send_caught_exception_stack(self, thread, arg, curr_frame_id): """Sends details on the exception which was caught (and where we stopped) to the java side. arg is: exception type, description, traceback object """ thread_id = get_thread_id(thread) int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id) self.post_internal_command(int_cmd, thread_id) def send_caught_exception_stack_proceeded(self, thread): """Sends that some thread was resumed and is no longer showing an exception trace. """ thread_id = get_thread_id(thread) int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id) self.post_internal_command(int_cmd, thread_id) self.process_internal_commands() def do_wait_suspend(self, thread, frame, event, arg): #@UnusedVariable """ busy waits until the thread state changes to RUN it expects thread's state as attributes of the thread. Upon running, processes any outstanding Stepping commands. """ self.process_internal_commands() message = thread.additional_info.pydev_message cmd = self.cmd_factory.make_thread_suspend_message(get_thread_id(thread), frame, thread.stop_reason, message) self.writer.add_command(cmd) CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable try: from_this_thread = [] for frame_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames): if custom_frame.thread_id == thread.ident: # print >> sys.stderr, 'Frame created: ', frame_id self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(frame_id, custom_frame.name)) self.writer.add_command(self.cmd_factory.make_thread_suspend_message(frame_id, custom_frame.frame, CMD_THREAD_SUSPEND, "")) from_this_thread.append(frame_id) finally: CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable imported = False info = thread.additional_info if info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session: # before every stop check if matplotlib modules were imported inside script code if len(self.mpl_modules_for_patching) > 0: for module in dict_keys(self.mpl_modules_for_patching): if module in sys.modules: activate_function = dict_pop(self.mpl_modules_for_patching, module) activate_function() self.mpl_in_use = True while info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session: if self.mpl_in_use: # call input hooks if only matplotlib is in use try: if not imported: from pydev_ipython.inputhook import get_inputhook imported = True inputhook = get_inputhook() if inputhook: inputhook() except: pass self.process_internal_commands() time.sleep(0.01) # process any stepping instructions if info.pydev_step_cmd == CMD_STEP_INTO or info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE: info.pydev_step_stop = None info.pydev_smart_step_stop = None elif info.pydev_step_cmd == CMD_STEP_OVER: info.pydev_step_stop = frame info.pydev_smart_step_stop = None self.set_trace_for_frame_and_parents(frame) elif info.pydev_step_cmd == CMD_SMART_STEP_INTO: self.set_trace_for_frame_and_parents(frame) info.pydev_step_stop = None info.pydev_smart_step_stop = frame elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT : self.set_trace_for_frame_and_parents(frame) if event == 'line' or event == 'exception': #If we're already in the correct context, we have to stop it now, because we can act only on #line events -- if a return was the next statement it wouldn't work (so, we have this code #repeated at pydevd_frame). 
stop = False curr_func_name = frame.f_code.co_name #global context is set with an empty name if curr_func_name in ('?', '<module>'): curr_func_name = '' if curr_func_name == info.pydev_func_name: line = info.pydev_next_line if frame.f_lineno == line: stop = True else : if frame.f_trace is None: frame.f_trace = self.trace_dispatch frame.f_lineno = line frame.f_trace = None stop = True if stop: info.pydev_state = STATE_SUSPEND self.do_wait_suspend(thread, frame, event, arg) return elif info.pydev_step_cmd == CMD_STEP_RETURN: back_frame = frame.f_back if back_frame is not None: # steps back to the same frame (in a return call it will stop in the 'back frame' for the user) info.pydev_step_stop = frame self.set_trace_for_frame_and_parents(frame) else: # No back frame?!? -- this happens in jython when we have some frame created from an awt event # (the previous frame would be the awt event, but this doesn't make part of 'jython', only 'java') # so, if we're doing a step return in this situation, it's the same as just making it run info.pydev_step_stop = None info.pydev_step_cmd = -1 info.pydev_state = STATE_RUN del frame cmd = self.cmd_factory.make_thread_run_message(get_thread_id(thread), info.pydev_step_cmd) self.writer.add_command(cmd) CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable try: # The ones that remained on last_running must now be removed. for frame_id in from_this_thread: # print >> sys.stderr, 'Removing created frame: ', frame_id self.writer.add_command(self.cmd_factory.make_thread_killed_message(frame_id)) finally: CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable def handle_post_mortem_stop(self, thread, frame, frames_byid, exception): pydev_log.debug("We are stopping in post-mortem\n") thread_id = get_thread_id(thread) pydevd_vars.add_additional_frame_by_id(thread_id, frames_byid) try: try: add_exception_to_frame(frame, exception) self.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK) self.do_wait_suspend(thread, frame, 'exception', None) except: pydev_log.error("We've got an error while stopping in post-mortem: %s\n"%sys.exc_info()[0]) finally: pydevd_vars.remove_additional_frame_by_id(thread_id) def set_trace_for_frame_and_parents(self, frame, also_add_to_passed_frame=True, overwrite_prev_trace=False, dispatch_func=None): if dispatch_func is None: dispatch_func = self.trace_dispatch if also_add_to_passed_frame: self.update_trace(frame, dispatch_func, overwrite_prev_trace) frame = frame.f_back while frame: self.update_trace(frame, dispatch_func, overwrite_prev_trace) frame = frame.f_back del frame def update_trace(self, frame, dispatch_func, overwrite_prev): if frame.f_trace is None: frame.f_trace = dispatch_func else: if overwrite_prev: frame.f_trace = dispatch_func else: try: #If it's the trace_exception, go back to the frame trace dispatch! if frame.f_trace.im_func.__name__ == 'trace_exception': frame.f_trace = frame.f_trace.im_self.trace_dispatch except AttributeError: pass frame = frame.f_back del frame def prepare_to_run(self): ''' Shared code to prepare debugging by installing traces and registering threads ''' self.patch_threads() pydevd_tracing.SetTrace(self.trace_dispatch) PyDBCommandThread(self).start() if self.signature_factory is not None or self.thread_analyser is not None: # we need all data to be sent to IDE even after program finishes CheckOutputThread(self).start() def patch_threads(self): try: # not available in jython! 
threading.settrace(self.trace_dispatch) # for all future threads except: pass from _pydev_bundle.pydev_monkey import patch_thread_modules patch_thread_modules() def get_fullname(self, mod_name): if IS_PY3K: import pkgutil else: from _pydev_imps import _pydev_pkgutil_old as pkgutil try: loader = pkgutil.get_loader(mod_name) except: return None if loader is not None: for attr in ("get_filename", "_get_filename"): meth = getattr(loader, attr, None) if meth is not None: return meth(mod_name) return None def run(self, file, globals=None, locals=None, module=False, set_trace=True): if module: filename = self.get_fullname(file) if filename is None: sys.stderr.write("No module named %s\n" % file) return else: file = filename if os.path.isdir(file): new_target = os.path.join(file, '__main__.py') if os.path.isfile(new_target): file = new_target if globals is None: m = save_main_module(file, 'pydevd') globals = m.__dict__ try: globals['__builtins__'] = __builtins__ except NameError: pass # Not there on Jython... if locals is None: locals = globals if set_trace: # Predefined (writable) attributes: __name__ is the module's name; # __doc__ is the module's documentation string, or None if unavailable; # __file__ is the pathname of the file from which the module was loaded, # if it was loaded from a file. The __file__ attribute is not present for # C modules that are statically linked into the interpreter; for extension modules # loaded dynamically from a shared library, it is the pathname of the shared library file. # I think this is an ugly hack, bug it works (seems to) for the bug that says that sys.path should be the same in # debug and run. if m.__file__.startswith(sys.path[0]): # print >> sys.stderr, 'Deleting: ', sys.path[0] del sys.path[0] # now, the local directory has to be added to the pythonpath # sys.path.insert(0, os.getcwd()) # Changed: it's not the local directory, but the directory of the file launched # The file being run ust be in the pythonpath (even if it was not before) sys.path.insert(0, os.path.split(file)[0]) self.prepare_to_run() while not self.ready_to_run: time.sleep(0.1) # busy wait until we receive run command if self.thread_analyser is not None: wrap_threads() t = threadingCurrentThread() self.thread_analyser.set_start_time(cur_time()) send_message("threading_event", 0, t.getName(), get_thread_id(t), "thread", "start", file, 1, None, parent=get_thread_id(t)) if self.asyncio_analyser is not None: # we don't have main thread in asyncio graph, so we should add a fake event send_message("asyncio_event", 0, "Task", "Task", "thread", "stop", file, 1, frame=None, parent=None) try: self.init_matplotlib_support() except: sys.stderr.write("Matplotlib support in debugger failed\n") traceback.print_exc() pydev_imports.execfile(file, globals, locals) # execute the script def exiting(self): sys.stdout.flush() sys.stderr.flush() self.check_output_redirect() cmd = self.cmd_factory.make_exit_message() self.writer.add_command(cmd) def wait_for_commands(self, globals): thread = threading.currentThread() from _pydevd_bundle import pydevd_frame_utils frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console", os.path.abspath(os.path.dirname(__file__))), globals, globals) thread_id = get_thread_id(thread) from _pydevd_bundle import pydevd_vars pydevd_vars.add_additional_frame_by_id(thread_id, {id(frame): frame}) cmd = self.cmd_factory.make_show_console_message(thread_id, frame) self.writer.add_command(cmd) while True: self.process_internal_commands() time.sleep(0.01) 
trace_dispatch = _trace_dispatch def set_debug(setup): setup['DEBUG_RECORD_SOCKET_READS'] = True setup['DEBUG_TRACE_BREAKPOINTS'] = 1 setup['DEBUG_TRACE_LEVEL'] = 3 def enable_qt_support(): from _pydev_bundle import pydev_monkey_qt pydev_monkey_qt.patch_qt() def process_command_line(argv): """ parses the arguments. removes our arguments from the command line """ setup = {} setup['client'] = '' setup['server'] = False setup['port'] = 0 setup['file'] = '' setup['multiproc'] = False #Used by PyCharm (reuses connection: ssh tunneling) setup['multiprocess'] = False # Used by PyDev (creates new connection to ide) setup['save-signatures'] = False setup['save-threading'] = False setup['save-asyncio'] = False setup['qt-support'] = False setup['print-in-debugger-startup'] = False setup['cmd-line'] = False setup['module'] = False i = 0 del argv[0] while (i < len(argv)): if argv[i] == '--port': del argv[i] setup['port'] = int(argv[i]) del argv[i] elif argv[i] == '--vm_type': del argv[i] setup['vm_type'] = argv[i] del argv[i] elif argv[i] == '--client': del argv[i] setup['client'] = argv[i] del argv[i] elif argv[i] == '--server': del argv[i] setup['server'] = True elif argv[i] == '--file': del argv[i] setup['file'] = argv[i] i = len(argv) # pop out, file is our last argument elif argv[i] == '--DEBUG_RECORD_SOCKET_READS': del argv[i] setup['DEBUG_RECORD_SOCKET_READS'] = True elif argv[i] == '--DEBUG': del argv[i] set_debug(setup) elif argv[i] == '--multiproc': del argv[i] setup['multiproc'] = True elif argv[i] == '--multiprocess': del argv[i] setup['multiprocess'] = True elif argv[i] == '--save-signatures': del argv[i] setup['save-signatures'] = True elif argv[i] == '--save-threading': del argv[i] setup['save-threading'] = True elif argv[i] == '--save-asyncio': del argv[i] setup['save-asyncio'] = True elif argv[i] == '--qt-support': del argv[i] setup['qt-support'] = True elif argv[i] == '--print-in-debugger-startup': del argv[i] setup['print-in-debugger-startup'] = True elif (argv[i] == '--cmd-line'): del argv[i] setup['cmd-line'] = True elif (argv[i] == '--module'): del argv[i] setup['module'] = True else: raise ValueError("unexpected option " + argv[i]) return setup def usage(doExit=0): sys.stdout.write('Usage:\n') sys.stdout.write('pydevd.py --port=N [(--client hostname) | --server] --file executable [file_options]\n') if doExit: sys.exit(0) def init_stdout_redirect(): if not getattr(sys, 'stdoutBuf', None): sys.stdoutBuf = pydevd_io.IOBuf() sys.stdout_original = sys.stdout sys.stdout = pydevd_io.IORedirector(sys.stdout, sys.stdoutBuf) #@UndefinedVariable def init_stderr_redirect(): if not getattr(sys, 'stderrBuf', None): sys.stderrBuf = pydevd_io.IOBuf() sys.stderr_original = sys.stderr sys.stderr = pydevd_io.IORedirector(sys.stderr, sys.stderrBuf) #@UndefinedVariable def has_data_to_redirect(): if getattr(sys, 'stdoutBuf', None): if not sys.stdoutBuf.empty(): return True if getattr(sys, 'stderrBuf', None): if not sys.stderrBuf.empty(): return True return False #======================================================================================================================= # settrace #======================================================================================================================= def settrace( host=None, stdoutToServer=False, stderrToServer=False, port=5678, suspend=True, trace_only_current_thread=False, overwrite_prev_trace=False, patch_multiprocessing=False, ): '''Sets the tracing function with the pydev debug function and initializes needed facilities. 
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local host) @param stdoutToServer: when this is true, the stdout is passed to the debug server @param stderrToServer: when this is true, the stderr is passed to the debug server so that they are printed in its console and not in this process console. @param port: specifies which port to use for communicating with the server (note that the server must be started in the same port). @note: currently it's hard-coded at 5678 in the client @param suspend: whether a breakpoint should be emulated as soon as this function is called. @param trace_only_current_thread: determines if only the current thread will be traced or all current and future threads will also have the tracing enabled. @param overwrite_prev_trace: if True we'll reset the frame.f_trace of frames which are already being traced @param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched processes are debugged. ''' _set_trace_lock.acquire() try: _locked_settrace( host, stdoutToServer, stderrToServer, port, suspend, trace_only_current_thread, overwrite_prev_trace, patch_multiprocessing, ) finally: _set_trace_lock.release() _set_trace_lock = thread.allocate_lock() def _locked_settrace( host, stdoutToServer, stderrToServer, port, suspend, trace_only_current_thread, overwrite_prev_trace, patch_multiprocessing, ): if patch_multiprocessing: try: from _pydev_bundle import pydev_monkey except: pass else: pydev_monkey.patch_new_process_functions() if host is None: from _pydev_bundle import pydev_localhost host = pydev_localhost.get_localhost() global connected global bufferStdOutToServer global bufferStdErrToServer if not connected : pydevd_vm_type.setup_type() debugger = PyDB() debugger.connect(host, port) # Note: connect can raise error. # Mark connected only if it actually succeeded. connected = True bufferStdOutToServer = stdoutToServer bufferStdErrToServer = stderrToServer if bufferStdOutToServer: init_stdout_redirect() if bufferStdErrToServer: init_stderr_redirect() debugger.set_trace_for_frame_and_parents(get_frame(), False, overwrite_prev_trace=overwrite_prev_trace) CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable try: for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames): debugger.set_trace_for_frame_and_parents(custom_frame.frame, False) finally: CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable t = threadingCurrentThread() try: additional_info = t.additional_info except AttributeError: additional_info = PyDBAdditionalThreadInfo() t.additional_info = additional_info while not debugger.ready_to_run: time.sleep(0.1) # busy wait until we receive run command # note that we do that through pydevd_tracing.SetTrace so that the tracing # is not warned to the user! pydevd_tracing.SetTrace(debugger.trace_dispatch) if not trace_only_current_thread: # Trace future threads? debugger.patch_threads() # As this is the first connection, also set tracing for any untraced threads debugger.set_tracing_for_untraced_contexts(ignore_frame=get_frame(), overwrite_prev_trace=overwrite_prev_trace) # Stop the tracing as the last thing before the actual shutdown for a clean exit. atexit.register(stoptrace) PyDBCommandThread(debugger).start() CheckOutputThread(debugger).start() #Suspend as the last thing after all tracing is in place. 
if suspend: debugger.set_suspend(t, CMD_THREAD_SUSPEND) else: # ok, we're already in debug mode, with all set, so, let's just set the break debugger = get_global_debugger() debugger.set_trace_for_frame_and_parents(get_frame(), False) t = threadingCurrentThread() try: additional_info = t.additional_info except AttributeError: additional_info = PyDBAdditionalThreadInfo() t.additional_info = additional_info pydevd_tracing.SetTrace(debugger.trace_dispatch) if not trace_only_current_thread: # Trace future threads? debugger.patch_threads() if suspend: debugger.set_suspend(t, CMD_THREAD_SUSPEND) def stoptrace(): global connected if connected: pydevd_tracing.restore_sys_set_trace_func() sys.settrace(None) try: #not available in jython! threading.settrace(None) # for all future threads except: pass from _pydev_bundle.pydev_monkey import undo_patch_thread_modules undo_patch_thread_modules() debugger = get_global_debugger() if debugger: debugger.set_trace_for_frame_and_parents( get_frame(), also_add_to_passed_frame=True, overwrite_prev_trace=True, dispatch_func=lambda *args:None) debugger.exiting() kill_all_pydev_threads() connected = False class Dispatcher(object): def __init__(self): self.port = None def connect(self, host, port): self.host = host self.port = port self.client = start_client(self.host, self.port) self.reader = DispatchReader(self) self.reader.dontTraceMe = False #we run reader in the same thread so we don't want to loose tracing self.reader.run() def close(self): try: self.reader.do_kill_pydev_thread() except : pass class DispatchReader(ReaderThread): def __init__(self, dispatcher): self.dispatcher = dispatcher ReaderThread.__init__(self, self.dispatcher.client) def _on_run(self): dummy_thread = threading.currentThread() dummy_thread.is_pydev_daemon_thread = False return ReaderThread._on_run(self) def handle_except(self): ReaderThread.handle_except(self) def process_command(self, cmd_id, seq, text): if cmd_id == 99: self.dispatcher.port = int(text) self.killReceived = True DISPATCH_APPROACH_NEW_CONNECTION = 1 # Used by PyDev DISPATCH_APPROACH_EXISTING_CONNECTION = 2 # Used by PyCharm DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION def dispatch(): setup = SetupHolder.setup host = setup['client'] port = setup['port'] if DISPATCH_APPROACH == DISPATCH_APPROACH_EXISTING_CONNECTION: dispatcher = Dispatcher() try: dispatcher.connect(host, port) port = dispatcher.port finally: dispatcher.close() return host, port def settrace_forked(): ''' When creating a fork from a process in the debugger, we need to reset the whole debugger environment! ''' host, port = dispatch() from _pydevd_bundle import pydevd_tracing pydevd_tracing.restore_sys_set_trace_func() if port is not None: global connected connected = False custom_frames_container_init() settrace( host, port=port, suspend=False, trace_only_current_thread=False, overwrite_prev_trace=True, patch_multiprocessing=True, ) #======================================================================================================================= # SetupHolder #======================================================================================================================= class SetupHolder: setup = None #======================================================================================================================= # main #======================================================================================================================= if __name__ == '__main__': # parse the command line. 
--file is our last argument that is required try: sys.original_argv = sys.argv[:] setup = process_command_line(sys.argv) SetupHolder.setup = setup except ValueError: traceback.print_exc() usage(1) if setup['print-in-debugger-startup']: try: pid = ' (pid: %s)' % os.getpid() except: pid = '' sys.stderr.write("pydev debugger: starting%s\n" % pid) fix_getpass.fix_getpass() pydev_log.debug("Executing file %s" % setup['file']) pydev_log.debug("arguments: %s"% str(sys.argv)) pydevd_vm_type.setup_type(setup.get('vm_type', None)) if os.getenv('PYCHARM_DEBUG') or os.getenv('PYDEV_DEBUG'): set_debug(setup) DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', DebugInfoHolder.DEBUG_RECORD_SOCKET_READS) DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS) DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', DebugInfoHolder.DEBUG_TRACE_LEVEL) port = setup['port'] host = setup['client'] f = setup['file'] fix_app_engine_debug = False debugger = PyDB() try: from _pydev_bundle import pydev_monkey except: pass #Not usable on jython 2.1 else: if setup['multiprocess']: # PyDev pydev_monkey.patch_new_process_functions() elif setup['multiproc']: # PyCharm pydev_log.debug("Started in multiproc mode\n") # Note: we're not inside method, so, no need for 'global' DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION dispatcher = Dispatcher() try: dispatcher.connect(host, port) if dispatcher.port is not None: port = dispatcher.port pydev_log.debug("Received port %d\n" %port) pydev_log.info("pydev debugger: process %d is connecting\n"% os.getpid()) try: pydev_monkey.patch_new_process_functions() except: pydev_log.error("Error patching process functions\n") traceback.print_exc() else: pydev_log.error("pydev debugger: couldn't get port for new debug process\n") finally: dispatcher.close() else: pydev_log.info("pydev debugger: starting\n") try: pydev_monkey.patch_new_process_functions_with_warning() except: pydev_log.error("Error patching process functions\n") traceback.print_exc() # Only do this patching if we're not running with multiprocess turned on. if f.find('dev_appserver.py') != -1: if os.path.basename(f).startswith('dev_appserver.py'): appserver_dir = os.path.dirname(f) version_file = os.path.join(appserver_dir, 'VERSION') if os.path.exists(version_file): try: stream = open(version_file, 'r') try: for line in stream.read().splitlines(): line = line.strip() if line.startswith('release:'): line = line[8:].strip() version = line.replace('"', '') version = version.split('.') if int(version[0]) > 1: fix_app_engine_debug = True elif int(version[0]) == 1: if int(version[1]) >= 7: # Only fix from 1.7 onwards fix_app_engine_debug = True break finally: stream.close() except: traceback.print_exc() try: # In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible # on a run where we have a remote debug, we may have to be more careful because patching stackless means # that if the user already had a stackless.set_schedule_callback installed, he'd loose it and would need # to call it again (because stackless provides no way of getting the last function which was registered # in set_schedule_callback). 
# # So, ideally, if there's an application using stackless and the application wants to use the remote debugger # and benefit from stackless debugging, the application itself must call: # # import pydevd_stackless # pydevd_stackless.patch_stackless() # # itself to be able to benefit from seeing the tasklets created before the remote debugger is attached. from _pydevd_bundle import pydevd_stackless pydevd_stackless.patch_stackless() except: pass # It's ok not having stackless there... is_module = setup['module'] if fix_app_engine_debug: sys.stderr.write("pydev debugger: google app engine integration enabled\n") curr_dir = os.path.dirname(__file__) app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py') sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file) import json setup['pydevd'] = __file__ sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),) sys.argv.insert(3, '--automatic_restart=no') sys.argv.insert(4, '--max_module_instances=1') # Run the dev_appserver debugger.run(setup['file'], None, None, is_module, set_trace=False) else: if setup['save-signatures']: if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON: sys.stderr.write("Collecting run-time type information is not supported for Jython\n") else: # Only import it if we're going to use it! from _pydevd_bundle.pydevd_signature import SignatureFactory debugger.signature_factory = SignatureFactory() if setup['qt-support']: enable_qt_support() if setup['save-threading']: debugger.thread_analyser = ThreadingLogger() if setup['save-asyncio']: if IS_PY34_OLDER: debugger.asyncio_analyser = AsyncioLogger() try: debugger.connect(host, port) except: sys.stderr.write("Could not connect to %s: %s\n" % (host, port)) traceback.print_exc() sys.exit(1) connected = True # Mark that we're connected when started from inside ide. globals = debugger.run(setup['file'], None, None, is_module) if setup['cmd-line']: debugger.wait_for_commands(globals)
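For orientation, the settrace() entry point documented above is what user code calls to attach to an already-running debug server. A minimal sketch of such a call follows; the host value is a placeholder assumption, and only the 5678 default comes from the docstring:

import pydevd

pydevd.settrace(
    host='10.0.0.5',                  # machine running the IDE's debug server (placeholder)
    port=5678,                        # must match the port the server listens on (default per docstring)
    stdoutToServer=True,              # mirror this process's stdout in the server console
    stderrToServer=True,              # mirror stderr as well
    suspend=True,                     # emulate a breakpoint right at this call
    trace_only_current_thread=False,  # also trace other existing and future threads
    patch_multiprocessing=True,       # patch process-creation functions so children are debugged
)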
epl-1.0
moutai/scikit-learn
examples/gaussian_process/plot_gpr_prior_posterior.py
104
2878
""" ========================================================================== Illustration of prior and posterior Gaussian process for different kernels ========================================================================== This example illustrates the prior and posterior of a GPR with different kernels. Mean, standard deviation, and 10 samples are shown for both prior and posterior. """ print(__doc__) # Authors: Jan Hendrik Metzen <[email protected]> # # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel) kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)), 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1), 1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0, length_scale_bounds=(0.1, 10.0), periodicity_bounds=(1.0, 10.0)), ConstantKernel(0.1, (0.01, 10.0)) * (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2), 1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0), nu=1.5)] for fig_index, kernel in enumerate(kernels): # Specify Gaussian Process gp = GaussianProcessRegressor(kernel=kernel) # Plot prior plt.figure(fig_index, figsize=(8, 8)) plt.subplot(2, 1, 1) X_ = np.linspace(0, 5, 100) y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True) plt.plot(X_, y_mean, 'k', lw=3, zorder=9) plt.fill_between(X_, y_mean - y_std, y_mean + y_std, alpha=0.5, color='k') y_samples = gp.sample_y(X_[:, np.newaxis], 10) plt.plot(X_, y_samples, lw=1) plt.xlim(0, 5) plt.ylim(-3, 3) plt.title("Prior (kernel: %s)" % kernel, fontsize=12) # Generate data and fit GP rng = np.random.RandomState(4) X = rng.uniform(0, 5, 10)[:, np.newaxis] y = np.sin((X[:, 0] - 2.5) ** 2) gp.fit(X, y) # Plot posterior plt.subplot(2, 1, 2) X_ = np.linspace(0, 5, 100) y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True) plt.plot(X_, y_mean, 'k', lw=3, zorder=9) plt.fill_between(X_, y_mean - y_std, y_mean + y_std, alpha=0.5, color='k') y_samples = gp.sample_y(X_[:, np.newaxis], 10) plt.plot(X_, y_samples, lw=1) plt.scatter(X[:, 0], y, c='r', s=50, zorder=10) plt.xlim(0, 5) plt.ylim(-3, 3) plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f" % (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)), fontsize=12) plt.tight_layout() plt.show()
bsd-3-clause
jkthompson/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/_mathtext_data.py
69
57988
""" font data tables for truetype and afm computer modern fonts """ # this dict maps symbol names to fontnames, glyphindex. To get the # glyph index from the character code, you have to use get_charmap """ from matplotlib.ft2font import FT2Font font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf') items = font.get_charmap().items() items.sort() for charcode, glyphind in items: print charcode, glyphind """ latex_to_bakoma = { r'\oint' : ('cmex10', 45), r'\bigodot' : ('cmex10', 50), r'\bigoplus' : ('cmex10', 55), r'\bigotimes' : ('cmex10', 59), r'\sum' : ('cmex10', 51), r'\prod' : ('cmex10', 24), r'\int' : ('cmex10', 56), r'\bigcup' : ('cmex10', 28), r'\bigcap' : ('cmex10', 60), r'\biguplus' : ('cmex10', 32), r'\bigwedge' : ('cmex10', 4), r'\bigvee' : ('cmex10', 37), r'\coprod' : ('cmex10', 42), r'\__sqrt__' : ('cmex10', 48), r'\leftbrace' : ('cmex10', 92), r'{' : ('cmex10', 92), r'\{' : ('cmex10', 92), r'\rightbrace' : ('cmex10', 130), r'}' : ('cmex10', 130), r'\}' : ('cmex10', 130), r'\leftangle' : ('cmex10', 97), r'\rightangle' : ('cmex10', 64), r'\langle' : ('cmex10', 97), r'\rangle' : ('cmex10', 64), r'\widehat' : ('cmex10', 15), r'\widetilde' : ('cmex10', 52), r'\omega' : ('cmmi10', 29), r'\varepsilon' : ('cmmi10', 20), r'\vartheta' : ('cmmi10', 22), r'\varrho' : ('cmmi10', 61), r'\varsigma' : ('cmmi10', 41), r'\varphi' : ('cmmi10', 6), r'\leftharpoonup' : ('cmmi10', 108), r'\leftharpoondown' : ('cmmi10', 68), r'\rightharpoonup' : ('cmmi10', 117), r'\rightharpoondown' : ('cmmi10', 77), r'\triangleright' : ('cmmi10', 130), r'\triangleleft' : ('cmmi10', 89), r'.' : ('cmmi10', 51), r',' : ('cmmi10', 44), r'<' : ('cmmi10', 99), r'/' : ('cmmi10', 98), r'>' : ('cmmi10', 107), r'\flat' : ('cmmi10', 131), r'\natural' : ('cmmi10', 90), r'\sharp' : ('cmmi10', 50), r'\smile' : ('cmmi10', 97), r'\frown' : ('cmmi10', 58), r'\ell' : ('cmmi10', 102), r'\imath' : ('cmmi10', 8), r'\jmath' : ('cmmi10', 65), r'\wp' : ('cmmi10', 14), r'\alpha' : ('cmmi10', 13), r'\beta' : ('cmmi10', 35), r'\gamma' : ('cmmi10', 24), r'\delta' : ('cmmi10', 38), r'\epsilon' : ('cmmi10', 54), r'\zeta' : ('cmmi10', 10), r'\eta' : ('cmmi10', 5), r'\theta' : ('cmmi10', 18), r'\iota' : ('cmmi10', 28), r'\lambda' : ('cmmi10', 9), r'\mu' : ('cmmi10', 32), r'\nu' : ('cmmi10', 34), r'\xi' : ('cmmi10', 7), r'\pi' : ('cmmi10', 36), r'\kappa' : ('cmmi10', 30), r'\rho' : ('cmmi10', 39), r'\sigma' : ('cmmi10', 21), r'\tau' : ('cmmi10', 43), r'\upsilon' : ('cmmi10', 25), r'\phi' : ('cmmi10', 42), r'\chi' : ('cmmi10', 17), r'\psi' : ('cmmi10', 31), r'|' : ('cmsy10', 47), r'\|' : ('cmsy10', 47), r'(' : ('cmr10', 119), r'\leftparen' : ('cmr10', 119), r'\rightparen' : ('cmr10', 68), r')' : ('cmr10', 68), r'+' : ('cmr10', 76), r'0' : ('cmr10', 40), r'1' : ('cmr10', 100), r'2' : ('cmr10', 49), r'3' : ('cmr10', 110), r'4' : ('cmr10', 59), r'5' : ('cmr10', 120), r'6' : ('cmr10', 69), r'7' : ('cmr10', 127), r'8' : ('cmr10', 77), r'9' : ('cmr10', 22), r':' : ('cmr10', 85), r';' : ('cmr10', 31), r'=' : ('cmr10', 41), r'\leftbracket' : ('cmr10', 62), r'[' : ('cmr10', 62), r'\rightbracket' : ('cmr10', 72), r']' : ('cmr10', 72), r'\%' : ('cmr10', 48), r'%' : ('cmr10', 48), r'\$' : ('cmr10', 99), r'@' : ('cmr10', 111), r'\_' : ('cmtt10', 79), r'\Gamma' : ('cmr10', 19), r'\Delta' : ('cmr10', 6), r'\Theta' : ('cmr10', 7), r'\Lambda' : ('cmr10', 14), r'\Xi' : ('cmr10', 3), r'\Pi' : ('cmr10', 17), r'\Sigma' : ('cmr10', 10), r'\Upsilon' : ('cmr10', 11), r'\Phi' : ('cmr10', 9), r'\Psi' : ('cmr10', 15), r'\Omega' : ('cmr10', 12), # these are mathml names, I 
think. I'm just using them for the # tex methods noted r'\circumflexaccent' : ('cmr10', 124), # for \hat r'\combiningbreve' : ('cmr10', 81), # for \breve r'\combiningoverline' : ('cmr10', 131), # for \bar r'\combininggraveaccent' : ('cmr10', 114), # for \grave r'\combiningacuteaccent' : ('cmr10', 63), # for \accute r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot r'\combiningtilde' : ('cmr10', 75), # for \tilde r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec r'\combiningdotabove' : ('cmr10', 26), # for \dot r'\leftarrow' : ('cmsy10', 10), r'\uparrow' : ('cmsy10', 25), r'\downarrow' : ('cmsy10', 28), r'\leftrightarrow' : ('cmsy10', 24), r'\nearrow' : ('cmsy10', 99), r'\searrow' : ('cmsy10', 57), r'\simeq' : ('cmsy10', 108), r'\Leftarrow' : ('cmsy10', 104), r'\Rightarrow' : ('cmsy10', 112), r'\Uparrow' : ('cmsy10', 60), r'\Downarrow' : ('cmsy10', 68), r'\Leftrightarrow' : ('cmsy10', 51), r'\nwarrow' : ('cmsy10', 65), r'\swarrow' : ('cmsy10', 116), r'\propto' : ('cmsy10', 15), r'\prime' : ('cmsy10', 73), r"'" : ('cmsy10', 73), r'\infty' : ('cmsy10', 32), r'\in' : ('cmsy10', 59), r'\ni' : ('cmsy10', 122), r'\bigtriangleup' : ('cmsy10', 80), r'\bigtriangledown' : ('cmsy10', 132), r'\slash' : ('cmsy10', 87), r'\forall' : ('cmsy10', 21), r'\exists' : ('cmsy10', 5), r'\neg' : ('cmsy10', 20), r'\emptyset' : ('cmsy10', 33), r'\Re' : ('cmsy10', 95), r'\Im' : ('cmsy10', 52), r'\top' : ('cmsy10', 100), r'\bot' : ('cmsy10', 11), r'\aleph' : ('cmsy10', 26), r'\cup' : ('cmsy10', 6), r'\cap' : ('cmsy10', 19), r'\uplus' : ('cmsy10', 58), r'\wedge' : ('cmsy10', 43), r'\vee' : ('cmsy10', 96), r'\vdash' : ('cmsy10', 109), r'\dashv' : ('cmsy10', 66), r'\lfloor' : ('cmsy10', 117), r'\rfloor' : ('cmsy10', 74), r'\lceil' : ('cmsy10', 123), r'\rceil' : ('cmsy10', 81), r'\lbrace' : ('cmsy10', 92), r'\rbrace' : ('cmsy10', 105), r'\mid' : ('cmsy10', 47), r'\vert' : ('cmsy10', 47), r'\Vert' : ('cmsy10', 44), r'\updownarrow' : ('cmsy10', 94), r'\Updownarrow' : ('cmsy10', 53), r'\backslash' : ('cmsy10', 126), r'\wr' : ('cmsy10', 101), r'\nabla' : ('cmsy10', 110), r'\sqcup' : ('cmsy10', 67), r'\sqcap' : ('cmsy10', 118), r'\sqsubseteq' : ('cmsy10', 75), r'\sqsupseteq' : ('cmsy10', 124), r'\S' : ('cmsy10', 129), r'\dag' : ('cmsy10', 71), r'\ddag' : ('cmsy10', 127), r'\P' : ('cmsy10', 130), r'\clubsuit' : ('cmsy10', 18), r'\diamondsuit' : ('cmsy10', 34), r'\heartsuit' : ('cmsy10', 22), r'-' : ('cmsy10', 17), r'\cdot' : ('cmsy10', 78), r'\times' : ('cmsy10', 13), r'*' : ('cmsy10', 9), r'\ast' : ('cmsy10', 9), r'\div' : ('cmsy10', 31), r'\diamond' : ('cmsy10', 48), r'\pm' : ('cmsy10', 8), r'\mp' : ('cmsy10', 98), r'\oplus' : ('cmsy10', 16), r'\ominus' : ('cmsy10', 56), r'\otimes' : ('cmsy10', 30), r'\oslash' : ('cmsy10', 107), r'\odot' : ('cmsy10', 64), r'\bigcirc' : ('cmsy10', 115), r'\circ' : ('cmsy10', 72), r'\bullet' : ('cmsy10', 84), r'\asymp' : ('cmsy10', 121), r'\equiv' : ('cmsy10', 35), r'\subseteq' : ('cmsy10', 103), r'\supseteq' : ('cmsy10', 42), r'\leq' : ('cmsy10', 14), r'\geq' : ('cmsy10', 29), r'\preceq' : ('cmsy10', 79), r'\succeq' : ('cmsy10', 131), r'\sim' : ('cmsy10', 27), r'\approx' : ('cmsy10', 23), r'\subset' : ('cmsy10', 50), r'\supset' : ('cmsy10', 86), r'\ll' : ('cmsy10', 85), r'\gg' : ('cmsy10', 40), r'\prec' : ('cmsy10', 93), r'\succ' : ('cmsy10', 49), r'\rightarrow' : ('cmsy10', 12), r'\to' : ('cmsy10', 12), r'\spadesuit' : ('cmsy10', 7), } latex_to_cmex = { r'\__sqrt__' : 112, r'\bigcap' : 92, r'\bigcup' : 91, r'\bigodot' : 75, r'\bigoplus' : 77, r'\bigotimes' : 79, r'\biguplus' : 93, 
r'\bigvee' : 95, r'\bigwedge' : 94, r'\coprod' : 97, r'\int' : 90, r'\leftangle' : 173, r'\leftbrace' : 169, r'\oint' : 73, r'\prod' : 89, r'\rightangle' : 174, r'\rightbrace' : 170, r'\sum' : 88, r'\widehat' : 98, r'\widetilde' : 101, } latex_to_standard = { r'\cong' : ('psyr', 64), r'\Delta' : ('psyr', 68), r'\Phi' : ('psyr', 70), r'\Gamma' : ('psyr', 89), r'\alpha' : ('psyr', 97), r'\beta' : ('psyr', 98), r'\chi' : ('psyr', 99), r'\delta' : ('psyr', 100), r'\varepsilon' : ('psyr', 101), r'\phi' : ('psyr', 102), r'\gamma' : ('psyr', 103), r'\eta' : ('psyr', 104), r'\iota' : ('psyr', 105), r'\varpsi' : ('psyr', 106), r'\kappa' : ('psyr', 108), r'\nu' : ('psyr', 110), r'\pi' : ('psyr', 112), r'\theta' : ('psyr', 113), r'\rho' : ('psyr', 114), r'\sigma' : ('psyr', 115), r'\tau' : ('psyr', 116), r'\upsilon' : ('psyr', 117), r'\varpi' : ('psyr', 118), r'\omega' : ('psyr', 119), r'\xi' : ('psyr', 120), r'\psi' : ('psyr', 121), r'\zeta' : ('psyr', 122), r'\sim' : ('psyr', 126), r'\leq' : ('psyr', 163), r'\infty' : ('psyr', 165), r'\clubsuit' : ('psyr', 167), r'\diamondsuit' : ('psyr', 168), r'\heartsuit' : ('psyr', 169), r'\spadesuit' : ('psyr', 170), r'\leftrightarrow' : ('psyr', 171), r'\leftarrow' : ('psyr', 172), r'\uparrow' : ('psyr', 173), r'\rightarrow' : ('psyr', 174), r'\downarrow' : ('psyr', 175), r'\pm' : ('psyr', 176), r'\geq' : ('psyr', 179), r'\times' : ('psyr', 180), r'\propto' : ('psyr', 181), r'\partial' : ('psyr', 182), r'\bullet' : ('psyr', 183), r'\div' : ('psyr', 184), r'\neq' : ('psyr', 185), r'\equiv' : ('psyr', 186), r'\approx' : ('psyr', 187), r'\ldots' : ('psyr', 188), r'\aleph' : ('psyr', 192), r'\Im' : ('psyr', 193), r'\Re' : ('psyr', 194), r'\wp' : ('psyr', 195), r'\otimes' : ('psyr', 196), r'\oplus' : ('psyr', 197), r'\oslash' : ('psyr', 198), r'\cap' : ('psyr', 199), r'\cup' : ('psyr', 200), r'\supset' : ('psyr', 201), r'\supseteq' : ('psyr', 202), r'\subset' : ('psyr', 204), r'\subseteq' : ('psyr', 205), r'\in' : ('psyr', 206), r'\notin' : ('psyr', 207), r'\angle' : ('psyr', 208), r'\nabla' : ('psyr', 209), r'\textregistered' : ('psyr', 210), r'\copyright' : ('psyr', 211), r'\texttrademark' : ('psyr', 212), r'\Pi' : ('psyr', 213), r'\prod' : ('psyr', 213), r'\surd' : ('psyr', 214), r'\__sqrt__' : ('psyr', 214), r'\cdot' : ('psyr', 215), r'\urcorner' : ('psyr', 216), r'\vee' : ('psyr', 217), r'\wedge' : ('psyr', 218), r'\Leftrightarrow' : ('psyr', 219), r'\Leftarrow' : ('psyr', 220), r'\Uparrow' : ('psyr', 221), r'\Rightarrow' : ('psyr', 222), r'\Downarrow' : ('psyr', 223), r'\Diamond' : ('psyr', 224), r'\langle' : ('psyr', 225), r'\Sigma' : ('psyr', 229), r'\sum' : ('psyr', 229), r'\forall' : ('psyr', 34), r'\exists' : ('psyr', 36), r'\lceil' : ('psyr', 233), r'\lbrace' : ('psyr', 123), r'\Psi' : ('psyr', 89), r'\bot' : ('psyr', 0136), r'\Omega' : ('psyr', 0127), r'\leftbracket' : ('psyr', 0133), r'\rightbracket' : ('psyr', 0135), r'\leftbrace' : ('psyr', 123), r'\leftparen' : ('psyr', 050), r'\prime' : ('psyr', 0242), r'\sharp' : ('psyr', 043), r'\slash' : ('psyr', 057), r'\Lamda' : ('psyr', 0114), r'\neg' : ('psyr', 0330), r'\Upsilon' : ('psyr', 0241), r'\rightbrace' : ('psyr', 0175), r'\rfloor' : ('psyr', 0373), r'\lambda' : ('psyr', 0154), r'\to' : ('psyr', 0256), r'\Xi' : ('psyr', 0130), r'\emptyset' : ('psyr', 0306), r'\lfloor' : ('psyr', 0353), r'\rightparen' : ('psyr', 051), r'\rceil' : ('psyr', 0371), r'\ni' : ('psyr', 047), r'\epsilon' : ('psyr', 0145), r'\Theta' : ('psyr', 0121), r'\langle' : ('psyr', 0341), r'\leftangle' : ('psyr', 0341), r'\rangle' : 
('psyr', 0361), r'\rightangle' : ('psyr', 0361), r'\rbrace' : ('psyr', 0175), r'\circ' : ('psyr', 0260), r'\diamond' : ('psyr', 0340), r'\mu' : ('psyr', 0155), r'\mid' : ('psyr', 0352), r'\imath' : ('pncri8a', 105), r'\%' : ('pncr8a', 37), r'\$' : ('pncr8a', 36), r'\{' : ('pncr8a', 123), r'\}' : ('pncr8a', 125), r'\backslash' : ('pncr8a', 92), r'\ast' : ('pncr8a', 42), r'\circumflexaccent' : ('pncri8a', 124), # for \hat r'\combiningbreve' : ('pncri8a', 81), # for \breve r'\combininggraveaccent' : ('pncri8a', 114), # for \grave r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot r'\combiningtilde' : ('pncri8a', 75), # for \tilde r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec r'\combiningdotabove' : ('pncri8a', 26), # for \dot } # Automatically generated. type12uni = {'uni24C8': 9416, 'aring': 229, 'uni22A0': 8864, 'uni2292': 8850, 'quotedblright': 8221, 'uni03D2': 978, 'uni2215': 8725, 'uni03D0': 976, 'V': 86, 'dollar': 36, 'uni301E': 12318, 'uni03D5': 981, 'four': 52, 'uni25A0': 9632, 'uni013C': 316, 'uni013B': 315, 'uni013E': 318, 'Yacute': 221, 'uni25DE': 9694, 'uni013F': 319, 'uni255A': 9562, 'uni2606': 9734, 'uni0180': 384, 'uni22B7': 8887, 'uni044F': 1103, 'uni22B5': 8885, 'uni22B4': 8884, 'uni22AE': 8878, 'uni22B2': 8882, 'uni22B1': 8881, 'uni22B0': 8880, 'uni25CD': 9677, 'uni03CE': 974, 'uni03CD': 973, 'uni03CC': 972, 'uni03CB': 971, 'uni03CA': 970, 'uni22B8': 8888, 'uni22C9': 8905, 'uni0449': 1097, 'uni20DD': 8413, 'uni20DC': 8412, 'uni20DB': 8411, 'uni2231': 8753, 'uni25CF': 9679, 'uni306E': 12398, 'uni03D1': 977, 'uni01A1': 417, 'uni20D7': 8407, 'uni03D6': 982, 'uni2233': 8755, 'uni20D2': 8402, 'uni20D1': 8401, 'uni20D0': 8400, 'P': 80, 'uni22BE': 8894, 'uni22BD': 8893, 'uni22BC': 8892, 'uni22BB': 8891, 'underscore': 95, 'uni03C8': 968, 'uni03C7': 967, 'uni0328': 808, 'uni03C5': 965, 'uni03C4': 964, 'uni03C3': 963, 'uni03C2': 962, 'uni03C1': 961, 'uni03C0': 960, 'uni2010': 8208, 'uni0130': 304, 'uni0133': 307, 'uni0132': 306, 'uni0135': 309, 'uni0134': 308, 'uni0137': 311, 'uni0136': 310, 'uni0139': 313, 'uni0138': 312, 'uni2244': 8772, 'uni229A': 8858, 'uni2571': 9585, 'uni0278': 632, 'uni2239': 8761, 'p': 112, 'uni3019': 12313, 'uni25CB': 9675, 'uni03DB': 987, 'uni03DC': 988, 'uni03DA': 986, 'uni03DF': 991, 'uni03DD': 989, 'uni013D': 317, 'uni220A': 8714, 'uni220C': 8716, 'uni220B': 8715, 'uni220E': 8718, 'uni220D': 8717, 'uni220F': 8719, 'uni22CC': 8908, 'Otilde': 213, 'uni25E5': 9701, 'uni2736': 10038, 'perthousand': 8240, 'zero': 48, 'uni279B': 10139, 'dotlessi': 305, 'uni2279': 8825, 'Scaron': 352, 'zcaron': 382, 'uni21D8': 8664, 'egrave': 232, 'uni0271': 625, 'uni01AA': 426, 'uni2332': 9010, 'section': 167, 'uni25E4': 9700, 'Icircumflex': 206, 'ntilde': 241, 'uni041E': 1054, 'ampersand': 38, 'uni041C': 1052, 'uni041A': 1050, 'uni22AB': 8875, 'uni21DB': 8667, 'dotaccent': 729, 'uni0416': 1046, 'uni0417': 1047, 'uni0414': 1044, 'uni0415': 1045, 'uni0412': 1042, 'uni0413': 1043, 'degree': 176, 'uni0411': 1041, 'K': 75, 'uni25EB': 9707, 'uni25EF': 9711, 'uni0418': 1048, 'uni0419': 1049, 'uni2263': 8803, 'uni226E': 8814, 'uni2251': 8785, 'uni02C8': 712, 'uni2262': 8802, 'acircumflex': 226, 'uni22B3': 8883, 'uni2261': 8801, 'uni2394': 9108, 'Aring': 197, 'uni2260': 8800, 'uni2254': 8788, 'uni0436': 1078, 'uni2267': 8807, 'k': 107, 'uni22C8': 8904, 'uni226A': 8810, 'uni231F': 8991, 'smalltilde': 732, 'uni2201': 8705, 'uni2200': 8704, 'uni2203': 8707, 'uni02BD': 701, 'uni2205': 8709, 'uni2204': 8708, 
'Agrave': 192, 'uni2206': 8710, 'uni2209': 8713, 'uni2208': 8712, 'uni226D': 8813, 'uni2264': 8804, 'uni263D': 9789, 'uni2258': 8792, 'uni02D3': 723, 'uni02D2': 722, 'uni02D1': 721, 'uni02D0': 720, 'uni25E1': 9697, 'divide': 247, 'uni02D5': 725, 'uni02D4': 724, 'ocircumflex': 244, 'uni2524': 9508, 'uni043A': 1082, 'uni24CC': 9420, 'asciitilde': 126, 'uni22B9': 8889, 'uni24D2': 9426, 'uni211E': 8478, 'uni211D': 8477, 'uni24DD': 9437, 'uni211A': 8474, 'uni211C': 8476, 'uni211B': 8475, 'uni25C6': 9670, 'uni017F': 383, 'uni017A': 378, 'uni017C': 380, 'uni017B': 379, 'uni0346': 838, 'uni22F1': 8945, 'uni22F0': 8944, 'two': 50, 'uni2298': 8856, 'uni24D1': 9425, 'E': 69, 'uni025D': 605, 'scaron': 353, 'uni2322': 8994, 'uni25E3': 9699, 'uni22BF': 8895, 'F': 70, 'uni0440': 1088, 'uni255E': 9566, 'uni22BA': 8890, 'uni0175': 373, 'uni0174': 372, 'uni0177': 375, 'uni0176': 374, 'bracketleft': 91, 'uni0170': 368, 'uni0173': 371, 'uni0172': 370, 'asciicircum': 94, 'uni0179': 377, 'uni2590': 9616, 'uni25E2': 9698, 'uni2119': 8473, 'uni2118': 8472, 'uni25CC': 9676, 'f': 102, 'ordmasculine': 186, 'uni229B': 8859, 'uni22A1': 8865, 'uni2111': 8465, 'uni2110': 8464, 'uni2113': 8467, 'uni2112': 8466, 'mu': 181, 'uni2281': 8833, 'paragraph': 182, 'nine': 57, 'uni25EC': 9708, 'v': 118, 'uni040C': 1036, 'uni0113': 275, 'uni22D0': 8912, 'uni21CC': 8652, 'uni21CB': 8651, 'uni21CA': 8650, 'uni22A5': 8869, 'uni21CF': 8655, 'uni21CE': 8654, 'uni21CD': 8653, 'guilsinglleft': 8249, 'backslash': 92, 'uni2284': 8836, 'uni224E': 8782, 'uni224D': 8781, 'uni224F': 8783, 'uni224A': 8778, 'uni2287': 8839, 'uni224C': 8780, 'uni224B': 8779, 'uni21BD': 8637, 'uni2286': 8838, 'uni030F': 783, 'uni030D': 781, 'uni030E': 782, 'uni030B': 779, 'uni030C': 780, 'uni030A': 778, 'uni026E': 622, 'uni026D': 621, 'six': 54, 'uni026A': 618, 'uni026C': 620, 'uni25C1': 9665, 'uni20D6': 8406, 'uni045B': 1115, 'uni045C': 1116, 'uni256B': 9579, 'uni045A': 1114, 'uni045F': 1119, 'uni045E': 1118, 'A': 65, 'uni2569': 9577, 'uni0458': 1112, 'uni0459': 1113, 'uni0452': 1106, 'uni0453': 1107, 'uni2562': 9570, 'uni0451': 1105, 'uni0456': 1110, 'uni0457': 1111, 'uni0454': 1108, 'uni0455': 1109, 'icircumflex': 238, 'uni0307': 775, 'uni0304': 772, 'uni0305': 773, 'uni0269': 617, 'uni0268': 616, 'uni0300': 768, 'uni0301': 769, 'uni0265': 613, 'uni0264': 612, 'uni0267': 615, 'uni0266': 614, 'uni0261': 609, 'uni0260': 608, 'uni0263': 611, 'uni0262': 610, 'a': 97, 'uni2207': 8711, 'uni2247': 8775, 'uni2246': 8774, 'uni2241': 8769, 'uni2240': 8768, 'uni2243': 8771, 'uni2242': 8770, 'uni2312': 8978, 'ogonek': 731, 'uni2249': 8777, 'uni2248': 8776, 'uni3030': 12336, 'q': 113, 'uni21C2': 8642, 'uni21C1': 8641, 'uni21C0': 8640, 'uni21C7': 8647, 'uni21C6': 8646, 'uni21C5': 8645, 'uni21C4': 8644, 'uni225F': 8799, 'uni212C': 8492, 'uni21C8': 8648, 'uni2467': 9319, 'oacute': 243, 'uni028F': 655, 'uni028E': 654, 'uni026F': 623, 'uni028C': 652, 'uni028B': 651, 'uni028A': 650, 'uni2510': 9488, 'ograve': 242, 'edieresis': 235, 'uni22CE': 8910, 'uni22CF': 8911, 'uni219F': 8607, 'comma': 44, 'uni22CA': 8906, 'uni0429': 1065, 'uni03C6': 966, 'uni0427': 1063, 'uni0426': 1062, 'uni0425': 1061, 'uni0424': 1060, 'uni0423': 1059, 'uni0422': 1058, 'uni0421': 1057, 'uni0420': 1056, 'uni2465': 9317, 'uni24D0': 9424, 'uni2464': 9316, 'uni0430': 1072, 'otilde': 245, 'uni2661': 9825, 'uni24D6': 9430, 'uni2466': 9318, 'uni24D5': 9429, 'uni219A': 8602, 'uni2518': 9496, 'uni22B6': 8886, 'uni2461': 9313, 'uni24D4': 9428, 'uni2460': 9312, 'uni24EA': 9450, 'guillemotright': 187, 'ecircumflex': 
234, 'greater': 62, 'uni2011': 8209, 'uacute': 250, 'uni2462': 9314, 'L': 76, 'bullet': 8226, 'uni02A4': 676, 'uni02A7': 679, 'cedilla': 184, 'uni02A2': 674, 'uni2015': 8213, 'uni22C4': 8900, 'uni22C5': 8901, 'uni22AD': 8877, 'uni22C7': 8903, 'uni22C0': 8896, 'uni2016': 8214, 'uni22C2': 8898, 'uni22C3': 8899, 'uni24CF': 9423, 'uni042F': 1071, 'uni042E': 1070, 'uni042D': 1069, 'ydieresis': 255, 'l': 108, 'logicalnot': 172, 'uni24CA': 9418, 'uni0287': 647, 'uni0286': 646, 'uni0285': 645, 'uni0284': 644, 'uni0283': 643, 'uni0282': 642, 'uni0281': 641, 'uni027C': 636, 'uni2664': 9828, 'exclamdown': 161, 'uni25C4': 9668, 'uni0289': 649, 'uni0288': 648, 'uni039A': 922, 'endash': 8211, 'uni2640': 9792, 'uni20E4': 8420, 'uni0473': 1139, 'uni20E1': 8417, 'uni2642': 9794, 'uni03B8': 952, 'uni03B9': 953, 'agrave': 224, 'uni03B4': 948, 'uni03B5': 949, 'uni03B6': 950, 'uni03B7': 951, 'uni03B0': 944, 'uni03B1': 945, 'uni03B2': 946, 'uni03B3': 947, 'uni2555': 9557, 'Adieresis': 196, 'germandbls': 223, 'Odieresis': 214, 'space': 32, 'uni0126': 294, 'uni0127': 295, 'uni0124': 292, 'uni0125': 293, 'uni0122': 290, 'uni0123': 291, 'uni0120': 288, 'uni0121': 289, 'quoteright': 8217, 'uni2560': 9568, 'uni2556': 9558, 'ucircumflex': 251, 'uni2561': 9569, 'uni2551': 9553, 'uni25B2': 9650, 'uni2550': 9552, 'uni2563': 9571, 'uni2553': 9555, 'G': 71, 'uni2564': 9572, 'uni2552': 9554, 'quoteleft': 8216, 'uni2565': 9573, 'uni2572': 9586, 'uni2568': 9576, 'uni2566': 9574, 'W': 87, 'uni214A': 8522, 'uni012F': 303, 'uni012D': 301, 'uni012E': 302, 'uni012B': 299, 'uni012C': 300, 'uni255C': 9564, 'uni012A': 298, 'uni2289': 8841, 'Q': 81, 'uni2320': 8992, 'uni2321': 8993, 'g': 103, 'uni03BD': 957, 'uni03BE': 958, 'uni03BF': 959, 'uni2282': 8834, 'uni2285': 8837, 'uni03BA': 954, 'uni03BB': 955, 'uni03BC': 956, 'uni2128': 8488, 'uni25B7': 9655, 'w': 119, 'uni0302': 770, 'uni03DE': 990, 'uni25DA': 9690, 'uni0303': 771, 'uni0463': 1123, 'uni0462': 1122, 'uni3018': 12312, 'uni2514': 9492, 'question': 63, 'uni25B3': 9651, 'uni24E1': 9441, 'one': 49, 'uni200A': 8202, 'uni2278': 8824, 'ring': 730, 'uni0195': 405, 'figuredash': 8210, 'uni22EC': 8940, 'uni0339': 825, 'uni0338': 824, 'uni0337': 823, 'uni0336': 822, 'uni0335': 821, 'uni0333': 819, 'uni0332': 818, 'uni0331': 817, 'uni0330': 816, 'uni01C1': 449, 'uni01C0': 448, 'uni01C3': 451, 'uni01C2': 450, 'uni2353': 9043, 'uni0308': 776, 'uni2218': 8728, 'uni2219': 8729, 'uni2216': 8726, 'uni2217': 8727, 'uni2214': 8724, 'uni0309': 777, 'uni2609': 9737, 'uni2213': 8723, 'uni2210': 8720, 'uni2211': 8721, 'uni2245': 8773, 'B': 66, 'uni25D6': 9686, 'iacute': 237, 'uni02E6': 742, 'uni02E7': 743, 'uni02E8': 744, 'uni02E9': 745, 'uni221D': 8733, 'uni221E': 8734, 'Ydieresis': 376, 'uni221C': 8732, 'uni22D7': 8919, 'uni221A': 8730, 'R': 82, 'uni24DC': 9436, 'uni033F': 831, 'uni033E': 830, 'uni033C': 828, 'uni033B': 827, 'uni033A': 826, 'b': 98, 'uni228A': 8842, 'uni22DB': 8923, 'uni2554': 9556, 'uni046B': 1131, 'uni046A': 1130, 'r': 114, 'uni24DB': 9435, 'Ccedilla': 199, 'minus': 8722, 'uni24DA': 9434, 'uni03F0': 1008, 'uni03F1': 1009, 'uni20AC': 8364, 'uni2276': 8822, 'uni24C0': 9408, 'uni0162': 354, 'uni0163': 355, 'uni011E': 286, 'uni011D': 285, 'uni011C': 284, 'uni011B': 283, 'uni0164': 356, 'uni0165': 357, 'Lslash': 321, 'uni0168': 360, 'uni0169': 361, 'uni25C9': 9673, 'uni02E5': 741, 'uni21C3': 8643, 'uni24C4': 9412, 'uni24E2': 9442, 'uni2277': 8823, 'uni013A': 314, 'uni2102': 8450, 'Uacute': 218, 'uni2317': 8983, 'uni2107': 8455, 'uni221F': 8735, 'yacute': 253, 'uni3012': 12306, 
'Ucircumflex': 219, 'uni015D': 349, 'quotedbl': 34, 'uni25D9': 9689, 'uni2280': 8832, 'uni22AF': 8879, 'onehalf': 189, 'uni221B': 8731, 'Thorn': 222, 'uni2226': 8742, 'M': 77, 'uni25BA': 9658, 'uni2463': 9315, 'uni2336': 9014, 'eight': 56, 'uni2236': 8758, 'multiply': 215, 'uni210C': 8460, 'uni210A': 8458, 'uni21C9': 8649, 'grave': 96, 'uni210E': 8462, 'uni0117': 279, 'uni016C': 364, 'uni0115': 277, 'uni016A': 362, 'uni016F': 367, 'uni0112': 274, 'uni016D': 365, 'uni016E': 366, 'Ocircumflex': 212, 'uni2305': 8965, 'm': 109, 'uni24DF': 9439, 'uni0119': 281, 'uni0118': 280, 'uni20A3': 8355, 'uni20A4': 8356, 'uni20A7': 8359, 'uni2288': 8840, 'uni24C3': 9411, 'uni251C': 9500, 'uni228D': 8845, 'uni222F': 8751, 'uni222E': 8750, 'uni222D': 8749, 'uni222C': 8748, 'uni222B': 8747, 'uni222A': 8746, 'uni255B': 9563, 'Ugrave': 217, 'uni24DE': 9438, 'guilsinglright': 8250, 'uni250A': 9482, 'Ntilde': 209, 'uni0279': 633, 'questiondown': 191, 'uni256C': 9580, 'Atilde': 195, 'uni0272': 626, 'uni0273': 627, 'uni0270': 624, 'ccedilla': 231, 'uni0276': 630, 'uni0277': 631, 'uni0274': 628, 'uni0275': 629, 'uni2252': 8786, 'uni041F': 1055, 'uni2250': 8784, 'Z': 90, 'uni2256': 8790, 'uni2257': 8791, 'copyright': 169, 'uni2255': 8789, 'uni043D': 1085, 'uni043E': 1086, 'uni043F': 1087, 'yen': 165, 'uni041D': 1053, 'uni043B': 1083, 'uni043C': 1084, 'uni21B0': 8624, 'uni21B1': 8625, 'uni21B2': 8626, 'uni21B3': 8627, 'uni21B4': 8628, 'uni21B5': 8629, 'uni21B6': 8630, 'uni21B7': 8631, 'uni21B8': 8632, 'Eacute': 201, 'uni2311': 8977, 'uni2310': 8976, 'uni228F': 8847, 'uni25DB': 9691, 'uni21BA': 8634, 'uni21BB': 8635, 'uni21BC': 8636, 'uni2017': 8215, 'uni21BE': 8638, 'uni21BF': 8639, 'uni231C': 8988, 'H': 72, 'uni0293': 659, 'uni2202': 8706, 'uni22A4': 8868, 'uni231E': 8990, 'uni2232': 8754, 'uni225B': 8795, 'uni225C': 8796, 'uni24D9': 9433, 'uni225A': 8794, 'uni0438': 1080, 'uni0439': 1081, 'uni225D': 8797, 'uni225E': 8798, 'uni0434': 1076, 'X': 88, 'uni007F': 127, 'uni0437': 1079, 'Idieresis': 207, 'uni0431': 1073, 'uni0432': 1074, 'uni0433': 1075, 'uni22AC': 8876, 'uni22CD': 8909, 'uni25A3': 9635, 'bar': 124, 'uni24BB': 9403, 'uni037E': 894, 'uni027B': 635, 'h': 104, 'uni027A': 634, 'uni027F': 639, 'uni027D': 637, 'uni027E': 638, 'uni2227': 8743, 'uni2004': 8196, 'uni2225': 8741, 'uni2224': 8740, 'uni2223': 8739, 'uni2222': 8738, 'uni2221': 8737, 'uni2220': 8736, 'x': 120, 'uni2323': 8995, 'uni2559': 9561, 'uni2558': 9560, 'uni2229': 8745, 'uni2228': 8744, 'udieresis': 252, 'uni029D': 669, 'ordfeminine': 170, 'uni22CB': 8907, 'uni233D': 9021, 'uni0428': 1064, 'uni24C6': 9414, 'uni22DD': 8925, 'uni24C7': 9415, 'uni015C': 348, 'uni015B': 347, 'uni015A': 346, 'uni22AA': 8874, 'uni015F': 351, 'uni015E': 350, 'braceleft': 123, 'uni24C5': 9413, 'uni0410': 1040, 'uni03AA': 938, 'uni24C2': 9410, 'uni03AC': 940, 'uni03AB': 939, 'macron': 175, 'uni03AD': 941, 'uni03AF': 943, 'uni0294': 660, 'uni0295': 661, 'uni0296': 662, 'uni0297': 663, 'uni0290': 656, 'uni0291': 657, 'uni0292': 658, 'atilde': 227, 'Acircumflex': 194, 'uni2370': 9072, 'uni24C1': 9409, 'uni0298': 664, 'uni0299': 665, 'Oslash': 216, 'uni029E': 670, 'C': 67, 'quotedblleft': 8220, 'uni029B': 667, 'uni029C': 668, 'uni03A9': 937, 'uni03A8': 936, 'S': 83, 'uni24C9': 9417, 'uni03A1': 929, 'uni03A0': 928, 'exclam': 33, 'uni03A5': 933, 'uni03A4': 932, 'uni03A7': 935, 'Zcaron': 381, 'uni2133': 8499, 'uni2132': 8498, 'uni0159': 345, 'uni0158': 344, 'uni2137': 8503, 'uni2005': 8197, 'uni2135': 8501, 'uni2134': 8500, 'uni02BA': 698, 'uni2033': 8243, 'uni0151': 337, 
'uni0150': 336, 'uni0157': 343, 'equal': 61, 'uni0155': 341, 'uni0154': 340, 's': 115, 'uni233F': 9023, 'eth': 240, 'uni24BE': 9406, 'uni21E9': 8681, 'uni2060': 8288, 'Egrave': 200, 'uni255D': 9565, 'uni24CD': 9421, 'uni21E1': 8673, 'uni21B9': 8633, 'hyphen': 45, 'uni01BE': 446, 'uni01BB': 443, 'period': 46, 'igrave': 236, 'uni01BA': 442, 'uni2296': 8854, 'uni2297': 8855, 'uni2294': 8852, 'uni2295': 8853, 'colon': 58, 'uni2293': 8851, 'uni2290': 8848, 'uni2291': 8849, 'uni032D': 813, 'uni032E': 814, 'uni032F': 815, 'uni032A': 810, 'uni032B': 811, 'uni032C': 812, 'uni231D': 8989, 'Ecircumflex': 202, 'uni24D7': 9431, 'uni25DD': 9693, 'trademark': 8482, 'Aacute': 193, 'cent': 162, 'uni0445': 1093, 'uni266E': 9838, 'uni266D': 9837, 'uni266B': 9835, 'uni03C9': 969, 'uni2003': 8195, 'uni2047': 8263, 'lslash': 322, 'uni03A6': 934, 'uni2043': 8259, 'uni250C': 9484, 'uni2040': 8256, 'uni255F': 9567, 'uni24CB': 9419, 'uni0472': 1138, 'uni0446': 1094, 'uni0474': 1140, 'uni0475': 1141, 'uni2508': 9480, 'uni2660': 9824, 'uni2506': 9478, 'uni2502': 9474, 'c': 99, 'uni2500': 9472, 'N': 78, 'uni22A6': 8870, 'uni21E7': 8679, 'uni2130': 8496, 'uni2002': 8194, 'breve': 728, 'uni0442': 1090, 'Oacute': 211, 'uni229F': 8863, 'uni25C7': 9671, 'uni229D': 8861, 'uni229E': 8862, 'guillemotleft': 171, 'uni0329': 809, 'uni24E5': 9445, 'uni011F': 287, 'uni0324': 804, 'uni0325': 805, 'uni0326': 806, 'uni0327': 807, 'uni0321': 801, 'uni0322': 802, 'n': 110, 'uni2032': 8242, 'uni2269': 8809, 'uni2268': 8808, 'uni0306': 774, 'uni226B': 8811, 'uni21EA': 8682, 'uni0166': 358, 'uni203B': 8251, 'uni01B5': 437, 'idieresis': 239, 'uni02BC': 700, 'uni01B0': 432, 'braceright': 125, 'seven': 55, 'uni02BB': 699, 'uni011A': 282, 'uni29FB': 10747, 'brokenbar': 166, 'uni2036': 8246, 'uni25C0': 9664, 'uni0156': 342, 'uni22D5': 8917, 'uni0258': 600, 'ugrave': 249, 'uni22D6': 8918, 'uni22D1': 8913, 'uni2034': 8244, 'uni22D3': 8915, 'uni22D2': 8914, 'uni203C': 8252, 'uni223E': 8766, 'uni02BF': 703, 'uni22D9': 8921, 'uni22D8': 8920, 'uni25BD': 9661, 'uni25BE': 9662, 'uni25BF': 9663, 'uni041B': 1051, 'periodcentered': 183, 'uni25BC': 9660, 'uni019E': 414, 'uni019B': 411, 'uni019A': 410, 'uni2007': 8199, 'uni0391': 913, 'uni0390': 912, 'uni0393': 915, 'uni0392': 914, 'uni0395': 917, 'uni0394': 916, 'uni0397': 919, 'uni0396': 918, 'uni0399': 921, 'uni0398': 920, 'uni25C8': 9672, 'uni2468': 9320, 'sterling': 163, 'uni22EB': 8939, 'uni039C': 924, 'uni039B': 923, 'uni039E': 926, 'uni039D': 925, 'uni039F': 927, 'I': 73, 'uni03E1': 993, 'uni03E0': 992, 'uni2319': 8985, 'uni228B': 8843, 'uni25B5': 9653, 'uni25B6': 9654, 'uni22EA': 8938, 'uni24B9': 9401, 'uni044E': 1102, 'uni0199': 409, 'uni2266': 8806, 'Y': 89, 'uni22A2': 8866, 'Eth': 208, 'uni266F': 9839, 'emdash': 8212, 'uni263B': 9787, 'uni24BD': 9405, 'uni22DE': 8926, 'uni0360': 864, 'uni2557': 9559, 'uni22DF': 8927, 'uni22DA': 8922, 'uni22DC': 8924, 'uni0361': 865, 'i': 105, 'uni24BF': 9407, 'uni0362': 866, 'uni263E': 9790, 'uni028D': 653, 'uni2259': 8793, 'uni0323': 803, 'uni2265': 8805, 'daggerdbl': 8225, 'y': 121, 'uni010A': 266, 'plusminus': 177, 'less': 60, 'uni21AE': 8622, 'uni0315': 789, 'uni230B': 8971, 'uni21AF': 8623, 'uni21AA': 8618, 'uni21AC': 8620, 'uni21AB': 8619, 'uni01FB': 507, 'uni01FC': 508, 'uni223A': 8762, 'uni01FA': 506, 'uni01FF': 511, 'uni01FD': 509, 'uni01FE': 510, 'uni2567': 9575, 'uni25E0': 9696, 'uni0104': 260, 'uni0105': 261, 'uni0106': 262, 'uni0107': 263, 'uni0100': 256, 'uni0101': 257, 'uni0102': 258, 'uni0103': 259, 'uni2038': 8248, 'uni2009': 8201, 'uni2008': 
8200, 'uni0108': 264, 'uni0109': 265, 'uni02A1': 673, 'uni223B': 8763, 'uni226C': 8812, 'uni25AC': 9644, 'uni24D3': 9427, 'uni21E0': 8672, 'uni21E3': 8675, 'Udieresis': 220, 'uni21E2': 8674, 'D': 68, 'uni21E5': 8677, 'uni2621': 9761, 'uni21D1': 8657, 'uni203E': 8254, 'uni22C6': 8902, 'uni21E4': 8676, 'uni010D': 269, 'uni010E': 270, 'uni010F': 271, 'five': 53, 'T': 84, 'uni010B': 267, 'uni010C': 268, 'uni2605': 9733, 'uni2663': 9827, 'uni21E6': 8678, 'uni24B6': 9398, 'uni22C1': 8897, 'oslash': 248, 'acute': 180, 'uni01F0': 496, 'd': 100, 'OE': 338, 'uni22E3': 8931, 'Igrave': 204, 'uni2308': 8968, 'uni2309': 8969, 'uni21A9': 8617, 't': 116, 'uni2313': 8979, 'uni03A3': 931, 'uni21A4': 8612, 'uni21A7': 8615, 'uni21A6': 8614, 'uni21A1': 8609, 'uni21A0': 8608, 'uni21A3': 8611, 'uni21A2': 8610, 'parenright': 41, 'uni256A': 9578, 'uni25DC': 9692, 'uni24CE': 9422, 'uni042C': 1068, 'uni24E0': 9440, 'uni042B': 1067, 'uni0409': 1033, 'uni0408': 1032, 'uni24E7': 9447, 'uni25B4': 9652, 'uni042A': 1066, 'uni228E': 8846, 'uni0401': 1025, 'adieresis': 228, 'uni0403': 1027, 'quotesingle': 39, 'uni0405': 1029, 'uni0404': 1028, 'uni0407': 1031, 'uni0406': 1030, 'uni229C': 8860, 'uni2306': 8966, 'uni2253': 8787, 'twodotenleader': 8229, 'uni2131': 8497, 'uni21DA': 8666, 'uni2234': 8756, 'uni2235': 8757, 'uni01A5': 421, 'uni2237': 8759, 'uni2230': 8752, 'uni02CC': 716, 'slash': 47, 'uni01A0': 416, 'ellipsis': 8230, 'uni2299': 8857, 'uni2238': 8760, 'numbersign': 35, 'uni21A8': 8616, 'uni223D': 8765, 'uni01AF': 431, 'uni223F': 8767, 'uni01AD': 429, 'uni01AB': 427, 'odieresis': 246, 'uni223C': 8764, 'uni227D': 8829, 'uni0280': 640, 'O': 79, 'uni227E': 8830, 'uni21A5': 8613, 'uni22D4': 8916, 'uni25D4': 9684, 'uni227F': 8831, 'uni0435': 1077, 'uni2302': 8962, 'uni2669': 9833, 'uni24E3': 9443, 'uni2720': 10016, 'uni22A8': 8872, 'uni22A9': 8873, 'uni040A': 1034, 'uni22A7': 8871, 'oe': 339, 'uni040B': 1035, 'uni040E': 1038, 'uni22A3': 8867, 'o': 111, 'uni040F': 1039, 'Edieresis': 203, 'uni25D5': 9685, 'plus': 43, 'uni044D': 1101, 'uni263C': 9788, 'uni22E6': 8934, 'uni2283': 8835, 'uni258C': 9612, 'uni219E': 8606, 'uni24E4': 9444, 'uni2136': 8502, 'dagger': 8224, 'uni24B7': 9399, 'uni219B': 8603, 'uni22E5': 8933, 'three': 51, 'uni210B': 8459, 'uni2534': 9524, 'uni24B8': 9400, 'uni230A': 8970, 'hungarumlaut': 733, 'parenleft': 40, 'uni0148': 328, 'uni0149': 329, 'uni2124': 8484, 'uni2125': 8485, 'uni2126': 8486, 'uni2127': 8487, 'uni0140': 320, 'uni2129': 8489, 'uni25C5': 9669, 'uni0143': 323, 'uni0144': 324, 'uni0145': 325, 'uni0146': 326, 'uni0147': 327, 'uni210D': 8461, 'fraction': 8260, 'uni2031': 8241, 'uni2196': 8598, 'uni2035': 8245, 'uni24E6': 9446, 'uni016B': 363, 'uni24BA': 9402, 'uni266A': 9834, 'uni0116': 278, 'uni2115': 8469, 'registered': 174, 'J': 74, 'uni25DF': 9695, 'uni25CE': 9678, 'uni273D': 10045, 'dieresis': 168, 'uni212B': 8491, 'uni0114': 276, 'uni212D': 8493, 'uni212E': 8494, 'uni212F': 8495, 'uni014A': 330, 'uni014B': 331, 'uni014C': 332, 'uni014D': 333, 'uni014E': 334, 'uni014F': 335, 'uni025E': 606, 'uni24E8': 9448, 'uni0111': 273, 'uni24E9': 9449, 'Ograve': 210, 'j': 106, 'uni2195': 8597, 'uni2194': 8596, 'uni2197': 8599, 'uni2037': 8247, 'uni2191': 8593, 'uni2190': 8592, 'uni2193': 8595, 'uni2192': 8594, 'uni29FA': 10746, 'uni2713': 10003, 'z': 122, 'uni2199': 8601, 'uni2198': 8600, 'uni2667': 9831, 'ae': 230, 'uni0448': 1096, 'semicolon': 59, 'uni2666': 9830, 'uni038F': 911, 'uni0444': 1092, 'uni0447': 1095, 'uni038E': 910, 'uni0441': 1089, 'uni038C': 908, 'uni0443': 1091, 'uni038A': 906, 
'uni0250': 592, 'uni0251': 593, 'uni0252': 594, 'uni0253': 595, 'uni0254': 596, 'at': 64, 'uni0256': 598, 'uni0257': 599, 'uni0167': 359, 'uni0259': 601, 'uni228C': 8844, 'uni2662': 9826, 'uni0319': 793, 'uni0318': 792, 'uni24BC': 9404, 'uni0402': 1026, 'uni22EF': 8943, 'Iacute': 205, 'uni22ED': 8941, 'uni22EE': 8942, 'uni0311': 785, 'uni0310': 784, 'uni21E8': 8680, 'uni0312': 786, 'percent': 37, 'uni0317': 791, 'uni0316': 790, 'uni21D6': 8662, 'uni21D7': 8663, 'uni21D4': 8660, 'uni21D5': 8661, 'uni21D2': 8658, 'uni21D3': 8659, 'uni21D0': 8656, 'uni2138': 8504, 'uni2270': 8816, 'uni2271': 8817, 'uni2272': 8818, 'uni2273': 8819, 'uni2274': 8820, 'uni2275': 8821, 'bracketright': 93, 'uni21D9': 8665, 'uni21DF': 8671, 'uni21DD': 8669, 'uni21DE': 8670, 'AE': 198, 'uni03AE': 942, 'uni227A': 8826, 'uni227B': 8827, 'uni227C': 8828, 'asterisk': 42, 'aacute': 225, 'uni226F': 8815, 'uni22E2': 8930, 'uni0386': 902, 'uni22E0': 8928, 'uni22E1': 8929, 'U': 85, 'uni22E7': 8935, 'uni22E4': 8932, 'uni0387': 903, 'uni031A': 794, 'eacute': 233, 'uni22E8': 8936, 'uni22E9': 8937, 'uni24D8': 9432, 'uni025A': 602, 'uni025B': 603, 'uni025C': 604, 'e': 101, 'uni0128': 296, 'uni025F': 607, 'uni2665': 9829, 'thorn': 254, 'uni0129': 297, 'uni253C': 9532, 'uni25D7': 9687, 'u': 117, 'uni0388': 904, 'uni0389': 905, 'uni0255': 597, 'uni0171': 369, 'uni0384': 900, 'uni0385': 901, 'uni044A': 1098, 'uni252C': 9516, 'uni044C': 1100, 'uni044B': 1099} uni2type1 = dict([(v,k) for k,v in type12uni.items()]) tex2uni = { 'widehat': 0x0302, 'widetilde': 0x0303, 'langle': 0x27e8, 'rangle': 0x27e9, 'perp': 0x27c2, 'neq': 0x2260, 'Join': 0x2a1d, 'leqslant': 0x2a7d, 'geqslant': 0x2a7e, 'lessapprox': 0x2a85, 'gtrapprox': 0x2a86, 'lesseqqgtr': 0x2a8b, 'gtreqqless': 0x2a8c, 'triangleeq': 0x225c, 'eqslantless': 0x2a95, 'eqslantgtr': 0x2a96, 'backepsilon': 0x03f6, 'precapprox': 0x2ab7, 'succapprox': 0x2ab8, 'fallingdotseq': 0x2252, 'subseteqq': 0x2ac5, 'supseteqq': 0x2ac6, 'varpropto': 0x221d, 'precnapprox': 0x2ab9, 'succnapprox': 0x2aba, 'subsetneqq': 0x2acb, 'supsetneqq': 0x2acc, 'lnapprox': 0x2ab9, 'gnapprox': 0x2aba, 'longleftarrow': 0x27f5, 'longrightarrow': 0x27f6, 'longleftrightarrow': 0x27f7, 'Longleftarrow': 0x27f8, 'Longrightarrow': 0x27f9, 'Longleftrightarrow': 0x27fa, 'longmapsto': 0x27fc, 'leadsto': 0x21dd, 'dashleftarrow': 0x290e, 'dashrightarrow': 0x290f, 'circlearrowleft': 0x21ba, 'circlearrowright': 0x21bb, 'leftrightsquigarrow': 0x21ad, 'leftsquigarrow': 0x219c, 'rightsquigarrow': 0x219d, 'Game': 0x2141, 'hbar': 0x0127, 'hslash': 0x210f, 'ldots': 0x22ef, 'vdots': 0x22ee, 'doteqdot': 0x2251, 'doteq': 8784, 'partial': 8706, 'gg': 8811, 'asymp': 8781, 'blacktriangledown': 9662, 'otimes': 8855, 'nearrow': 8599, 'varpi': 982, 'vee': 8744, 'vec': 8407, 'smile': 8995, 'succnsim': 8937, 'gimel': 8503, 'vert': 124, '|': 124, 'varrho': 1009, 'P': 182, 'approxident': 8779, 'Swarrow': 8665, 'textasciicircum': 94, 'imageof': 8887, 'ntriangleleft': 8938, 'nleq': 8816, 'div': 247, 'nparallel': 8742, 'Leftarrow': 8656, 'lll': 8920, 'oiint': 8751, 'ngeq': 8817, 'Theta': 920, 'origof': 8886, 'blacksquare': 9632, 'solbar': 9023, 'neg': 172, 'sum': 8721, 'Vdash': 8873, 'coloneq': 8788, 'degree': 176, 'bowtie': 8904, 'blacktriangleright': 9654, 'varsigma': 962, 'leq': 8804, 'ggg': 8921, 'lneqq': 8808, 'scurel': 8881, 'stareq': 8795, 'BbbN': 8469, 'nLeftarrow': 8653, 'nLeftrightarrow': 8654, 'k': 808, 'bot': 8869, 'BbbC': 8450, 'Lsh': 8624, 'leftleftarrows': 8647, 'BbbZ': 8484, 'digamma': 989, 'BbbR': 8477, 'BbbP': 8473, 'BbbQ': 8474, 
'vartriangleright': 8883, 'succsim': 8831, 'wedge': 8743, 'lessgtr': 8822, 'veebar': 8891, 'mapsdown': 8615, 'Rsh': 8625, 'chi': 967, 'prec': 8826, 'nsubseteq': 8840, 'therefore': 8756, 'eqcirc': 8790, 'textexclamdown': 161, 'nRightarrow': 8655, 'flat': 9837, 'notin': 8713, 'llcorner': 8990, 'varepsilon': 949, 'bigtriangleup': 9651, 'aleph': 8501, 'dotminus': 8760, 'upsilon': 965, 'Lambda': 923, 'cap': 8745, 'barleftarrow': 8676, 'mu': 956, 'boxplus': 8862, 'mp': 8723, 'circledast': 8859, 'tau': 964, 'in': 8712, 'backslash': 92, 'varnothing': 8709, 'sharp': 9839, 'eqsim': 8770, 'gnsim': 8935, 'Searrow': 8664, 'updownarrows': 8645, 'heartsuit': 9825, 'trianglelefteq': 8884, 'ddag': 8225, 'sqsubseteq': 8849, 'mapsfrom': 8612, 'boxbar': 9707, 'sim': 8764, 'Nwarrow': 8662, 'nequiv': 8802, 'succ': 8827, 'vdash': 8866, 'Leftrightarrow': 8660, 'parallel': 8741, 'invnot': 8976, 'natural': 9838, 'ss': 223, 'uparrow': 8593, 'nsim': 8769, 'hookrightarrow': 8618, 'Equiv': 8803, 'approx': 8776, 'Vvdash': 8874, 'nsucc': 8833, 'leftrightharpoons': 8651, 'Re': 8476, 'boxminus': 8863, 'equiv': 8801, 'Lleftarrow': 8666, 'thinspace': 8201, 'll': 8810, 'Cup': 8915, 'measeq': 8798, 'upharpoonleft': 8639, 'lq': 8216, 'Upsilon': 933, 'subsetneq': 8842, 'greater': 62, 'supsetneq': 8843, 'Cap': 8914, 'L': 321, 'spadesuit': 9824, 'lrcorner': 8991, 'not': 824, 'bar': 772, 'rightharpoonaccent': 8401, 'boxdot': 8865, 'l': 322, 'leftharpoondown': 8637, 'bigcup': 8899, 'iint': 8748, 'bigwedge': 8896, 'downharpoonleft': 8643, 'textasciitilde': 126, 'subset': 8834, 'leqq': 8806, 'mapsup': 8613, 'nvDash': 8877, 'looparrowleft': 8619, 'nless': 8814, 'rightarrowbar': 8677, 'Vert': 8214, 'downdownarrows': 8650, 'uplus': 8846, 'simeq': 8771, 'napprox': 8777, 'ast': 8727, 'twoheaduparrow': 8607, 'doublebarwedge': 8966, 'Sigma': 931, 'leftharpoonaccent': 8400, 'ntrianglelefteq': 8940, 'nexists': 8708, 'times': 215, 'measuredangle': 8737, 'bumpeq': 8783, 'carriagereturn': 8629, 'adots': 8944, 'checkmark': 10003, 'lambda': 955, 'xi': 958, 'rbrace': 125, 'rbrack': 93, 'Nearrow': 8663, 'maltese': 10016, 'clubsuit': 9827, 'top': 8868, 'overarc': 785, 'varphi': 966, 'Delta': 916, 'iota': 953, 'nleftarrow': 8602, 'candra': 784, 'supset': 8835, 'triangleleft': 9665, 'gtreqless': 8923, 'ntrianglerighteq': 8941, 'quad': 8195, 'Xi': 926, 'gtrdot': 8919, 'leftthreetimes': 8907, 'minus': 8722, 'preccurlyeq': 8828, 'nleftrightarrow': 8622, 'lambdabar': 411, 'blacktriangle': 9652, 'kernelcontraction': 8763, 'Phi': 934, 'angle': 8736, 'spadesuitopen': 9828, 'eqless': 8924, 'mid': 8739, 'varkappa': 1008, 'Ldsh': 8626, 'updownarrow': 8597, 'beta': 946, 'textquotedblleft': 8220, 'rho': 961, 'alpha': 945, 'intercal': 8890, 'beth': 8502, 'grave': 768, 'acwopencirclearrow': 8634, 'nmid': 8740, 'nsupset': 8837, 'sigma': 963, 'dot': 775, 'Rightarrow': 8658, 'turnednot': 8985, 'backsimeq': 8909, 'leftarrowtail': 8610, 'approxeq': 8778, 'curlyeqsucc': 8927, 'rightarrowtail': 8611, 'Psi': 936, 'copyright': 169, 'yen': 165, 'vartriangleleft': 8882, 'rasp': 700, 'triangleright': 9655, 'precsim': 8830, 'infty': 8734, 'geq': 8805, 'updownarrowbar': 8616, 'precnsim': 8936, 'H': 779, 'ulcorner': 8988, 'looparrowright': 8620, 'ncong': 8775, 'downarrow': 8595, 'circeq': 8791, 'subseteq': 8838, 'bigstar': 9733, 'prime': 8242, 'lceil': 8968, 'Rrightarrow': 8667, 'oiiint': 8752, 'curlywedge': 8911, 'vDash': 8872, 'lfloor': 8970, 'ddots': 8945, 'exists': 8707, 'underbar': 817, 'Pi': 928, 'leftrightarrows': 8646, 'sphericalangle': 8738, 'coprod': 8720, 'circledcirc': 
8858, 'gtrsim': 8819, 'gneqq': 8809, 'between': 8812, 'theta': 952, 'complement': 8705, 'arceq': 8792, 'nVdash': 8878, 'S': 167, 'wr': 8768, 'wp': 8472, 'backcong': 8780, 'lasp': 701, 'c': 807, 'nabla': 8711, 'dotplus': 8724, 'eta': 951, 'forall': 8704, 'eth': 240, 'colon': 58, 'sqcup': 8852, 'rightrightarrows': 8649, 'sqsupset': 8848, 'mapsto': 8614, 'bigtriangledown': 9661, 'sqsupseteq': 8850, 'propto': 8733, 'pi': 960, 'pm': 177, 'dots': 8230, 'nrightarrow': 8603, 'textasciiacute': 180, 'Doteq': 8785, 'breve': 774, 'sqcap': 8851, 'twoheadrightarrow': 8608, 'kappa': 954, 'vartriangle': 9653, 'diamondsuit': 9826, 'pitchfork': 8916, 'blacktriangleleft': 9664, 'nprec': 8832, 'vdots': 8942, 'curvearrowright': 8631, 'barwedge': 8892, 'multimap': 8888, 'textquestiondown': 191, 'cong': 8773, 'rtimes': 8906, 'rightzigzagarrow': 8669, 'rightarrow': 8594, 'leftarrow': 8592, '__sqrt__': 8730, 'twoheaddownarrow': 8609, 'oint': 8750, 'bigvee': 8897, 'eqdef': 8797, 'sterling': 163, 'phi': 981, 'Updownarrow': 8661, 'backprime': 8245, 'emdash': 8212, 'Gamma': 915, 'i': 305, 'rceil': 8969, 'leftharpoonup': 8636, 'Im': 8465, 'curvearrowleft': 8630, 'wedgeq': 8793, 'fallingdotseq': 8786, 'curlyeqprec': 8926, 'questeq': 8799, 'less': 60, 'upuparrows': 8648, 'tilde': 771, 'textasciigrave': 96, 'smallsetminus': 8726, 'ell': 8467, 'cup': 8746, 'danger': 9761, 'nVDash': 8879, 'cdotp': 183, 'cdots': 8943, 'hat': 770, 'eqgtr': 8925, 'enspace': 8194, 'psi': 968, 'frown': 8994, 'acute': 769, 'downzigzagarrow': 8623, 'ntriangleright': 8939, 'cupdot': 8845, 'circleddash': 8861, 'oslash': 8856, 'mho': 8487, 'd': 803, 'sqsubset': 8847, 'cdot': 8901, 'Omega': 937, 'OE': 338, 'veeeq': 8794, 'Finv': 8498, 't': 865, 'leftrightarrow': 8596, 'swarrow': 8601, 'rightthreetimes': 8908, 'rightleftharpoons': 8652, 'lesssim': 8818, 'searrow': 8600, 'because': 8757, 'gtrless': 8823, 'star': 8902, 'nsubset': 8836, 'zeta': 950, 'dddot': 8411, 'bigcirc': 9675, 'Supset': 8913, 'circ': 8728, 'slash': 8725, 'ocirc': 778, 'prod': 8719, 'twoheadleftarrow': 8606, 'daleth': 8504, 'upharpoonright': 8638, 'odot': 8857, 'Uparrow': 8657, 'O': 216, 'hookleftarrow': 8617, 'trianglerighteq': 8885, 'nsime': 8772, 'oe': 339, 'nwarrow': 8598, 'o': 248, 'ddddot': 8412, 'downharpoonright': 8642, 'succcurlyeq': 8829, 'gamma': 947, 'scrR': 8475, 'dag': 8224, 'thickspace': 8197, 'frakZ': 8488, 'lessdot': 8918, 'triangledown': 9663, 'ltimes': 8905, 'scrB': 8492, 'endash': 8211, 'scrE': 8496, 'scrF': 8497, 'scrH': 8459, 'scrI': 8464, 'rightharpoondown': 8641, 'scrL': 8466, 'scrM': 8499, 'frakC': 8493, 'nsupseteq': 8841, 'circledR': 174, 'circledS': 9416, 'ngtr': 8815, 'bigcap': 8898, 'scre': 8495, 'Downarrow': 8659, 'scrg': 8458, 'overleftrightarrow': 8417, 'scro': 8500, 'lnsim': 8934, 'eqcolon': 8789, 'curlyvee': 8910, 'urcorner': 8989, 'lbrace': 123, 'Bumpeq': 8782, 'delta': 948, 'boxtimes': 8864, 'overleftarrow': 8406, 'prurel': 8880, 'clubsuitopen': 9831, 'cwopencirclearrow': 8635, 'geqq': 8807, 'rightleftarrows': 8644, 'ac': 8766, 'ae': 230, 'int': 8747, 'rfloor': 8971, 'risingdotseq': 8787, 'nvdash': 8876, 'diamond': 8900, 'ddot': 776, 'backsim': 8765, 'oplus': 8853, 'triangleq': 8796, 'check': 780, 'ni': 8715, 'iiint': 8749, 'ne': 8800, 'lesseqgtr': 8922, 'obar': 9021, 'supseteq': 8839, 'nu': 957, 'AA': 8491, 'AE': 198, 'models': 8871, 'ominus': 8854, 'dashv': 8867, 'omega': 969, 'rq': 8217, 'Subset': 8912, 'rightharpoonup': 8640, 'Rdsh': 8627, 'bullet': 8729, 'divideontimes': 8903, 'lbrack': 91, 'textquotedblright': 8221, 'Colon': 8759, '%': 37, '$': 
36, '{': 123, '}': 125, '_': 95, 'imath': 0x131, 'circumflexaccent' : 770, 'combiningbreve' : 774, 'combiningoverline' : 772, 'combininggraveaccent' : 768, 'combiningacuteaccent' : 769, 'combiningdiaeresis' : 776, 'combiningtilde' : 771, 'combiningrightarrowabove' : 8407, 'combiningdotabove' : 775, 'to': 8594, 'succeq': 8829, 'emptyset': 8709, 'leftparen': 40, 'rightparen': 41, 'bigoplus': 10753, 'leftangle': 10216, 'rightangle': 10217, 'leftbrace': 124, 'rightbrace': 125, 'jmath': 567, 'bigodot': 10752, 'preceq': 8828, 'biguplus': 10756, 'epsilon': 949, 'vartheta': 977, 'bigotimes': 10754 } # Each element is a 4-tuple of the form: # src_start, src_end, dst_font, dst_start # stix_virtual_fonts = { 'bb': { 'rm': [ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 (0x0041, 0x0042, 'rm', 0x1d538), # A-B (0x0043, 0x0043, 'rm', 0x2102), # C (0x0044, 0x0047, 'rm', 0x1d53b), # D-G (0x0048, 0x0048, 'rm', 0x210d), # H (0x0049, 0x004d, 'rm', 0x1d540), # I-M (0x004e, 0x004e, 'rm', 0x2115), # N (0x004f, 0x004f, 'rm', 0x1d546), # O (0x0050, 0x0051, 'rm', 0x2119), # P-Q (0x0052, 0x0052, 'rm', 0x211d), # R (0x0053, 0x0059, 'rm', 0x1d54a), # S-Y (0x005a, 0x005a, 'rm', 0x2124), # Z (0x0061, 0x007a, 'rm', 0x1d552), # a-z (0x0393, 0x0393, 'rm', 0x213e), # \Gamma (0x03a0, 0x03a0, 'rm', 0x213f), # \Pi (0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma (0x03b3, 0x03b3, 'rm', 0x213d), # \gamma (0x03c0, 0x03c0, 'rm', 0x213c), # \pi ], 'it': [ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 (0x0041, 0x0042, 'it', 0xe154), # A-B (0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts) (0x0044, 0x0044, 'it', 0x2145), # D (0x0045, 0x0047, 'it', 0xe156), # E-G (0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts) (0x0049, 0x004d, 'it', 0xe159), # I-M (0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts) (0x004f, 0x004f, 'it', 0xe15e), # O (0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts) (0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts) (0x0053, 0x0059, 'it', 0xe15f), # S-Y (0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts) (0x0061, 0x0063, 'it', 0xe166), # a-c (0x0064, 0x0065, 'it', 0x2146), # d-e (0x0066, 0x0068, 'it', 0xe169), # f-h (0x0069, 0x006a, 'it', 0x2148), # i-j (0x006b, 0x007a, 'it', 0xe16c), # k-z (0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts) (0x03a0, 0x03a0, 'it', 0x213f), # \Pi (0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts) (0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts) (0x03c0, 0x03c0, 'it', 0x213c), # \pi ], 'bf': [ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 (0x0041, 0x005a, 'bf', 0xe38a), # A-Z (0x0061, 0x007a, 'bf', 0xe39d), # a-z (0x0393, 0x0393, 'bf', 0x213e), # \Gamma (0x03a0, 0x03a0, 'bf', 0x213f), # \Pi (0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma (0x03b3, 0x03b3, 'bf', 0x213d), # \gamma (0x03c0, 0x03c0, 'bf', 0x213c), # \pi ], }, 'cal': [ (0x0041, 0x005a, 'it', 0xe22d), # A-Z ], 'circled': { 'rm': [ (0x0030, 0x0030, 'rm', 0x24ea), # 0 (0x0031, 0x0039, 'rm', 0x2460), # 1-9 (0x0041, 0x005a, 'rm', 0x24b6), # A-Z (0x0061, 0x007a, 'rm', 0x24d0) # a-z ], 'it': [ (0x0030, 0x0030, 'rm', 0x24ea), # 0 (0x0031, 0x0039, 'rm', 0x2460), # 1-9 (0x0041, 0x005a, 'it', 0x24b6), # A-Z (0x0061, 0x007a, 'it', 0x24d0) # a-z ], 'bf': [ (0x0030, 0x0030, 'bf', 0x24ea), # 0 (0x0031, 0x0039, 'bf', 0x2460), # 1-9 (0x0041, 0x005a, 'bf', 0x24b6), # A-Z (0x0061, 0x007a, 'bf', 0x24d0) # a-z ], }, 'frak': { 'rm': [ (0x0041, 0x0042, 'rm', 0x1d504), # A-B (0x0043, 0x0043, 'rm', 0x212d), # C (0x0044, 0x0047, 'rm', 
0x1d507), # D-G (0x0048, 0x0048, 'rm', 0x210c), # H (0x0049, 0x0049, 'rm', 0x2111), # I (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q (0x0052, 0x0052, 'rm', 0x211c), # R (0x0053, 0x0059, 'rm', 0x1d516), # S-Y (0x005a, 0x005a, 'rm', 0x2128), # Z (0x0061, 0x007a, 'rm', 0x1d51e), # a-z ], 'it': [ (0x0041, 0x0042, 'rm', 0x1d504), # A-B (0x0043, 0x0043, 'rm', 0x212d), # C (0x0044, 0x0047, 'rm', 0x1d507), # D-G (0x0048, 0x0048, 'rm', 0x210c), # H (0x0049, 0x0049, 'rm', 0x2111), # I (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q (0x0052, 0x0052, 'rm', 0x211c), # R (0x0053, 0x0059, 'rm', 0x1d516), # S-Y (0x005a, 0x005a, 'rm', 0x2128), # Z (0x0061, 0x007a, 'rm', 0x1d51e), # a-z ], 'bf': [ (0x0041, 0x005a, 'bf', 0x1d56c), # A-Z (0x0061, 0x007a, 'bf', 0x1d586), # a-z ], }, 'scr': [ (0x0041, 0x0041, 'it', 0x1d49c), # A (0x0042, 0x0042, 'it', 0x212c), # B (0x0043, 0x0044, 'it', 0x1d49e), # C-D (0x0045, 0x0046, 'it', 0x2130), # E-F (0x0047, 0x0047, 'it', 0x1d4a2), # G (0x0048, 0x0048, 'it', 0x210b), # H (0x0049, 0x0049, 'it', 0x2110), # I (0x004a, 0x004b, 'it', 0x1d4a5), # J-K (0x004c, 0x004c, 'it', 0x2112), # L (0x004d, 0x003d, 'it', 0x2113), # M (0x004e, 0x0051, 'it', 0x1d4a9), # N-Q (0x0052, 0x0052, 'it', 0x211b), # R (0x0053, 0x005a, 'it', 0x1d4ae), # S-Z (0x0061, 0x0064, 'it', 0x1d4b6), # a-d (0x0065, 0x0065, 'it', 0x212f), # e (0x0066, 0x0066, 'it', 0x1d4bb), # f (0x0067, 0x0067, 'it', 0x210a), # g (0x0068, 0x006e, 'it', 0x1d4bd), # h-n (0x006f, 0x006f, 'it', 0x2134), # o (0x0070, 0x007a, 'it', 0x1d4c5), # p-z ], 'sf': { 'rm': [ (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 (0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z (0x0061, 0x007a, 'rm', 0x1d5ba), # a-z (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega (0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega (0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant (0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant (0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant (0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant (0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon (0x2202, 0x2202, 'rm', 0xe17c), # partial differential ], 'it': [ # These numerals are actually upright. We don't actually # want italic numerals ever. (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 (0x0041, 0x005a, 'it', 0x1d608), # A-Z (0x0061, 0x007a, 'it', 0x1d622), # a-z (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega (0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega (0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant (0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant (0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant (0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant (0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon ], 'bf': [ (0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9 (0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z (0x0061, 0x007a, 'bf', 0x1d5ee), # a-z (0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega (0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega (0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant (0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant (0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant (0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant (0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant (0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon (0x2202, 0x2202, 'bf', 0x1d789), # partial differential (0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla ], }, 'tt': [ (0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9 (0x0041, 0x005a, 'rm', 0x1d670), # A-Z (0x0061, 0x007a, 'rm', 0x1d68a) # a-z ], }
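A minimal sketch (not part of the module itself) of how these tables get consulted: map one TeX symbol name to its BaKoMa font/glyph pair and to its Unicode code point via the dictionaries defined above.

symbol = r'\infty'
font_name, glyph_index = latex_to_bakoma[symbol]   # ('cmsy10', 32)
codepoint = tex2uni[symbol.lstrip('\\')]           # 8734 == 0x221e (INFINITY)
print(font_name, glyph_index, hex(codepoint))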
gpl-3.0
nishantsbi/Data-Science-45min-Intros
choosing-k-in-kmeans/3d-example.py
25
2925
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

__author__ = "Josh Montague"
__license__ = "MIT License"

"""
This script is designed to run inline (%run 3d-example.py) in the
corresponding IPython notebook. It generates a 3d scatter plot using
scikit-learn data generation and with a number of samples and clusters
determined by the variables near the top.
"""

import argparse

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.datasets import make_blobs
import seaborn as sns

from gap_stats import gap_statistics
from gap_stats import plot_gap_statistics


def make_example_plot(args):
    """
    Create artificial data (blobs) and color them according to the
    appropriate blob center.
    """
    # read args
    samples = args.samples
    clusters = args.clusters

    # create some data
    X, y = make_blobs(n_samples=samples,
                      centers=clusters,
                      n_features=3,
                      # increase variance for illustration
                      cluster_std=1.5,
                      # fix random_state if you believe in determinism
                      #random_state=42
                      )

    # seaborn display settings
    sns.set(style='whitegrid', palette=sns.color_palette("Set2", clusters))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for i in range(clusters):
        # for each center, add data to the figure w/ appropriate label
        ax.plot(X[y == i, 0], X[y == i, 1], X[y == i, 2],
                'o', alpha=0.6, label='cluster {}'.format(i))
    ax.set_title('{} labeled clusters (ground truth)'.format(clusters))
    ax.legend(loc='upper left')

    # seaborn settings - no, really set these things this time, please
    sns.set(style='whitegrid', palette=sns.color_palette("Set2", clusters))
    #plt.show()

    # potentially return the data for later use
    data = None
    if args.gap:
        data = (X, y)
    return data


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--samples", dest="samples", type=int, default=100)
    parser.add_argument("-c", "--clusters", dest="clusters", type=int, default=5)
    # caveat: argparse's type=bool treats any non-empty string as True
    parser.add_argument("-g", "--gap", dest="gap", type=bool, default=False)
    args = parser.parse_args()

    data = make_example_plot(args)

    if args.gap:
        # i just really prefer the dark theme
        sns.set(style='darkgrid', palette='deep')
        # unpack
        X, y = data
        # run the gap statistic algorithm
        gaps, errs, difs = gap_statistics(X, ks=range(1, args.clusters + 5))
        # plot (intended for %matplotlib inline)
        plot_gap_statistics(gaps, errs, difs)
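The gap_stats module imported above lives in the same repository and is not reproduced here. As a stand-in, the sketch below uses only scikit-learn to generate comparable blobs and print the k-means inertia over a range of k values (the informal "elbow" heuristic, not the gap statistic itself).

# Self-contained sketch of a related way to choose k, using scikit-learn only.
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans

X, _ = make_blobs(n_samples=300, centers=5, n_features=3,
                  cluster_std=1.5, random_state=42)

# Inertia (within-cluster sum of squares) for candidate k values;
# the "elbow" in this curve is a common informal way to pick k.
for k in range(1, 10):
    km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X)
    print(k, round(km.inertia_, 1))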
unlicense
planetarymike/IDL-Colorbars
IDL_py_test/009_GRN-WHT_EXPONENTIAL.py
1
8787
from matplotlib.colors import LinearSegmentedColormap from numpy import nan, inf cm_data = [[0.00392157, 0., 0.], [0.00392157, 0., 0.], [0.00392157, 0.00392157, 0.], [0.00784314, 0.00784314, 0.], [0.00784314, 0.00784314, 0.00392157], [0.00784314, 0.0117647, 0.00392157], [0.0117647, 0.0156863, 0.00392157], [0.0117647, 0.0156863, 0.00392157], [0.0156863, 0.0196078, 0.00784314], [0.0156863, 0.0235294, 0.00784314], [0.0156863, 0.0235294, 0.00784314], [0.0196078, 0.027451, 0.00784314], [0.0196078, 0.0313725, 0.0117647], [0.0196078, 0.0313725, 0.0117647], [0.0235294, 0.0352941, 0.0117647], [0.0235294, 0.0392157, 0.0117647], [0.027451, 0.0392157, 0.0156863], [0.027451, 0.0431373, 0.0156863], [0.027451, 0.0470588, 0.0156863], [0.0313725, 0.0470588, 0.0196078], [0.0313725, 0.0509804, 0.0196078], [0.0352941, 0.054902, 0.0196078], [0.0352941, 0.054902, 0.0196078], [0.0352941, 0.0588235, 0.0235294], [0.0392157, 0.0627451, 0.0235294], [0.0392157, 0.0627451, 0.0235294], [0.0392157, 0.0666667, 0.0235294], [0.0431373, 0.0705882, 0.027451], [0.0431373, 0.0705882, 0.027451], [0.0470588, 0.0745098, 0.027451], [0.0470588, 0.0784314, 0.027451], [0.0470588, 0.0784314, 0.0313725], [0.0509804, 0.0823529, 0.0313725], [0.0509804, 0.0862745, 0.0313725], [0.0509804, 0.0862745, 0.0313725], [0.054902, 0.0901961, 0.0352941], [0.054902, 0.0941176, 0.0352941], [0.0588235, 0.0941176, 0.0352941], [0.0588235, 0.0980392, 0.0392157], [0.0588235, 0.101961, 0.0392157], [0.0627451, 0.101961, 0.0392157], [0.0627451, 0.105882, 0.0392157], [0.0666667, 0.109804, 0.0431373], [0.0666667, 0.109804, 0.0431373], [0.0666667, 0.113725, 0.0431373], [0.0705882, 0.117647, 0.0431373], [0.0705882, 0.117647, 0.0470588], [0.0705882, 0.121569, 0.0470588], [0.0745098, 0.12549, 0.0470588], [0.0745098, 0.12549, 0.0470588], [0.0784314, 0.129412, 0.0509804], [0.0784314, 0.133333, 0.0509804], [0.0784314, 0.133333, 0.0509804], [0.0823529, 0.137255, 0.0509804], [0.0823529, 0.141176, 0.054902], [0.0823529, 0.141176, 0.054902], [0.0862745, 0.145098, 0.054902], [0.0862745, 0.14902, 0.0588235], [0.0901961, 0.14902, 0.0588235], [0.0901961, 0.152941, 0.0588235], [0.0901961, 0.156863, 0.0588235], [0.0941176, 0.156863, 0.0627451], [0.0941176, 0.160784, 0.0627451], [0.0980392, 0.164706, 0.0627451], [0.0980392, 0.164706, 0.0627451], [0.0980392, 0.168627, 0.0666667], [0.101961, 0.172549, 0.0666667], [0.101961, 0.172549, 0.0666667], [0.101961, 0.176471, 0.0666667], [0.105882, 0.180392, 0.0705882], [0.105882, 0.180392, 0.0705882], [0.109804, 0.184314, 0.0705882], [0.109804, 0.188235, 0.0705882], [0.109804, 0.188235, 0.0745098], [0.113725, 0.192157, 0.0745098], [0.113725, 0.196078, 0.0745098], [0.113725, 0.196078, 0.0784314], [0.117647, 0.2, 0.0784314], [0.117647, 0.203922, 0.0784314], [0.121569, 0.203922, 0.0784314], [0.121569, 0.207843, 0.0823529], [0.121569, 0.211765, 0.0823529], [0.12549, 0.211765, 0.0823529], [0.12549, 0.215686, 0.0823529], [0.129412, 0.219608, 0.0862745], [0.129412, 0.219608, 0.0862745], [0.129412, 0.223529, 0.0862745], [0.133333, 0.227451, 0.0862745], [0.133333, 0.227451, 0.0901961], [0.133333, 0.231373, 0.0901961], [0.137255, 0.235294, 0.0901961], [0.137255, 0.235294, 0.0901961], [0.141176, 0.239216, 0.0941176], [0.141176, 0.243137, 0.0941176], [0.141176, 0.243137, 0.0941176], [0.145098, 0.247059, 0.0980392], [0.145098, 0.25098, 0.0980392], [0.145098, 0.25098, 0.0980392], [0.14902, 0.254902, 0.0980392], [0.14902, 0.258824, 0.101961], [0.152941, 0.258824, 0.101961], [0.152941, 0.262745, 0.101961], [0.152941, 0.266667, 0.101961], [0.156863, 
0.266667, 0.105882], [0.156863, 0.270588, 0.105882], [0.160784, 0.27451, 0.105882], [0.160784, 0.27451, 0.105882], [0.160784, 0.278431, 0.109804], [0.164706, 0.282353, 0.109804], [0.164706, 0.282353, 0.109804], [0.164706, 0.286275, 0.109804], [0.168627, 0.290196, 0.113725], [0.168627, 0.290196, 0.113725], [0.172549, 0.294118, 0.113725], [0.172549, 0.298039, 0.117647], [0.172549, 0.298039, 0.117647], [0.176471, 0.301961, 0.117647], [0.176471, 0.305882, 0.117647], [0.176471, 0.305882, 0.121569], [0.180392, 0.309804, 0.121569], [0.180392, 0.313725, 0.121569], [0.184314, 0.313725, 0.121569], [0.184314, 0.317647, 0.12549], [0.184314, 0.321569, 0.12549], [0.188235, 0.321569, 0.12549], [0.188235, 0.32549, 0.12549], [0.192157, 0.329412, 0.129412], [0.192157, 0.329412, 0.129412], [0.192157, 0.333333, 0.129412], [0.196078, 0.337255, 0.129412], [0.196078, 0.341176, 0.133333], [0.196078, 0.345098, 0.133333], [0.2, 0.352941, 0.133333], [0.2, 0.356863, 0.137255], [0.203922, 0.360784, 0.137255], [0.203922, 0.368627, 0.137255], [0.203922, 0.372549, 0.137255], [0.211765, 0.376471, 0.141176], [0.211765, 0.384314, 0.141176], [0.215686, 0.388235, 0.141176], [0.219608, 0.392157, 0.141176], [0.219608, 0.4, 0.145098], [0.223529, 0.403922, 0.145098], [0.227451, 0.407843, 0.145098], [0.227451, 0.415686, 0.145098], [0.231373, 0.419608, 0.14902], [0.235294, 0.423529, 0.14902], [0.235294, 0.431373, 0.14902], [0.239216, 0.435294, 0.14902], [0.243137, 0.439216, 0.152941], [0.243137, 0.447059, 0.152941], [0.247059, 0.45098, 0.152941], [0.25098, 0.454902, 0.156863], [0.25098, 0.462745, 0.156863], [0.258824, 0.466667, 0.156863], [0.266667, 0.470588, 0.156863], [0.27451, 0.478431, 0.160784], [0.278431, 0.482353, 0.160784], [0.286275, 0.486275, 0.160784], [0.294118, 0.494118, 0.160784], [0.301961, 0.498039, 0.164706], [0.309804, 0.501961, 0.164706], [0.317647, 0.509804, 0.164706], [0.32549, 0.513725, 0.164706], [0.333333, 0.517647, 0.168627], [0.337255, 0.52549, 0.168627], [0.345098, 0.529412, 0.168627], [0.352941, 0.533333, 0.168627], [0.360784, 0.541176, 0.172549], [0.368627, 0.545098, 0.172549], [0.376471, 0.54902, 0.172549], [0.384314, 0.556863, 0.176471], [0.388235, 0.560784, 0.176471], [0.396078, 0.568627, 0.176471], [0.403922, 0.572549, 0.176471], [0.411765, 0.576471, 0.180392], [0.419608, 0.584314, 0.180392], [0.427451, 0.588235, 0.180392], [0.435294, 0.592157, 0.180392], [0.443137, 0.6, 0.184314], [0.447059, 0.603922, 0.184314], [0.454902, 0.607843, 0.184314], [0.462745, 0.615686, 0.184314], [0.470588, 0.619608, 0.188235], [0.478431, 0.623529, 0.184314], [0.486275, 0.631373, 0.192157], [0.494118, 0.635294, 0.203922], [0.498039, 0.639216, 0.215686], [0.505882, 0.647059, 0.227451], [0.513725, 0.65098, 0.239216], [0.521569, 0.654902, 0.25098], [0.529412, 0.662745, 0.262745], [0.537255, 0.666667, 0.27451], [0.545098, 0.670588, 0.286275], [0.552941, 0.678431, 0.298039], [0.556863, 0.682353, 0.309804], [0.564706, 0.686275, 0.321569], [0.572549, 0.694118, 0.333333], [0.580392, 0.698039, 0.345098], [0.588235, 0.701961, 0.352941], [0.596078, 0.709804, 0.364706], [0.603922, 0.713725, 0.376471], [0.611765, 0.717647, 0.388235], [0.615686, 0.72549, 0.4], [0.623529, 0.729412, 0.411765], [0.631373, 0.733333, 0.423529], [0.639216, 0.741176, 0.435294], [0.647059, 0.745098, 0.447059], [0.654902, 0.74902, 0.458824], [0.662745, 0.756863, 0.470588], [0.666667, 0.760784, 0.482353], [0.67451, 0.764706, 0.494118], [0.682353, 0.772549, 0.505882], [0.690196, 0.776471, 0.513725], [0.698039, 0.784314, 0.52549], [0.705882, 0.788235, 0.537255], 
[0.713725, 0.792157, 0.54902], [0.721569, 0.8, 0.560784], [0.72549, 0.803922, 0.572549], [0.733333, 0.807843, 0.584314], [0.741176, 0.815686, 0.596078], [0.74902, 0.819608, 0.607843], [0.756863, 0.823529, 0.619608], [0.764706, 0.831373, 0.631373], [0.772549, 0.835294, 0.643137], [0.776471, 0.839216, 0.654902], [0.784314, 0.847059, 0.666667], [0.792157, 0.85098, 0.67451], [0.8, 0.854902, 0.686275], [0.807843, 0.862745, 0.698039], [0.815686, 0.866667, 0.709804], [0.823529, 0.870588, 0.721569], [0.831373, 0.878431, 0.733333], [0.835294, 0.882353, 0.745098], [0.843137, 0.886275, 0.756863], [0.85098, 0.894118, 0.768627], [0.858824, 0.898039, 0.780392], [0.866667, 0.901961, 0.792157], [0.87451, 0.909804, 0.803922], [0.882353, 0.913725, 0.815686], [0.886275, 0.917647, 0.827451], [0.894118, 0.92549, 0.835294], [0.901961, 0.929412, 0.847059], [0.909804, 0.933333, 0.858824], [0.917647, 0.941176, 0.870588], [0.92549, 0.945098, 0.882353], [0.933333, 0.94902, 0.894118], [0.941176, 0.956863, 0.905882], [0.945098, 0.960784, 0.917647], [0.952941, 0.964706, 0.929412], [0.960784, 0.972549, 0.941176], [0.968627, 0.976471, 0.952941], [0.976471, 0.980392, 0.964706], [0.984314, 0.988235, 0.976471], [0.992157, 0.992157, 0.988235], [1., 1., 1.]] test_cm = LinearSegmentedColormap.from_list(__file__, cm_data) if __name__ == "__main__": import matplotlib.pyplot as plt import numpy as np try: from pycam02ucs.cm.viscm import viscm viscm(test_cm) except ImportError: print("pycam02ucs not found, falling back on simple display") plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=test_cm) plt.show()
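The pattern at the end of this file generalizes: any N x 3 list of RGB triples in [0, 1] can be wrapped into a matplotlib colormap with LinearSegmentedColormap.from_list. A minimal, self-contained sketch follows, using a made-up three-color ramp instead of the 256-entry table above.

# Minimal usage sketch of LinearSegmentedColormap.from_list.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

cm_data = [[0.0, 0.0, 0.0], [0.0, 0.5, 0.0], [1.0, 1.0, 1.0]]  # black -> green -> white
cmap = LinearSegmentedColormap.from_list('grn_wht_sketch', cm_data)

gradient = np.linspace(0, 1, 256)[None, :]
plt.imshow(gradient, aspect='auto', cmap=cmap)
plt.axis('off')
plt.show()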
gpl-2.0
ryfeus/lambda-packs
Sklearn_scipy_numpy/source/sklearn/utils/testing.py
6
26970
"""Testing utilities.""" # Copyright (c) 2011, 2012 # Authors: Pietro Berkes, # Andreas Muller # Mathieu Blondel # Olivier Grisel # Arnaud Joly # Denis Engemann # Giorgio Patrini # License: BSD 3 clause import os import inspect import pkgutil import warnings import sys import re import platform import struct import scipy as sp import scipy.io from functools import wraps try: # Python 2 from urllib2 import urlopen from urllib2 import HTTPError except ImportError: # Python 3+ from urllib.request import urlopen from urllib.error import HTTPError import tempfile import shutil import os.path as op import atexit # WindowsError only exist on Windows try: WindowsError except NameError: WindowsError = None import sklearn from sklearn.base import BaseEstimator from sklearn.externals import joblib # Conveniently import all assertions in one place. from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_true from nose.tools import assert_false from nose.tools import assert_raises from nose.tools import raises from nose import SkipTest from nose import with_setup from numpy.testing import assert_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_less from numpy.testing import assert_approx_equal import numpy as np from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin) from sklearn.cluster import DBSCAN __all__ = ["assert_equal", "assert_not_equal", "assert_raises", "assert_raises_regexp", "raises", "with_setup", "assert_true", "assert_false", "assert_almost_equal", "assert_array_equal", "assert_array_almost_equal", "assert_array_less", "assert_less", "assert_less_equal", "assert_greater", "assert_greater_equal", "assert_approx_equal"] try: from nose.tools import assert_in, assert_not_in except ImportError: # Nose < 1.0.0 def assert_in(x, container): assert_true(x in container, msg="%r in %r" % (x, container)) def assert_not_in(x, container): assert_false(x in container, msg="%r in %r" % (x, container)) try: from nose.tools import assert_raises_regex except ImportError: # for Python 2 def assert_raises_regex(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs): """Helper function to check for message patterns in exceptions""" not_raised = False try: callable_obj(*args, **kwargs) not_raised = True except expected_exception as e: error_message = str(e) if not re.compile(expected_regexp).search(error_message): raise AssertionError("Error message should match pattern " "%r. %r does not." 
% (expected_regexp, error_message)) if not_raised: raise AssertionError("%s not raised by %s" % (expected_exception.__name__, callable_obj.__name__)) # assert_raises_regexp is deprecated in Python 3.4 in favor of # assert_raises_regex but lets keep the bacward compat in scikit-learn with # the old name for now assert_raises_regexp = assert_raises_regex def _assert_less(a, b, msg=None): message = "%r is not lower than %r" % (a, b) if msg is not None: message += ": " + msg assert a < b, message def _assert_greater(a, b, msg=None): message = "%r is not greater than %r" % (a, b) if msg is not None: message += ": " + msg assert a > b, message def assert_less_equal(a, b, msg=None): message = "%r is not lower than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a <= b, message def assert_greater_equal(a, b, msg=None): message = "%r is not greater than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a >= b, message def assert_warns(warning_class, func, *args, **kw): """Test that a certain warning occurs. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func` Returns ------- result : the return value of `func` """ # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Trigger a warning. result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = any(warning.category is warning_class for warning in w) if not found: raise AssertionError("%s did not give warning: %s( is %s)" % (func.__name__, warning_class, w)) return result def assert_warns_message(warning_class, message, func, *args, **kw): # very important to avoid uncontrolled state propagation """Test that a certain warning occurs and with a certain message. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. message : str | callable The entire message or a substring to test for. If callable, it takes a string as argument and will trigger an assertion error if it returns `False`. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func`. Returns ------- result : the return value of `func` """ clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") if hasattr(np, 'VisibleDeprecationWarning'): # Let's not catch the numpy internal DeprecationWarnings warnings.simplefilter('ignore', np.VisibleDeprecationWarning) # Trigger a warning. 
result = func(*args, **kw) # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = [issubclass(warning.category, warning_class) for warning in w] if not any(found): raise AssertionError("No warning raised for %s with class " "%s" % (func.__name__, warning_class)) message_found = False # Checks the message of all warnings belong to warning_class for index in [i for i, x in enumerate(found) if x]: # substring will match, the entire message with typo won't msg = w[index].message # For Python 3 compatibility msg = str(msg.args[0] if hasattr(msg, 'args') else msg) if callable(message): # add support for certain tests check_in_message = message else: check_in_message = lambda msg: message in msg if check_in_message(msg): message_found = True break if not message_found: raise AssertionError("Did not receive the message you expected " "('%s') for <%s>, got: '%s'" % (message, func.__name__, msg)) return result # To remove when we support numpy 1.7 def assert_no_warnings(func, *args, **kw): # XXX: once we may depend on python >= 2.6, this can be replaced by the # warnings module context manager. # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] if len(w) > 0: raise AssertionError("Got warnings when calling %s: %s" % (func.__name__, w)) return result def ignore_warnings(obj=None): """ Context manager and decorator to ignore warnings Note. Using this (in both variants) will clear all warnings from all python modules loaded. In case you need to test cross-module-warning-logging this is not your tool of choice. Examples -------- >>> with ignore_warnings(): ... warnings.warn('buhuhuhu') >>> def nasty_warn(): ... warnings.warn('buhuhuhu') ... print(42) >>> ignore_warnings(nasty_warn)() 42 """ if callable(obj): return _ignore_warnings(obj) else: return _IgnoreWarnings() def _ignore_warnings(fn): """Decorator to catch and hide warnings without visual nesting""" @wraps(fn) def wrapper(*args, **kwargs): # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') return fn(*args, **kwargs) w[:] = [] return wrapper class _IgnoreWarnings(object): """Improved and simplified Python warnings context manager Copied from Python 2.7.5 and modified as required. """ def __init__(self): """ Parameters ========== category : warning class The category to filter. Defaults to Warning. If None, all categories will be muted. 
""" self._record = True self._module = sys.modules['warnings'] self._entered = False self.log = [] def __repr__(self): args = [] if self._record: args.append("record=True") if self._module is not sys.modules['warnings']: args.append("module=%r" % self._module) name = type(self).__name__ return "%s(%s)" % (name, ", ".join(args)) def __enter__(self): clean_warning_registry() # be safe and not propagate state + chaos warnings.simplefilter('always') if self._entered: raise RuntimeError("Cannot enter %r twice" % self) self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] self._showwarning = self._module.showwarning if self._record: self.log = [] def showwarning(*args, **kwargs): self.log.append(warnings.WarningMessage(*args, **kwargs)) self._module.showwarning = showwarning return self.log else: return None def __exit__(self, *exc_info): if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters self._module.showwarning = self._showwarning self.log[:] = [] clean_warning_registry() # be safe and not propagate state + chaos try: from nose.tools import assert_less except ImportError: assert_less = _assert_less try: from nose.tools import assert_greater except ImportError: assert_greater = _assert_greater def _assert_allclose(actual, desired, rtol=1e-7, atol=0, err_msg='', verbose=True): actual, desired = np.asanyarray(actual), np.asanyarray(desired) if np.allclose(actual, desired, rtol=rtol, atol=atol): return msg = ('Array not equal to tolerance rtol=%g, atol=%g: ' 'actual %s, desired %s') % (rtol, atol, actual, desired) raise AssertionError(msg) if hasattr(np.testing, 'assert_allclose'): assert_allclose = np.testing.assert_allclose else: assert_allclose = _assert_allclose def assert_raise_message(exceptions, message, function, *args, **kwargs): """Helper function to test error messages in exceptions Parameters ---------- exceptions : exception or tuple of exception Name of the estimator func : callable Calable object to raise error *args : the positional arguments to `func`. **kw : the keyword arguments to `func` """ try: function(*args, **kwargs) except exceptions as e: error_message = str(e) if message not in error_message: raise AssertionError("Error message does not include the expected" " string: %r. Observed error message: %r" % (message, error_message)) else: # concatenate exception names if isinstance(exceptions, tuple): names = " or ".join(e.__name__ for e in exceptions) else: names = exceptions.__name__ raise AssertionError("%s not raised by %s" % (names, function.__name__)) def fake_mldata(columns_dict, dataname, matfile, ordering=None): """Create a fake mldata data set. Parameters ---------- columns_dict : dict, keys=str, values=ndarray Contains data as columns_dict[column_name] = array of data. dataname : string Name of data set. matfile : string or file object The file name string or the file-like object of the output file. ordering : list, default None List of column_names, determines the ordering in the data set. Notes ----- This function transposes all arrays, while fetch_mldata only transposes 'data', keep that into account in the tests. 
""" datasets = dict(columns_dict) # transpose all variables for name in datasets: datasets[name] = datasets[name].T if ordering is None: ordering = sorted(list(datasets.keys())) # NOTE: setting up this array is tricky, because of the way Matlab # re-packages 1D arrays datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)), dtype='object') for i, name in enumerate(ordering): datasets['mldata_descr_ordering'][0, i] = name scipy.io.savemat(matfile, datasets, oned_as='column') class mock_mldata_urlopen(object): def __init__(self, mock_datasets): """Object that mocks the urlopen function to fake requests to mldata. `mock_datasets` is a dictionary of {dataset_name: data_dict}, or {dataset_name: (data_dict, ordering). `data_dict` itself is a dictionary of {column_name: data_array}, and `ordering` is a list of column_names to determine the ordering in the data set (see `fake_mldata` for details). When requesting a dataset with a name that is in mock_datasets, this object creates a fake dataset in a StringIO object and returns it. Otherwise, it raises an HTTPError. """ self.mock_datasets = mock_datasets def __call__(self, urlname): dataset_name = urlname.split('/')[-1] if dataset_name in self.mock_datasets: resource_name = '_' + dataset_name from io import BytesIO matfile = BytesIO() dataset = self.mock_datasets[dataset_name] ordering = None if isinstance(dataset, tuple): dataset, ordering = dataset fake_mldata(dataset, resource_name, matfile, ordering) matfile.seek(0) return matfile else: raise HTTPError(urlname, 404, dataset_name + " is not available", [], None) def install_mldata_mock(mock_datasets): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets) def uninstall_mldata_mock(): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = urlopen # Meta estimators need another estimator to be instantiated. META_ESTIMATORS = ["OneVsOneClassifier", "OutputCodeClassifier", "OneVsRestClassifier", "RFE", "RFECV", "BaseEnsemble"] # estimators that there is no way to default-construct sensibly OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV", "SelectFromModel"] # some trange ones DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'TfidfTransformer', 'TfidfVectorizer', 'IsotonicRegression', 'OneHotEncoder', 'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier', 'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures', 'GaussianRandomProjectionHash', 'HashingVectorizer', 'CheckingClassifier', 'PatchExtractor', 'CountVectorizer', # GradientBoosting base estimators, maybe should # exclude them in another way 'ZeroEstimator', 'ScaledLogOddsEstimator', 'QuantileEstimator', 'MeanEstimator', 'LogOddsEstimator', 'PriorProbabilityEstimator', '_SigmoidCalibration', 'VotingClassifier'] def all_estimators(include_meta_estimators=False, include_other=False, type_filter=None, include_dont_test=False): """Get a list of all estimators from sklearn. This function crawls the module and gets all classes that inherit from BaseEstimator. Classes that are defined in test-modules are not included. By default meta_estimators such as GridSearchCV are also not included. Parameters ---------- include_meta_estimators : boolean, default=False Whether to include meta-estimators that can be constructed using an estimator as their first argument. 
These are currently BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier, OneVsRestClassifier, RFE, RFECV. include_other : boolean, default=False Wether to include meta-estimators that are somehow special and can not be default-constructed sensibly. These are currently Pipeline, FeatureUnion and GridSearchCV include_dont_test : boolean, default=False Whether to include "special" label estimator or test processors. type_filter : string, list of string, or None, default=None Which kind of estimators should be returned. If None, no filter is applied and all estimators are returned. Possible values are 'classifier', 'regressor', 'cluster' and 'transformer' to get estimators only of these specific types, or a list of these to get the estimators that fit at least one of the types. Returns ------- estimators : list of tuples List of (name, class), where ``name`` is the class name as string and ``class`` is the actuall type of the class. """ def is_abstract(c): if not(hasattr(c, '__abstractmethods__')): return False if not len(c.__abstractmethods__): return False return True all_classes = [] # get parent folder path = sklearn.__path__ for importer, modname, ispkg in pkgutil.walk_packages( path=path, prefix='sklearn.', onerror=lambda x: None): if ".tests." in modname: continue module = __import__(modname, fromlist="dummy") classes = inspect.getmembers(module, inspect.isclass) all_classes.extend(classes) all_classes = set(all_classes) estimators = [c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != 'BaseEstimator')] # get rid of abstract base classes estimators = [c for c in estimators if not is_abstract(c[1])] if not include_dont_test: estimators = [c for c in estimators if not c[0] in DONT_TEST] if not include_other: estimators = [c for c in estimators if not c[0] in OTHER] # possibly get rid of meta estimators if not include_meta_estimators: estimators = [c for c in estimators if not c[0] in META_ESTIMATORS] if type_filter is not None: if not isinstance(type_filter, list): type_filter = [type_filter] else: type_filter = list(type_filter) # copy filtered_estimators = [] filters = {'classifier': ClassifierMixin, 'regressor': RegressorMixin, 'transformer': TransformerMixin, 'cluster': ClusterMixin} for name, mixin in filters.items(): if name in type_filter: type_filter.remove(name) filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)]) estimators = filtered_estimators if type_filter: raise ValueError("Parameter type_filter must be 'classifier', " "'regressor', 'transformer', 'cluster' or None, got" " %s." % repr(type_filter)) # drop duplicates, sort for reproducibility return sorted(set(estimators)) def set_random_state(estimator, random_state=0): """Set random state of an estimator if it has the `random_state` param. Classes for whom random_state is deprecated are ignored. Currently DBSCAN is one such class. """ if isinstance(estimator, DBSCAN): return if "random_state" in estimator.get_params(): estimator.set_params(random_state=random_state) def if_matplotlib(func): """Test decorator that skips test if matplotlib not installed. 
""" @wraps(func) def run_test(*args, **kwargs): try: import matplotlib matplotlib.use('Agg', warn=False) # this fails if no $DISPLAY specified import matplotlib.pyplot as plt plt.figure() except ImportError: raise SkipTest('Matplotlib not available.') else: return func(*args, **kwargs) return run_test def skip_if_32bit(func): """Test decorator that skips tests on 32bit platforms.""" @wraps(func) def run_test(*args, **kwargs): bits = 8 * struct.calcsize("P") if bits == 32: raise SkipTest('Test skipped on 32bit platforms.') else: return func(*args, **kwargs) return run_test def if_not_mac_os(versions=('10.7', '10.8', '10.9'), message='Multi-process bug in Mac OS X >= 10.7 ' '(see issue #636)'): """Test decorator that skips test if OS is Mac OS X and its major version is one of ``versions``. """ warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed" " in 0.19: use the safer and more generic" " if_safe_multiprocessing_with_blas instead", DeprecationWarning) mac_version, _, _ = platform.mac_ver() skip = '.'.join(mac_version.split('.')[:2]) in versions def decorator(func): if skip: @wraps(func) def func(*args, **kwargs): raise SkipTest(message) return func return decorator def if_safe_multiprocessing_with_blas(func): """Decorator for tests involving both BLAS calls and multiprocessing Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with some implementation of BLAS (or other libraries that manage an internal posix thread pool) can cause a crash or a freeze of the Python process. In practice all known packaged distributions (from Linux distros or Anaconda) of BLAS under Linux seems to be safe. So we this problem seems to only impact OSX users. This wrapper makes it possible to skip tests that can possibly cause this crash under OS X with. Under Python 3.4+ it is possible to use the `forkserver` start method for multiprocessing to avoid this issue. However it can cause pickling errors on interactively defined functions. It therefore not enabled by default. """ @wraps(func) def run_test(*args, **kwargs): if sys.platform == 'darwin': raise SkipTest( "Possible multi-process bug with some BLAS") return func(*args, **kwargs) return run_test def clean_warning_registry(): """Safe way to reset warnings """ warnings.resetwarnings() reg = "__warningregistry__" for mod_name, mod in list(sys.modules.items()): if 'six.moves' in mod_name: continue if hasattr(mod, reg): getattr(mod, reg).clear() def check_skip_network(): if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)): raise SkipTest("Text tutorial requires large dataset download") def check_skip_travis(): """Skip test if being run on Travis.""" if os.environ.get('TRAVIS') == "true": raise SkipTest("This test needs to be skipped on Travis") def _delete_folder(folder_path, warn=False): """Utility function to cleanup a temporary folder if still existing. 
Copy from joblib.pool (for independance)""" try: if os.path.exists(folder_path): # This can fail under windows, # but will succeed when called by atexit shutil.rmtree(folder_path) except WindowsError: if warn: warnings.warn("Could not delete temporary folder %s" % folder_path) class TempMemmap(object): def __init__(self, data, mmap_mode='r'): self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_') self.mmap_mode = mmap_mode self.data = data def __enter__(self): fpath = op.join(self.temp_folder, 'data.pkl') joblib.dump(self.data, fpath) data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode) atexit.register(lambda: _delete_folder(self.temp_folder, warn=True)) return data_read_only def __exit__(self, exc_type, exc_val, exc_tb): _delete_folder(self.temp_folder) with_network = with_setup(check_skip_network) with_travis = with_setup(check_skip_travis)
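For orientation, here is a hypothetical usage sketch of two helpers defined above. It assumes this module is importable as sklearn.utils.testing with nose installed (true for scikit-learn releases of that era); it is not part of the original file.

# Hedged usage sketch of assert_warns and ignore_warnings (internal test helpers).
import warnings
from sklearn.utils.testing import assert_warns, ignore_warnings  # import path assumed

def noisy():
    warnings.warn("something changed", UserWarning)
    return 42

# Passes because a UserWarning is raised; the wrapped function's result is returned.
assert assert_warns(UserWarning, noisy) == 42

@ignore_warnings
def quiet():
    warnings.warn("hidden")
    return "ok"

assert quiet() == "ok"  # the warning is swallowed by the decorator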
mit
huzq/scikit-learn
examples/neighbors/plot_digits_kde_sampling.py
50
2007
""" ========================= Kernel Density Estimation ========================= This example shows how kernel density estimation (KDE), a powerful non-parametric density estimation technique, can be used to learn a generative model for a dataset. With this generative model in place, new samples can be drawn. These new samples reflect the underlying model of the data. """ import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_digits from sklearn.neighbors import KernelDensity from sklearn.decomposition import PCA from sklearn.model_selection import GridSearchCV # load the data digits = load_digits() # project the 64-dimensional data to a lower dimension pca = PCA(n_components=15, whiten=False) data = pca.fit_transform(digits.data) # use grid search cross-validation to optimize the bandwidth params = {'bandwidth': np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), params) grid.fit(data) print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth)) # use the best estimator to compute the kernel density estimate kde = grid.best_estimator_ # sample 44 new points from the data new_data = kde.sample(44, random_state=0) new_data = pca.inverse_transform(new_data) # turn data into a 4x11 grid new_data = new_data.reshape((4, 11, -1)) real_data = digits.data[:44].reshape((4, 11, -1)) # plot real digits and resampled digits fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[])) for j in range(11): ax[4, j].set_visible(False) for i in range(4): im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation='nearest') im.set_clim(0, 16) im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation='nearest') im.set_clim(0, 16) ax[0, 5].set_title('Selection from the input data') ax[5, 5].set_title('"New" digits drawn from the kernel density model') plt.show()
bsd-3-clause
ArtsiomCh/tensorflow
tensorflow/examples/learn/iris_custom_model.py
37
3651
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of Estimator for Iris plant dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from sklearn import datasets from sklearn import metrics from sklearn import model_selection import tensorflow as tf X_FEATURE = 'x' # Name of the input feature. def my_model(features, labels, mode): """DNN with three hidden layers, and dropout of 0.1 probability.""" # Create three fully connected layers respectively of size 10, 20, and 10 with # each layer having a dropout probability of 0.1. net = features[X_FEATURE] for units in [10, 20, 10]: net = tf.layers.dense(net, units=units, activation=tf.nn.relu) net = tf.layers.dropout(net, rate=0.1) # Compute logits (1 per class). logits = tf.layers.dense(net, 3, activation=None) # Compute predictions. predicted_classes = tf.argmax(logits, 1) if mode == tf.estimator.ModeKeys.PREDICT: predictions = { 'class': predicted_classes, 'prob': tf.nn.softmax(logits) } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Convert the labels to a one-hot tensor of shape (length of features, 3) and # with a on-value of 1 for each one-hot vector of length 3. onehot_labels = tf.one_hot(labels, 3, 1, 0) # Compute loss. loss = tf.losses.softmax_cross_entropy( onehot_labels=onehot_labels, logits=logits) # Create training op. if mode == tf.estimator.ModeKeys.TRAIN: optimizer = tf.train.AdagradOptimizer(learning_rate=0.1) train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step()) return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) # Compute evaluation metrics. eval_metric_ops = { 'accuracy': tf.metrics.accuracy( labels=labels, predictions=predicted_classes) } return tf.estimator.EstimatorSpec( mode, loss=loss, eval_metric_ops=eval_metric_ops) def main(unused_argv): iris = datasets.load_iris() x_train, x_test, y_train, y_test = model_selection.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) classifier = tf.estimator.Estimator(model_fn=my_model) # Train. train_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True) classifier.train(input_fn=train_input_fn, steps=1000) # Predict. test_input_fn = tf.estimator.inputs.numpy_input_fn( x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False) predictions = classifier.predict(input_fn=test_input_fn) y_predicted = np.array(list(p['class'] for p in predictions)) y_predicted = y_predicted.reshape(np.array(y_test).shape) # Score with sklearn. score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy (sklearn): {0:f}'.format(score)) # Score with tensorflow. scores = classifier.evaluate(input_fn=test_input_fn) print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy'])) if __name__ == '__main__': tf.app.run()
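For readers more familiar with Keras, roughly the same 10-20-10 architecture with 0.1 dropout can be written as below. This is a sketch against the tf.keras API, offered for orientation only; it is not part of the original tf.estimator example.

# Hedged sketch: the same layer stack expressed with tf.keras instead of tf.layers.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(20, activation='relu'),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(3),  # logits, one per Iris class
])
model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])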
apache-2.0
jfsehuanes/aktivitarium
main.py
1
1331
__author__ = 'juan'

from auxiliary import *
import numpy as np
import pandas as pd
from IPython import embed


def fill_curr_course(current_course, persons_in_course, persons_data_frame):
    """
    Function returns the next matching person.
    :param current_course:
    :param persons_in_course:
    :param persons_data_frame:
    """
    room_persons = persons_in_course
    embed()
    quit()
    pass


if __name__ == '__main__':
    activities_df = pd.read_csv('./lista_de_actividades.csv')
    days = len(activities_df)
    persons_df = pd.read_csv('./lista_de_prueba.csv')

    tmp_dict = {'day': '', 'person': '', 'course': '', 'room': '', 'gender': ''}
    # pandas needs an explicit index when every value in the dict is a scalar;
    # an empty index yields an empty table with the desired columns
    entries_df = pd.DataFrame(tmp_dict, index=[])

    for curr_day in np.arange(0, days):
        for curr_course in np.array(activities_df.index):
            max_persons = activities_df.places[curr_course]
            embed()
            quit()
            persons_in_course = entries_df[entries_df.course == activities_df.activities[curr_course]]
            if len(persons_in_course) < max_persons:
                fill_curr_course(curr_course, persons_in_course, persons_df)
            else:
                # Course is full, need to continue
                print('Course is full, continuing with next person...')
                continue

    # embed()
    # quit()
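The capacity check in the inner loop reads more clearly with concrete data. The sketch below is purely illustrative, with made-up rows whose columns follow the tmp_dict keys above.

# Illustrative capacity check with hypothetical data.
import pandas as pd

entries_df = pd.DataFrame([
    {'day': 1, 'person': 'ana',  'course': 'yoga', 'room': 'A', 'gender': 'f'},
    {'day': 1, 'person': 'luis', 'course': 'yoga', 'room': 'A', 'gender': 'm'},
])
max_persons = 2

persons_in_course = entries_df[entries_df.course == 'yoga']
if len(persons_in_course) < max_persons:
    print('room left, assign next person')
else:
    print('Course is full, continuing with next person...')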
gpl-2.0
jzt5132/scikit-learn
benchmarks/bench_lasso.py
297
3305
""" Benchmarks of Lasso vs LassoLars First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import gc from time import time import numpy as np from sklearn.datasets.samples_generator import make_regression def compute_bench(alpha, n_samples, n_features, precompute): lasso_results = [] lars_lasso_results = [] it = 0 for ns in n_samples: for nf in n_features: it += 1 print('==================') print('Iteration %s of %s' % (it, max(len(n_samples), len(n_features)))) print('==================') n_informative = nf // 10 X, Y, coef_ = make_regression(n_samples=ns, n_features=nf, n_informative=n_informative, noise=0.1, coef=True) X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data gc.collect() print("- benchmarking Lasso") clf = Lasso(alpha=alpha, fit_intercept=False, precompute=precompute) tstart = time() clf.fit(X, Y) lasso_results.append(time() - tstart) gc.collect() print("- benchmarking LassoLars") clf = LassoLars(alpha=alpha, fit_intercept=False, normalize=False, precompute=precompute) tstart = time() clf.fit(X, Y) lars_lasso_results.append(time() - tstart) return lasso_results, lars_lasso_results if __name__ == '__main__': from sklearn.linear_model import Lasso, LassoLars import pylab as pl alpha = 0.01 # regularization parameter n_features = 10 list_n_samples = np.linspace(100, 1000000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples, [n_features], precompute=True) pl.figure('scikit-learn LASSO benchmark results') pl.subplot(211) pl.plot(list_n_samples, lasso_results, 'b-', label='Lasso') pl.plot(list_n_samples, lars_lasso_results, 'r-', label='LassoLars') pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha)) pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') pl.axis('tight') n_samples = 2000 list_n_features = np.linspace(500, 3000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples], list_n_features, precompute=False) pl.subplot(212) pl.plot(list_n_features, lasso_results, 'b-', label='Lasso') pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars') pl.title('%d samples, alpha=%s' % (n_samples, alpha)) pl.legend(loc='upper left') pl.xlabel('number of features') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
wzbozon/scikit-learn
sklearn/datasets/twenty_newsgroups.py
126
13591
"""Caching loader for the 20 newsgroups text classification dataset The description of the dataset is available on the official website at: http://people.csail.mit.edu/jrennie/20Newsgroups/ Quoting the introduction: The 20 Newsgroups data set is a collection of approximately 20,000 newsgroup documents, partitioned (nearly) evenly across 20 different newsgroups. To the best of my knowledge, it was originally collected by Ken Lang, probably for his Newsweeder: Learning to filter netnews paper, though he does not explicitly mention this collection. The 20 newsgroups collection has become a popular data set for experiments in text applications of machine learning techniques, such as text classification and text clustering. This dataset loader will download the recommended "by date" variant of the dataset and which features a point in time split between the train and test sets. The compressed dataset size is around 14 Mb compressed. Once uncompressed the train set is 52 MB and the test set is 34 MB. The data is downloaded, extracted and cached in the '~/scikit_learn_data' folder. The `fetch_20newsgroups` function will not vectorize the data into numpy arrays but the dataset lists the filenames of the posts and their categories as target labels. The `fetch_20newsgroups_vectorized` function will in addition do a simple tf-idf vectorization step. """ # Copyright (c) 2011 Olivier Grisel <[email protected]> # License: BSD 3 clause import os import logging import tarfile import pickle import shutil import re import codecs import numpy as np import scipy.sparse as sp from .base import get_data_home from .base import Bunch from .base import load_files from ..utils import check_random_state from ..feature_extraction.text import CountVectorizer from ..preprocessing import normalize from ..externals import joblib, six if six.PY3: from urllib.request import urlopen else: from urllib2 import urlopen logger = logging.getLogger(__name__) URL = ("http://people.csail.mit.edu/jrennie/" "20Newsgroups/20news-bydate.tar.gz") ARCHIVE_NAME = "20news-bydate.tar.gz" CACHE_NAME = "20news-bydate.pkz" TRAIN_FOLDER = "20news-bydate-train" TEST_FOLDER = "20news-bydate-test" def download_20newsgroups(target_dir, cache_path): """Download the 20 newsgroups data and stored it as a zipped pickle.""" archive_path = os.path.join(target_dir, ARCHIVE_NAME) train_path = os.path.join(target_dir, TRAIN_FOLDER) test_path = os.path.join(target_dir, TEST_FOLDER) if not os.path.exists(target_dir): os.makedirs(target_dir) if os.path.exists(archive_path): # Download is not complete as the .tar.gz file is removed after # download. logger.warning("Download was incomplete, downloading again.") os.remove(archive_path) logger.warning("Downloading dataset from %s (14 MB)", URL) opener = urlopen(URL) with open(archive_path, 'wb') as f: f.write(opener.read()) logger.info("Decompressing %s", archive_path) tarfile.open(archive_path, "r:gz").extractall(path=target_dir) os.remove(archive_path) # Store a zipped pickle cache = dict(train=load_files(train_path, encoding='latin1'), test=load_files(test_path, encoding='latin1')) compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec') with open(cache_path, 'wb') as f: f.write(compressed_content) shutil.rmtree(target_dir) return cache def strip_newsgroup_header(text): """ Given text in "news" format, strip the headers, by removing everything before the first blank line. 
""" _before, _blankline, after = text.partition('\n\n') return after _QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:' r'|^In article|^Quoted from|^\||^>)') def strip_newsgroup_quoting(text): """ Given text in "news" format, strip lines beginning with the quote characters > or |, plus lines that often introduce a quoted section (for example, because they contain the string 'writes:'.) """ good_lines = [line for line in text.split('\n') if not _QUOTE_RE.search(line)] return '\n'.join(good_lines) def strip_newsgroup_footer(text): """ Given text in "news" format, attempt to remove a signature block. As a rough heuristic, we assume that signatures are set apart by either a blank line or a line made of hyphens, and that it is the last such line in the file (disregarding blank lines at the end). """ lines = text.strip().split('\n') for line_num in range(len(lines) - 1, -1, -1): line = lines[line_num] if line.strip().strip('-') == '': break if line_num > 0: return '\n'.join(lines[:line_num]) else: return text def fetch_20newsgroups(data_home=None, subset='train', categories=None, shuffle=True, random_state=42, remove=(), download_if_missing=True): """Load the filenames and data from the 20 newsgroups dataset. Read more in the :ref:`User Guide <20newsgroups>`. Parameters ---------- subset: 'train' or 'test', 'all', optional Select the dataset to load: 'train' for the training set, 'test' for the test set, 'all' for both, with shuffled ordering. data_home: optional, default: None Specify a download and cache folder for the datasets. If None, all scikit-learn data is stored in '~/scikit_learn_data' subfolders. categories: None or collection of string or unicode If None (default), load all the categories. If not None, list of category names to load (other categories ignored). shuffle: bool, optional Whether or not to shuffle the data: might be important for models that make the assumption that the samples are independent and identically distributed (i.i.d.), such as stochastic gradient descent. random_state: numpy random number generator or seed integer Used to shuffle the dataset. download_if_missing: optional, True by default If False, raise an IOError if the data is not locally available instead of trying to download the data from the source site. remove: tuple May contain any subset of ('headers', 'footers', 'quotes'). Each of these are kinds of text that will be detected and removed from the newsgroup posts, preventing classifiers from overfitting on metadata. 'headers' removes newsgroup headers, 'footers' removes blocks at the ends of posts that look like signatures, and 'quotes' removes lines that appear to be quoting another post. 'headers' follows an exact standard; the other filters are not always correct. 
""" data_home = get_data_home(data_home=data_home) cache_path = os.path.join(data_home, CACHE_NAME) twenty_home = os.path.join(data_home, "20news_home") cache = None if os.path.exists(cache_path): try: with open(cache_path, 'rb') as f: compressed_content = f.read() uncompressed_content = codecs.decode( compressed_content, 'zlib_codec') cache = pickle.loads(uncompressed_content) except Exception as e: print(80 * '_') print('Cache loading failed') print(80 * '_') print(e) if cache is None: if download_if_missing: cache = download_20newsgroups(target_dir=twenty_home, cache_path=cache_path) else: raise IOError('20Newsgroups dataset not found') if subset in ('train', 'test'): data = cache[subset] elif subset == 'all': data_lst = list() target = list() filenames = list() for subset in ('train', 'test'): data = cache[subset] data_lst.extend(data.data) target.extend(data.target) filenames.extend(data.filenames) data.data = data_lst data.target = np.array(target) data.filenames = np.array(filenames) else: raise ValueError( "subset can only be 'train', 'test' or 'all', got '%s'" % subset) data.description = 'the 20 newsgroups by date dataset' if 'headers' in remove: data.data = [strip_newsgroup_header(text) for text in data.data] if 'footers' in remove: data.data = [strip_newsgroup_footer(text) for text in data.data] if 'quotes' in remove: data.data = [strip_newsgroup_quoting(text) for text in data.data] if categories is not None: labels = [(data.target_names.index(cat), cat) for cat in categories] # Sort the categories to have the ordering of the labels labels.sort() labels, categories = zip(*labels) mask = np.in1d(data.target, labels) data.filenames = data.filenames[mask] data.target = data.target[mask] # searchsorted to have continuous labels data.target = np.searchsorted(labels, data.target) data.target_names = list(categories) # Use an object array to shuffle: avoids memory copy data_lst = np.array(data.data, dtype=object) data_lst = data_lst[mask] data.data = data_lst.tolist() if shuffle: random_state = check_random_state(random_state) indices = np.arange(data.target.shape[0]) random_state.shuffle(indices) data.filenames = data.filenames[indices] data.target = data.target[indices] # Use an object array to shuffle: avoids memory copy data_lst = np.array(data.data, dtype=object) data_lst = data_lst[indices] data.data = data_lst.tolist() return data def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None): """Load the 20 newsgroups dataset and transform it into tf-idf vectors. This is a convenience function; the tf-idf transformation is done using the default settings for `sklearn.feature_extraction.text.Vectorizer`. For more advanced usage (stopword filtering, n-gram extraction, etc.), combine fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`. Read more in the :ref:`User Guide <20newsgroups>`. Parameters ---------- subset: 'train' or 'test', 'all', optional Select the dataset to load: 'train' for the training set, 'test' for the test set, 'all' for both, with shuffled ordering. data_home: optional, default: None Specify an download and cache folder for the datasets. If None, all scikit-learn data is stored in '~/scikit_learn_data' subfolders. remove: tuple May contain any subset of ('headers', 'footers', 'quotes'). Each of these are kinds of text that will be detected and removed from the newsgroup posts, preventing classifiers from overfitting on metadata. 
'headers' removes newsgroup headers, 'footers' removes blocks at the ends of posts that look like signatures, and 'quotes' removes lines that appear to be quoting another post. Returns ------- bunch : Bunch object bunch.data: sparse matrix, shape [n_samples, n_features] bunch.target: array, shape [n_samples] bunch.target_names: list, length [n_classes] """ data_home = get_data_home(data_home=data_home) filebase = '20newsgroup_vectorized' if remove: filebase += 'remove-' + ('-'.join(remove)) target_file = os.path.join(data_home, filebase + ".pk") # we shuffle but use a fixed seed for the memoization data_train = fetch_20newsgroups(data_home=data_home, subset='train', categories=None, shuffle=True, random_state=12, remove=remove) data_test = fetch_20newsgroups(data_home=data_home, subset='test', categories=None, shuffle=True, random_state=12, remove=remove) if os.path.exists(target_file): X_train, X_test = joblib.load(target_file) else: vectorizer = CountVectorizer(dtype=np.int16) X_train = vectorizer.fit_transform(data_train.data).tocsr() X_test = vectorizer.transform(data_test.data).tocsr() joblib.dump((X_train, X_test), target_file, compress=9) # the data is stored as int16 for compactness # but normalize needs floats X_train = X_train.astype(np.float64) X_test = X_test.astype(np.float64) normalize(X_train, copy=False) normalize(X_test, copy=False) target_names = data_train.target_names if subset == "train": data = X_train target = data_train.target elif subset == "test": data = X_test target = data_test.target elif subset == "all": data = sp.vstack((X_train, X_test)).tocsr() target = np.concatenate((data_train.target, data_test.target)) else: raise ValueError("%r is not a valid subset: should be one of " "['train', 'test', 'all']" % subset) return Bunch(data=data, target=target, target_names=target_names)
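A typical call pattern for the loader defined above; the first call downloads roughly 14 MB and caches it under ~/scikit_learn_data.

# Load two categories of the training split with metadata stripped.
from sklearn.datasets import fetch_20newsgroups

train = fetch_20newsgroups(subset='train',
                           categories=['sci.space', 'rec.autos'],
                           remove=('headers', 'footers', 'quotes'))
print(len(train.data), train.target_names)  # number of posts and the category names
print(train.data[0][:200])                  # first 200 characters of one post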
bsd-3-clause
puruckertom/ubertool
ubertool/iec/iec_exe.py
1
2127
from __future__ import division

import numpy as np
import pandas as pd

from base.uber_model import UberModel, ModelSharedInputs
from .iec_functions import IecFunctions


class IecInputs(ModelSharedInputs):
    """
    Input class for IEC.
    """

    def __init__(self):
        """Class representing the inputs for IEC"""
        super(IecInputs, self).__init__()
        self.dose_response = pd.Series([], dtype="float")
        self.lc50 = pd.Series([], dtype="float")
        self.threshold = pd.Series([], dtype="float")


class IecOutputs(object):
    """
    Output class for IEC.
    """

    def __init__(self):
        """Class representing the outputs for IEC"""
        super(IecOutputs, self).__init__()
        self.out_z_score_f = pd.Series([], dtype="float", name="out_z_score_f")
        self.out_f8_f = pd.Series([], dtype="float", name="out_f8_f")
        self.out_chance_f = pd.Series([], dtype="float", name="out_chance_f")


class Iec(UberModel, IecInputs, IecOutputs, IecFunctions):
    """
    IEC model for proportional population effect based on normal distribution.
    """

    def __init__(self, pd_obj, pd_obj_exp):
        """Class representing the IEC model and containing all its methods"""
        super(Iec, self).__init__()
        self.pd_obj = pd_obj
        self.pd_obj_exp = pd_obj_exp
        self.pd_obj_out = None

    def execute_model(self):
        """
        Callable to execute the running of the model:
            1) Populate input parameters
            2) Create output DataFrame to hold the model outputs
            3) Run the model's methods to generate outputs
            4) Fill the output DataFrame with the generated model outputs
        """
        self.populate_inputs(self.pd_obj)
        self.pd_obj_out = self.populate_outputs()
        self.run_methods()
        self.fill_output_dataframe()

    def run_methods(self):
        """
        Execute all algorithm methods for model logic.
        :return:
        """
        try:
            self.z_score_f()
            self.f8_f()
            self.chance_f()
        except Exception as e:
            pass
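A hypothetical driver sketch: ubertool models are generally fed a DataFrame whose columns match the input attribute names (dose_response, lc50, threshold here). The exact column contract lives in UberModel.populate_inputs, which is not shown, so the call sequence is left commented out.

# Hypothetical driver; column names taken from IecInputs, call sequence assumed.
import pandas as pd
# from ubertool.iec.iec_exe import Iec   # import path inferred from the file location above

pd_obj = pd.DataFrame({
    'dose_response': [2.0],
    'lc50': [10.0],
    'threshold': [0.6],
})
# iec = Iec(pd_obj, pd_obj_exp=None)
# iec.execute_model()
# print(iec.pd_obj_out[['out_z_score_f', 'out_f8_f', 'out_chance_f']])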
unlicense
ronalcc/zipline
zipline/finance/performance/position_tracker.py
4
13048
from __future__ import division import logbook import numpy as np import pandas as pd from pandas.lib import checknull try: # optional cython based OrderedDict from cyordereddict import OrderedDict except ImportError: from collections import OrderedDict from six import iteritems, itervalues from zipline.finance.slippage import Transaction from zipline.utils.serialization_utils import ( VERSION_LABEL ) import zipline.protocol as zp from zipline.assets import ( Equity, Future ) from zipline.finance.trading import with_environment from . position import positiondict log = logbook.Logger('Performance') class PositionTracker(object): def __init__(self): # sid => position object self.positions = positiondict() # Arrays for quick calculations of positions value self._position_amounts = OrderedDict() self._position_last_sale_prices = OrderedDict() self._position_value_multipliers = OrderedDict() self._position_exposure_multipliers = OrderedDict() self._position_payout_multipliers = OrderedDict() self._unpaid_dividends = pd.DataFrame( columns=zp.DIVIDEND_PAYMENT_FIELDS, ) self._positions_store = zp.Positions() @with_environment() def _retrieve_asset(self, sid, env=None): return env.asset_finder.retrieve_asset(sid) def _update_multipliers(self, sid): try: self._position_value_multipliers[sid] self._position_exposure_multipliers[sid] self._position_payout_multipliers[sid] except KeyError: # Collect the value multipliers from applicable sids asset = self._retrieve_asset(sid) if isinstance(asset, Equity): self._position_value_multipliers[sid] = 1 self._position_exposure_multipliers[sid] = 1 self._position_payout_multipliers[sid] = 0 if isinstance(asset, Future): self._position_value_multipliers[sid] = 0 self._position_exposure_multipliers[sid] = \ asset.contract_multiplier self._position_payout_multipliers[sid] = \ asset.contract_multiplier def update_last_sale(self, event): # NOTE, PerformanceTracker already vetted as TRADE type sid = event.sid if sid not in self.positions: return 0 price = event.price if checknull(price): return 0 pos = self.positions[sid] old_price = pos.last_sale_price pos.last_sale_date = event.dt pos.last_sale_price = price self._position_last_sale_prices[sid] = price # Calculate cash adjustment on assets with multipliers return ((price - old_price) * self._position_payout_multipliers[sid] * pos.amount) def update_positions(self, positions): # update positions in batch self.positions.update(positions) for sid, pos in iteritems(positions): self._position_amounts[sid] = pos.amount self._position_last_sale_prices[sid] = pos.last_sale_price self._update_multipliers(sid) def update_position(self, sid, amount=None, last_sale_price=None, last_sale_date=None, cost_basis=None): pos = self.positions[sid] if amount is not None: pos.amount = amount self._position_amounts[sid] = amount self._position_values = None # invalidate cache self._update_multipliers(sid=sid) if last_sale_price is not None: pos.last_sale_price = last_sale_price self._position_last_sale_prices[sid] = last_sale_price self._position_values = None # invalidate cache if last_sale_date is not None: pos.last_sale_date = last_sale_date if cost_basis is not None: pos.cost_basis = cost_basis def execute_transaction(self, txn): # Update Position # ---------------- sid = txn.sid position = self.positions[sid] position.update(txn) self._position_amounts[sid] = position.amount self._position_last_sale_prices[sid] = position.last_sale_price self._update_multipliers(sid) def handle_commission(self, commission): # Adjust the cost basis 
of the stock if we own it if commission.sid in self.positions: self.positions[commission.sid].\ adjust_commission_cost_basis(commission) @property def position_values(self): iter_amount_price_multiplier = zip( itervalues(self._position_amounts), itervalues(self._position_last_sale_prices), itervalues(self._position_value_multipliers), ) return [ price * amount * multiplier for price, amount, multiplier in iter_amount_price_multiplier ] @property def position_exposures(self): iter_amount_price_multiplier = zip( itervalues(self._position_amounts), itervalues(self._position_last_sale_prices), itervalues(self._position_exposure_multipliers), ) return [ price * amount * multiplier for price, amount, multiplier in iter_amount_price_multiplier ] def calculate_positions_value(self): if len(self.position_values) == 0: return np.float64(0) return sum(self.position_values) def calculate_positions_exposure(self): if len(self.position_exposures) == 0: return np.float64(0) return sum(self.position_exposures) def _longs_count(self): return sum(1 for i in self.position_exposures if i > 0) def _long_exposure(self): return sum(i for i in self.position_exposures if i > 0) def _long_value(self): return sum(i for i in self.position_values if i > 0) def _shorts_count(self): return sum(1 for i in self.position_exposures if i < 0) def _short_exposure(self): return sum(i for i in self.position_exposures if i < 0) def _short_value(self): return sum(i for i in self.position_values if i < 0) def _gross_exposure(self): return self._long_exposure() + abs(self._short_exposure()) def _gross_value(self): return self._long_value() + abs(self._short_value()) def _net_exposure(self): return self.calculate_positions_exposure() def _net_value(self): return self.calculate_positions_value() def handle_split(self, split): if split.sid in self.positions: # Make the position object handle the split. It returns the # leftover cash from a fractional share, if there is any. position = self.positions[split.sid] leftover_cash = position.handle_split(split) self._position_amounts[split.sid] = position.amount self._position_last_sale_prices[split.sid] = \ position.last_sale_price self._update_multipliers(split.sid) return leftover_cash def _maybe_earn_dividend(self, dividend): """ Take a historical dividend record and return a Series with fields in zipline.protocol.DIVIDEND_FIELDS (plus an 'id' field) representing the cash/stock amount we are owed when the dividend is paid. """ if dividend['sid'] in self.positions: return self.positions[dividend['sid']].earn_dividend(dividend) else: return zp.dividend_payment() def earn_dividends(self, dividend_frame): """ Given a frame of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date. """ earned = dividend_frame.apply(self._maybe_earn_dividend, axis=1)\ .dropna(how='all') if len(earned) > 0: # Store the earned dividends so that they can be paid on the # dividends' pay_dates. self._unpaid_dividends = pd.concat( [self._unpaid_dividends, earned], ) def _maybe_pay_dividend(self, dividend): """ Take a historical dividend record, look up any stored record of cash/stock we are owed for that dividend, and return a Series with fields drawn from zipline.protocol.DIVIDEND_PAYMENT_FIELDS. 
""" try: unpaid_dividend = self._unpaid_dividends.loc[dividend['id']] return unpaid_dividend except KeyError: return zp.dividend_payment() def pay_dividends(self, dividend_frame): """ Given a frame of dividends whose pay_dates are all the next trading day, grant the cash and/or stock payments that were calculated on the given dividends' ex dates. """ payments = dividend_frame.apply(self._maybe_pay_dividend, axis=1)\ .dropna(how='all') # Mark these dividends as paid by dropping them from our unpaid # table. self._unpaid_dividends.drop(payments.index) # Add stock for any stock dividends paid. Again, the values here may # be negative in the case of short positions. stock_payments = payments[payments['payment_sid'].notnull()] for _, row in stock_payments.iterrows(): stock = row['payment_sid'] share_count = row['share_count'] # note we create a Position for stock dividend if we don't # already own the asset position = self.positions[stock] position.amount += share_count self._position_amounts[stock] = position.amount self._position_last_sale_prices[stock] = position.last_sale_price self._update_multipliers(stock) # Add cash equal to the net cash payed from all dividends. Note that # "negative cash" is effectively paid if we're short an asset, # representing the fact that we're required to reimburse the owner of # the stock for any dividends paid while borrowing. net_cash_payment = payments['cash_amount'].fillna(0).sum() return net_cash_payment def create_close_position_transaction(self, event): if not self._position_amounts.get(event.sid): return None txn = Transaction( sid=event.sid, amount=(-1 * self._position_amounts[event.sid]), dt=event.dt, price=event.price, commission=0, order_id=0 ) return txn def get_positions(self): positions = self._positions_store for sid, pos in iteritems(self.positions): if pos.amount == 0: # Clear out the position if it has become empty since the last # time get_positions was called. Catching the KeyError is # faster than checking `if sid in positions`, and this can be # potentially called in a tight inner loop. try: del positions[sid] except KeyError: pass continue # Note that this will create a position if we don't currently have # an entry position = positions[sid] position.amount = pos.amount position.cost_basis = pos.cost_basis position.last_sale_price = pos.last_sale_price return positions def get_positions_list(self): positions = [] for sid, pos in iteritems(self.positions): if pos.amount != 0: positions.append(pos.to_dict()) return positions def __getstate__(self): state_dict = {} state_dict['positions'] = dict(self.positions) state_dict['unpaid_dividends'] = self._unpaid_dividends STATE_VERSION = 1 state_dict[VERSION_LABEL] = STATE_VERSION return state_dict def __setstate__(self, state): OLDEST_SUPPORTED_STATE = 1 version = state.pop(VERSION_LABEL) if version < OLDEST_SUPPORTED_STATE: raise BaseException("PositionTracker saved state is too old.") self.positions = positiondict() # note that positions_store is temporary and gets regened from # .positions self._positions_store = zp.Positions() self._unpaid_dividends = state['unpaid_dividends'] # Arrays for quick calculations of positions value self._position_amounts = OrderedDict() self._position_last_sale_prices = OrderedDict() self._position_value_multipliers = OrderedDict() self._position_exposure_multipliers = OrderedDict() self._position_payout_multipliers = OrderedDict() self.update_positions(state['positions'])
apache-2.0
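The PositionTracker above keeps three per-sid multipliers so equities and futures can share one valuation loop: the value multiplier carries price into net value (1 for equities, 0 for futures), the exposure multiplier scales futures by their contract multiplier, and the payout multiplier converts futures price moves into cash adjustments inside update_last_sale. The standalone sketch below mirrors that arithmetic with made-up sids and numbers; it does not use zipline's asset finder or event objects, so treat it as an illustration of the bookkeeping only.

from collections import OrderedDict

positions = OrderedDict()          # sid -> (amount, last_sale_price)
value_mult = {1: 1, 2: 0}          # equities carry price into "value", futures do not
exposure_mult = {1: 1, 2: 50}      # futures scale exposure by the contract multiplier
payout_mult = {1: 0, 2: 50}        # futures price moves settle as cash adjustments

positions[1] = (100, 10.0)         # long 100 shares of equity sid=1 at $10
positions[2] = (-2, 95.0)          # short 2 futures contracts sid=2 at 95

value = sum(a * p * value_mult[s] for s, (a, p) in positions.items())
exposure = sum(a * p * exposure_mult[s] for s, (a, p) in positions.items())
print(value)     # 1000.0 -> only the equity contributes to net value
print(exposure)  # 1000.0 - 9500.0 = -8500.0 net exposure

# update_last_sale: a price move on the future produces a cash adjustment
old_price, new_price = 95.0, 96.0
cash_adjustment = (new_price - old_price) * payout_mult[2] * positions[2][0]
print(cash_adjustment)  # -100.0 owed on the short futures position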
BorisJeremic/Real-ESSI-Examples
analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_NonLinHardSoftShear/Area/A_1e2/Normalized_Shear_Stress_Plot.py
48
3533
#!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np

plt.rcParams.update({'font.size': 28})

# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize'] = 24

mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize'] = 24

###############################################################
## Analytical Solution
###############################################################

# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Shear.feioutput"
finput = h5py.File(thefile)

# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4, :]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5, :]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7, :]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8, :]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9, :]

shear_strain = np.sqrt(shear_strain_x * shear_strain_x + shear_strain_y * shear_strain_y)
shear_stress = np.sqrt(shear_stress_x * shear_stress_x + shear_stress_y * shear_stress_y)
shear_stress = shear_stress_x
shear_strain = shear_strain_x

# Configure the figure filename, according to the input filename.
outfig = thefile.replace("_", "-")
outfigname = outfig.replace("h5.feioutput", "pdf")

# Plot the figure. Add labels and titles.
plt.figure(figsize=(12, 10))
plt.plot(shear_strain * 5, shear_stress / normal_stress, '-r',
         label='Analytical Solution', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")

###############################################################
## Numerical Solution
###############################################################

# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Tangential_Load.h5.feioutput"
finput = h5py.File(thefile)

# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4, :]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5, :]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7, :]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8, :]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9, :]

shear_strain = np.sqrt(shear_strain_x * shear_strain_x + shear_strain_y * shear_strain_y)
shear_stress = np.sqrt(shear_stress_x * shear_stress_x + shear_stress_y * shear_stress_y)
shear_stress = shear_stress_x
shear_strain = shear_strain_x

# Configure the figure filename, according to the input filename.
outfig = thefile.replace("_", "-")
outfigname = outfig.replace("h5.feioutput", "pdf")

# Plot the figure. Add labels and titles.
plt.plot(shear_strain * 5, shear_stress / normal_stress, '-k',
         label='Numerical Solution', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")

########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])

outfigname = "Normalized_Shear_Stress.pdf"
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
cc0-1.0
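Normalized_Shear_Stress_Plot.py assumes a Real-ESSI .feioutput HDF5 file whose /Model/Elements/Element_Outputs dataset stores shear strains in rows 4-5, shear stresses in rows 7-8 and normal stress in row 9. The throwaway file below shows only the h5py access pattern the script relies on; the layout and the random numbers are illustrative stand-ins, not real solver output.

import h5py
import numpy as np

# Build a toy file with the group/dataset names the script reads
with h5py.File("toy.feioutput", "w") as f:
    f.create_dataset("time", data=np.linspace(0.0, 1.0, 11))
    grp = f.create_group("Model").create_group("Elements")
    grp.create_dataset("Element_Outputs", data=np.random.rand(10, 11))

# Read it back the same way the plotting script does
with h5py.File("toy.feioutput", "r") as f:
    shear_stress_x = f["/Model/Elements/Element_Outputs"][7, :]
    normal_stress = -f["/Model/Elements/Element_Outputs"][9, :]
    print(shear_stress_x / normal_stress)  # the normalized quantity plotted above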
ttfnrob/NLTalk
process_comments.py
1
9429
# Script to train a Naive Bayesian Classifier with NLTK - based on https://github.com/abromberg/sentiment_analysis_python # Classifier is trained using 1.6M Tweets pre-procesed at Sanford and available at http://help.sentiment140.com/for-students # Other data is also save din the training-data folder # Script and HTML tepmate are designed for specific Zooniverse data. # This is extracted from the Zooniverse discussion platform 'Talk' - please contact [email protected] for more information. # Inputs are a MySQL DB of text comments, and NLTK+training data # Outputs are CSV for of sentiment scores, and HTML files to show positive and negative comments # Basic Components import math import time import datetime import os import numpy as np import pandas as pd import sys # For database and data IO import pymysql import json import csv import urllib2 from subprocess import call # For Text processing import re, collections, itertools import nltk, nltk.classify.util, nltk.metrics from nltk.classify import NaiveBayesClassifier from nltk.metrics import BigramAssocMeasures from nltk.probability import FreqDist, ConditionalFreqDist ########################## Set parameters for different projects ########################## # project_slug = 'galaxy_zoo' # talk_url = 'talk.galaxyzoo.org' # min_comments = 5 # imgy='250' # project_slug = 'serengeti' # talk_url = 'talk.snapshotserengeti.org' # min_comments = 5 # imgy='175' project_slug = 'planet_four' talk_url = 'talk.planetfour.org' min_comments = 5 imgy='193' # project_slug = 'milky_way' # talk_url = 'talk.milkywayproject.org' # min_comments = 5 # imgy='125' ######################## ------------------------------------- ########################### # Function to create HTML file from list of results def focus_list_to_html_table(focus_list): html = """<style type="text/css"> body {font-size:14px; color: #ffffff; font-family: sans-serif;} div.thumbnail {display: inline-block; position: relative; margin: 5px;} div.thumbnail img {width:250px;} div.details {position:absolute; bottom:5px; left:5px; width:240px;} span.pos_frac {color: #8DFF87;} span.neg_frac {color: #FF7B6B;} span.zoo_id {position:absolute; top:5px; left:5px;} span.comments {position:absolute; top:5px; right:5px;} span.words {display:none;} </style> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script> <script> function id2url(zooID) { $.getJSON( "https://api.zooniverse.org/projects/"""+project_slug+"""/subjects/"+zooID, function( data ) { if (data.location.standard instanceof Array) { $("#"+zooID).attr("src", data.location.standard[0]); } else { $("#"+zooID).attr("src", data.location.standard); } }); } $(document).ready(function() { $( ".img_waiting" ).each(function( index ) { id2url($( this ).attr('id')); }); }) </script> <body> """ c=0 focus_list.reverse() total = len(focus_list) for r in focus_list: if r[3][0]=="A": img_html="<a href='http://"+talk_url+"/#/subjects/"+r[3]+"' target='_blank'><img class='img_waiting' id='"+r[3]+"' src='http://placehold.it/250x"+imgy+"' /></a>" else: img_html="<img class='thumb' src='http://placehold.it/250x"+imgy+"' />" html+="""<div class='thumbnail'> """+img_html+""" <span class='zoo_id'>"""+str(r[3])+"""</span> <div class='details'> <span class='pos_frac'>"""+"{:.2f}".format(r[1])+"""</span> <span class='neg_frac'>"""+"{:.2f}".format(r[2])+"""</span> </div> <span class='comments'>"""+str(r[4])+"""</span> </div>""" c+=1 html+="""</body>""" return html # Function to get the feature words from text def 
extract_features(document): document_words = set(document) features = {} for word in document_words: features[word] = (word in document_words) return features # Function to create dictionary of text def make_full_dict(words): return dict([(word, True) for word in words]) # Function to score words in text and make distributions def create_word_scores(): posWords = [] negWords = [] with open(POS_DATA_FILE, 'r') as posSentences: for i in posSentences: posWord = re.findall(r"[\w']+|[.,!?;]", i.rstrip()) posWords.append(posWord) with open(NEG_DATA_FILE, 'r') as negSentences: for i in negSentences: negWord = re.findall(r"[\w']+|[.,!?;]", i.rstrip()) negWords.append(negWord) posWords = list(itertools.chain(*posWords)) negWords = list(itertools.chain(*negWords)) # Build frequency distibution of all words and then frequency distributions of words within positive and negative labels word_fd = FreqDist() cond_word_fd = ConditionalFreqDist() for word in posWords: word_fd.inc(word.lower()) cond_word_fd['pos'].inc(word.lower()) for word in negWords: word_fd.inc(word.lower()) cond_word_fd['neg'].inc(word.lower()) # Create counts of positive, negative, and total words pos_word_count = cond_word_fd['pos'].N() neg_word_count = cond_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count # Builds dictionary of word scores based on chi-squared test word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count) word_scores[word] = pos_score + neg_score return word_scores if len(sys.argv) < 2: print "No file specificed\n" else: input_filename = sys.argv[1] print "Initialized "+project_slug+" with min of "+str(min_comments)+" - processing file "+input_filename print "Loading training data..." DATA_DIRECTORY = os.path.join('training-data', 'twitter_data') POS_DATA_FILE = os.path.join(DATA_DIRECTORY, 'positive_tweets.txt') NEG_DATA_FILE = os.path.join(DATA_DIRECTORY, 'negative_tweets.txt') # DATA_DIRECTORY = os.path.join('training-data', 'combined') # POS_DATA_FILE = os.path.join(DATA_DIRECTORY, 'positive.txt') # NEG_DATA_FILE = os.path.join(DATA_DIRECTORY, 'negative.txt') print "Training NLTK Bayesian classifier..." 
posFeatures = [] negFeatures = [] # Process text into words with pos/neg connotation with open(POS_DATA_FILE, 'r') as posSentences: for i in posSentences: posWords = re.findall(r"[\w']+|[.,!?;]", i.rstrip()) posWords = [make_full_dict(posWords), 'pos'] posFeatures.append(posWords) with open(NEG_DATA_FILE, 'r') as negSentences: for i in negSentences: negWords = re.findall(r"[\w']+|[.,!?;]", i.rstrip()) negWords = [make_full_dict(negWords), 'neg'] negFeatures.append(negWords) # Selects 5/6 of the features to be used for training and 1/6 to be used for testing posCutoff = int(math.floor(len(posFeatures)*5/6)) negCutoff = int(math.floor(len(negFeatures)*5/6)) trainFeatures = posFeatures[:posCutoff] + negFeatures[:negCutoff] testFeatures = posFeatures[posCutoff:] + negFeatures[negCutoff:] # Train a Naive Bayes Classifier classifier = NaiveBayesClassifier.train(trainFeatures) # Create reference and test set referenceSets = collections.defaultdict(set) testSets = collections.defaultdict(set) # Puts correctly labeled sentences in referenceSets and the predictively labeled version in testSets for i, (features, label) in enumerate(testFeatures): referenceSets[label].add(i) predicted = classifier.classify(features) testSets[predicted].add(i) print "Esimated accuracy: ", nltk.classify.util.accuracy(classifier, testFeatures) print "Talk data loaded from file" print "Performing sentiment analysis..." df = pd.read_csv(input_filename, skipinitialspace=True, sep='\t') g = df.groupby('focus_id') flist = g['body'].apply(list) focus_list = [] for k,v in flist.iteritems(): if (isinstance(v, list)): if (len(v)>min_comments): string = ' '.join([str(i) for i in v]) # print string ob = (classifier.classify(extract_features(string.split())), classifier.prob_classify(extract_features(string.split())).prob('pos'), classifier.prob_classify(extract_features(string.split())).prob('neg'), k, len(v), extract_features(string.split())) focus_list.insert(0, ob) # Create lists sorted_list = sorted(focus_list, key=lambda x: (-x[1], x[4])) sorted_list_rev = list(sorted_list) sorted_list_rev.reverse() # Filter lists pos_list = filter(lambda x: x[0] == 'pos', sorted_list_rev) neg_list = filter(lambda x: x[0] == 'neg', sorted_list) n = int(len(sorted_list)*1.00) print "%i positive and %i negative items" % (len(pos_list), len(neg_list)) # Output files as CSV and HTML print "Writing CSV..." filename = os.path.join('output', project_slug, project_slug+'_'+str(min_comments)+'.csv') with open(filename, "wb") as f: writer = csv.writer(f) writer.writerows(sorted_list) print "Writing HTML files..." html = focus_list_to_html_table(pos_list) filename = os.path.join('output', project_slug, project_slug+'_'+str(min_comments)+'_positive.html') with open(filename, "w") as text_file: text_file.write(html) call(["open", filename]) html = focus_list_to_html_table(neg_list) filename = os.path.join('output', project_slug, project_slug+'_'+str(min_comments)+'_negative.html') with open(filename, "w") as text_file: text_file.write(html) call(["open", filename]) print "Done!"
mit
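process_comments.py builds its features as simple word-presence dictionaries ({word: True}) and hands them to NLTK's NaiveBayesClassifier. The toy version below reproduces that pipeline end to end on four hand-written sentences instead of the 1.6M-tweet corpus, so the training-data files and MySQL inputs are not needed; the sentences and labels are invented for illustration.

import re
from nltk.classify import NaiveBayesClassifier


def make_full_dict(words):
    # same presence-only feature scheme as the script above
    return dict([(word, True) for word in words])


def featurize(sentence):
    return make_full_dict(re.findall(r"[\w']+|[.,!?;]", sentence.lower()))


train = [(featurize("i love this project , great images !"), 'pos'),
         (featurize("amazing galaxy , wonderful find"), 'pos'),
         (featurize("this is terrible , blurry and useless"), 'neg'),
         (featurize("awful image , hate the noise"), 'neg')]

classifier = NaiveBayesClassifier.train(train)
probs = classifier.prob_classify(featurize("great wonderful galaxy"))
print(classifier.classify(featurize("great wonderful galaxy")))  # expected: 'pos'
print(round(probs.prob('pos'), 2), round(probs.prob('neg'), 2))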
zorroblue/scikit-learn
sklearn/kernel_approximation.py
29
19022
""" The :mod:`sklearn.kernel_approximation` module implements several approximate kernel feature maps base on Fourier transforms. """ # Author: Andreas Mueller <[email protected]> # # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from scipy.linalg import svd from .base import BaseEstimator from .base import TransformerMixin from .utils import check_array, check_random_state, as_float_array from .utils.extmath import safe_sparse_dot from .utils.validation import check_is_fitted from .metrics.pairwise import pairwise_kernels, KERNEL_PARAMS class RBFSampler(BaseEstimator, TransformerMixin): """Approximates feature map of an RBF kernel by Monte Carlo approximation of its Fourier transform. It implements a variant of Random Kitchen Sinks.[1] Read more in the :ref:`User Guide <rbf_kernel_approx>`. Parameters ---------- gamma : float Parameter of RBF kernel: exp(-gamma * x^2) n_components : int Number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Notes ----- See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and Benjamin Recht. [1] "Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning" by A. Rahimi and Benjamin Recht. (http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf) """ def __init__(self, gamma=1., n_components=100, random_state=None): self.gamma = gamma self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X, accept_sparse='csr') random_state = check_random_state(self.random_state) n_features = X.shape[1] self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal( size=(n_features, self.n_components))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = check_array(X, accept_sparse='csr') projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class SkewedChi2Sampler(BaseEstimator, TransformerMixin): """Approximates feature map of the "skewed chi-squared" kernel by Monte Carlo approximation of its Fourier transform. Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`. Parameters ---------- skewedness : float "skewedness" parameter of the kernel. Needs to be cross-validated. n_components : int number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. References ---------- See "Random Fourier Approximations for Skewed Multiplicative Histogram Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu. See also -------- AdditiveChi2Sampler : A different approach for approximating an additive variant of the chi squared kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. """ def __init__(self, skewedness=1., n_components=100, random_state=None): self.skewedness = skewedness self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X) random_state = check_random_state(self.random_state) n_features = X.shape[1] uniform = random_state.uniform(size=(n_features, self.n_components)) # transform by inverse CDF of sech self.random_weights_ = (1. / np.pi * np.log(np.tan(np.pi / 2. * uniform))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. All values of X must be strictly greater than "-skewedness". Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = as_float_array(X, copy=True) X = check_array(X, copy=False) if (X <= -self.skewedness).any(): raise ValueError("X may not contain entries smaller than" " -skewedness.") X += self.skewedness np.log(X, X) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class AdditiveChi2Sampler(BaseEstimator, TransformerMixin): """Approximate feature map for additive chi2 kernel. Uses sampling the fourier transform of the kernel characteristic at regular intervals. Since the kernel that is to be approximated is additive, the components of the input vectors can be treated separately. Each entry in the original space is transformed into 2*sample_steps+1 features, where sample_steps is a parameter of the method. Typical values of sample_steps include 1, 2 and 3. Optimal choices for the sampling interval for certain data ranges can be computed (see the reference). The default values should be reasonable. Read more in the :ref:`User Guide <additive_chi_kernel_approx>`. Parameters ---------- sample_steps : int, optional Gives the number of (complex) sampling points. sample_interval : float, optional Sampling interval. Must be specified when sample_steps not in {1,2,3}. Notes ----- This estimator approximates a slightly different version of the additive chi squared kernel then ``metric.additive_chi2`` computes. See also -------- SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of the chi squared kernel. 
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi squared kernel. References ---------- See `"Efficient additive kernels via explicit feature maps" <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_ A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, 2011 """ def __init__(self, sample_steps=2, sample_interval=None): self.sample_steps = sample_steps self.sample_interval = sample_interval def fit(self, X, y=None): """Set parameters.""" X = check_array(X, accept_sparse='csr') if self.sample_interval is None: # See reference, figure 2 c) if self.sample_steps == 1: self.sample_interval_ = 0.8 elif self.sample_steps == 2: self.sample_interval_ = 0.5 elif self.sample_steps == 3: self.sample_interval_ = 0.4 else: raise ValueError("If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval") else: self.sample_interval_ = self.sample_interval return self def transform(self, X): """Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Returns ------- X_new : {array, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps + 1)) Whether the return value is an array of sparse matrix depends on the type of the input X. """ msg = ("%(name)s is not fitted. Call fit to set the parameters before" " calling transform") check_is_fitted(self, "sample_interval_", msg=msg) X = check_array(X, accept_sparse='csr') sparse = sp.issparse(X) # check if X has negative values. Doesn't play well with np.log. if ((X.data if sparse else X) < 0).any(): raise ValueError("Entries of X must be non-negative.") # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X) def _transform_dense(self, X): non_zero = (X != 0.0) X_nz = X[non_zero] X_step = np.zeros_like(X) X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X_nz) step_nz = 2 * X_nz * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.cos(j * log_step_nz) X_new.append(X_step) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.sin(j * log_step_nz) X_new.append(X_step) return np.hstack(X_new) def _transform_sparse(self, X): indices = X.indices.copy() indptr = X.indptr.copy() data_step = np.sqrt(X.data * self.sample_interval_) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X.data) step_nz = 2 * X.data * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) data_step = factor_nz * np.cos(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) data_step = factor_nz * np.sin(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) return sp.hstack(X_new) class Nystroem(BaseEstimator, TransformerMixin): """Approximate a kernel map using a subset of the training data. Constructs an approximate feature map for an arbitrary kernel using a subset of the data as basis. Read more in the :ref:`User Guide <nystroem_kernel_approx>`. 
Parameters ---------- kernel : string or callable, default="rbf" Kernel map to be approximated. A callable should accept two arguments and the keyword arguments passed to this object as kernel_params, and should return a floating point number. n_components : int Number of features to construct. How many data points will be used to construct the mapping. gamma : float, default=None Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=None Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=None Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. kernel_params : mapping of string to any, optional Additional parameters (keyword arguments) for kernel function passed as callable object. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- components_ : array, shape (n_components, n_features) Subset of training points used to construct the feature map. component_indices_ : array, shape (n_components) Indices of ``components_`` in the training set. normalization_ : array, shape (n_components, n_components) Normalization matrix needed for embedding. Square root of the kernel matrix on ``components_``. References ---------- * Williams, C.K.I. and Seeger, M. "Using the Nystroem method to speed up kernel machines", Advances in neural information processing systems 2001 * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical Comparison", Advances in Neural Information Processing Systems 2012 See also -------- RBFSampler : An approximation to the RBF kernel using random Fourier features. sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. """ def __init__(self, kernel="rbf", gamma=None, coef0=None, degree=None, kernel_params=None, n_components=100, random_state=None): self.kernel = kernel self.gamma = gamma self.coef0 = coef0 self.degree = degree self.kernel_params = kernel_params self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit estimator to data. Samples a subset of training points, computes kernel on these and computes normalization matrix. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Training data. """ X = check_array(X, accept_sparse='csr') rnd = check_random_state(self.random_state) n_samples = X.shape[0] # get basis vectors if self.n_components > n_samples: # XXX should we just bail? n_components = n_samples warnings.warn("n_components > n_samples. 
This is not possible.\n" "n_components was set to n_samples, which results" " in inefficient evaluation of the full kernel.") else: n_components = self.n_components n_components = min(n_samples, n_components) inds = rnd.permutation(n_samples) basis_inds = inds[:n_components] basis = X[basis_inds] basis_kernel = pairwise_kernels(basis, metric=self.kernel, filter_params=True, **self._get_kernel_params()) # sqrt of kernel matrix on basis vectors U, S, V = svd(basis_kernel) S = np.maximum(S, 1e-12) self.normalization_ = np.dot(U / np.sqrt(S), V) self.components_ = basis self.component_indices_ = inds return self def transform(self, X): """Apply feature map to X. Computes an approximate feature map using the kernel between some training points and X. Parameters ---------- X : array-like, shape=(n_samples, n_features) Data to transform. Returns ------- X_transformed : array, shape=(n_samples, n_components) Transformed data. """ check_is_fitted(self, 'components_') X = check_array(X, accept_sparse='csr') kernel_params = self._get_kernel_params() embedded = pairwise_kernels(X, self.components_, metric=self.kernel, filter_params=True, **kernel_params) return np.dot(embedded, self.normalization_.T) def _get_kernel_params(self): params = self.kernel_params if params is None: params = {} if not callable(self.kernel): for param in (KERNEL_PARAMS[self.kernel]): if getattr(self, param) is not None: params[param] = getattr(self, param) else: if (self.gamma is not None or self.coef0 is not None or self.degree is not None): warnings.warn( "Passing gamma, coef0 or degree to Nystroem when using a" " callable kernel is deprecated in version 0.19 and will" " raise an error in 0.21, as they are ignored. Use " "kernel_params instead.", DeprecationWarning) return params
bsd-3-clause
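A quick way to see what these transformers buy you is to compare the approximate kernel (inner products of the mapped features) with the exact RBF kernel. The snippet below is a usage sketch with arbitrary shapes and gamma; with enough components both RBFSampler and Nystroem approach the exact Gram matrix.

import numpy as np
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(50, 5)

exact = rbf_kernel(X, gamma=1.0)

# Random Fourier features (Monte Carlo) vs. a data-dependent Nystroem basis
rbf_feats = RBFSampler(gamma=1.0, n_components=500, random_state=0).fit_transform(X)
nys_feats = Nystroem(kernel='rbf', gamma=1.0, n_components=40, random_state=0).fit_transform(X)

print(np.abs(exact - rbf_feats @ rbf_feats.T).mean())  # RBFSampler approximation error
print(np.abs(exact - nys_feats @ nys_feats.T).mean())  # Nystroem approximation error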
NelisVerhoef/scikit-learn
examples/linear_model/plot_bayesian_ridge.py
248
2588
""" ========================= Bayesian Ridge Regression ========================= Computes a Bayesian Ridge Regression on a synthetic dataset. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. As the prior on the weights is a Gaussian prior, the histogram of the estimated weights is Gaussian. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import BayesianRidge, LinearRegression ############################################################################### # Generating simulated data with Gaussian weigthts np.random.seed(0) n_samples, n_features = 100, 100 X = np.random.randn(n_samples, n_features) # Create Gaussian data # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noise with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the Bayesian Ridge Regression and an OLS for comparison clf = BayesianRidge(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot true weights, estimated weights and histogram of the weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate") plt.plot(w, 'g-', label="Ground truth") plt.plot(ols.coef_, 'r--', label="OLS estimate") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc="best", prop=dict(size=12)) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, log=True) plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc="lower left") plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
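Because the example generates its data with a known noise precision (alpha_ = 50) and weight precision (lambda_ = 4), a natural follow-up check, sketched here with freshly generated data rather than the example's own variables, is to compare those values with the precisions BayesianRidge recovers after fitting.

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
n_samples, n_features = 100, 100
X = rng.randn(n_samples, n_features)
w = np.zeros(n_features)
w[rng.randint(0, n_features, 10)] = rng.normal(scale=1. / np.sqrt(4.), size=10)
y = X @ w + rng.normal(scale=1. / np.sqrt(50.), size=n_samples)

clf = BayesianRidge(compute_score=True).fit(X, y)
print(clf.alpha_)       # estimated noise precision (data generated with 50)
print(clf.lambda_)      # estimated weight precision (data generated with 4)
print(clf.scores_[-1])  # final marginal log-likelihood, the curve plotted above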
QuLogic/burnman
misc/paper_incorrect_averaging.py
1
8987
# BurnMan - a lower mantle toolkit # Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S. # Released under GPL v2 or later. """ paper_incorrect_averaging ------------------------- This script reproduces :cite:`Cottaar2014`, Figure 5. Attempt to reproduce Figure 6.12 from :cite:`Murakami2013` """ import os, sys, numpy as np, matplotlib.pyplot as plt #hack to allow scripts to be placed in subdirectories next to burnman: if not os.path.exists('burnman') and os.path.exists('../burnman'): sys.path.insert(1,os.path.abspath('..')) import burnman from burnman import minerals from burnman.mineral_helpers import HelperSolidSolution import matplotlib.image as mpimg import colors if __name__ == "__main__": plt.figure(dpi=100,figsize=(12,6)) prop={'size':12} plt.rc('text', usetex=True) plt.rcParams['text.latex.preamble'] = '\usepackage{relsize}' dashstyle2=(7,3) dashstyle3=(3,2) method = 'slb2' #define the minerals from table 6.3 mg_perovskite = burnman.Mineral() mg_perovskite.params = {'name': 'Mg perovskite', 'molar_mass' : 0.1004, 'V_0': 24.43e-6, 'K_0': 253.0e9, 'Kprime_0': 3.9, 'G_0' : 172.9e9, 'Gprime_0' : 1.56, 'n': 5.0, 'Debye_0': 1100., 'grueneisen_0': 1.40, 'q_0': 1.40, 'eta_s_0' : 2.6} mg_perovskite.set_method('slb2') fe_perovskite = burnman.Mineral() fe_perovskite.params = {'name': 'Fe perovskite', 'molar_mass' : 0.1319, 'V_0': 25.49e-6, 'K_0': 281.0e9, 'Kprime_0': 4.1, 'G_0' : 138.0e9, 'Gprime_0' : 1.70, 'n': 5.0, 'Debye_0': 841., 'grueneisen_0': 1.48, 'q_0': 1.40, 'eta_s_0' : 2.1} fe_perovskite.set_method(method) periclase = burnman.Mineral() periclase.params = {'name': 'periclase', 'molar_mass' : 0.0403, 'V_0': 11.24e-6, 'K_0': 161.0e9, 'Kprime_0': 3.9, 'G_0' : 130.9e9, 'Gprime_0' : 1.92, 'n': 2.0, 'Debye_0': 773., 'grueneisen_0': 1.50, 'q_0': 1.50, 'eta_s_0' : 2.3} periclase.set_method(method) wustite = burnman.Mineral() wustite.params = {'name': 'wustite', 'molar_mass' : 0.07184, 'V_0': 12.06e-6, 'K_0': 152.0e9, 'Kprime_0': 4.9, 'G_0' : 47.0e9, 'Gprime_0' : 0.70, 'n': 2.0, 'Debye_0': 455., 'grueneisen_0': 1.28, 'q_0': 1.50, 'eta_s_0' : 0.8} wustite.set_method(method) #in the text for the book chapter a linear relationship in elastic properties #for the solid solutions is assumed... class ferropericlase(HelperSolidSolution): def __init__(self, fe_num): endmembers = [periclase, wustite] molar_fractions = [1. - fe_num, 0.0 + fe_num] HelperSolidSolution.__init__(self, endmembers, molar_fractions) class perovskite(HelperSolidSolution): def __init__(self, fe_num): endmembers = [mg_perovskite, fe_perovskite] molar_fractions = [1. - fe_num, 0.0 + fe_num] HelperSolidSolution.__init__(self, endmembers, molar_fractions) #define the P-T path pressure = np.linspace(28.0e9, 129e9, 25.) temperature_bs = burnman.geotherm.brown_shankland(pressure) temperature_an = burnman.geotherm.anderson(pressure) #seismic model for comparison: seismic_model = burnman.seismic.PREM() # pick from .prem() .slow() .fast() (see burnman/seismic.py) depths = map(seismic_model.depth, pressure) seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths) #pure perovskite perovskitite = burnman.Composite( ( (perovskite(0.06), 1.0),) ) perovskitite.set_method(method) #pure periclase periclasite = burnman.Composite( ( (ferropericlase(0.21), 1.0),)) periclasite.set_method(method) #pyrolite (80% perovskite) pyrolite = burnman.Composite( ( (perovskite(0.06), 0.834), (ferropericlase(0.21), 0.166) ) ) pyrolite.set_method(method) #preferred mixture? 
amount_perovskite = 0.92 preferred_mixture = burnman.Composite( ( (perovskite(0.06), amount_perovskite), (ferropericlase(0.21), 1.0-amount_perovskite) ) ) preferred_mixture.set_method(method) mat_rho_1, mat_vp_1, mat_vs_1, mat_vphi_1, mat_K_1, mat_G_1 = burnman.velocities_from_rock(perovskitite,seis_p, temperature_bs) mat_rho_2, mat_vp_2, mat_vs_2, mat_vphi_2, mat_K_2, mat_G_2 = burnman.velocities_from_rock(periclasite,seis_p, temperature_bs) mat_rho_3, mat_vp_3, mat_vs_3, mat_vphi_3, mat_K_3, mat_G_3 = burnman.velocities_from_rock(pyrolite,seis_p, temperature_bs) mat_rho_4, mat_vp_4, mat_vs_4, mat_vphi_4, mat_K_4, mat_G_4 = burnman.velocities_from_rock(preferred_mixture,seis_p, temperature_bs) ### HERE IS THE STEP WITH THE INCORRECT MIXING ### # comment this out to have correct phase averaging, leave it in to have incorrect phase averaging mat_vs_3_wr = 0.5*((0.834*mat_vs_1 + 0.166*mat_vs_2) + np.ones_like(mat_vs_1)/(0.834/mat_vs_1 + 0.166/mat_vs_2)) mat_vs_4_wr = 0.5*((0.92*mat_vs_1 + 0.08*mat_vs_2) + np.ones_like(mat_vs_1)/(0.92/mat_vs_1 + 0.08/mat_vs_2)) plt.subplot(1,2,2) plt.ylim(5.2,7.4) plt.xlim(25,135) #fig1 = mpimg.imread('input_figures/murakami_book_chapter.png') #plt.imshow(fig1, extent=[25,135,5.0,7.6], aspect='auto') plt.plot(seis_p/1.e9,seis_vs/1.e3,color='k',linestyle='-',marker='None',markerfacecolor='w',markersize=4,label='PREM',linewidth=3.0,mew=1.5) plt.plot(seis_p/1.e9,mat_vs_1/1.e3,color=colors.color(3),marker='v',markerfacecolor=colors.color(3), \ markersize=4, markeredgecolor=colors.color(3),markevery=2,linewidth=1.5,label='perovskite') plt.plot(seis_p/1.e9,mat_vs_2/1.e3,color=colors.color(1),linestyle='-', \ linewidth=1.5,marker='^',markerfacecolor=colors.color(1), markersize=4, \ markeredgecolor=colors.color(1),markevery=2,label='periclase') plt.plot(seis_p/1.e9,mat_vs_4_wr/1.e3,color=colors.color(4),dashes=dashstyle3, \ linewidth=1.5,marker='o',markerfacecolor=colors.color(4), markersize=4, \ markeredgecolor=colors.color(4),markevery=2,label='92\% pv') plt.plot(seis_p/1.e9,mat_vs_3_wr/1.e3,color='g',linestyle='-',dashes=dashstyle2, \ linewidth=1.5,marker='o',markerfacecolor='w', markersize=4, markeredgecolor='g',markevery=2,label='83\% pv') plt.legend(loc='lower right',prop={'size':12}) plt.title("Phase average on velocities") plt.xlabel("Pressure (GPa)") plt.subplot(1,2,1) plt.ylim(5.2,7.4) plt.xlim(25,135) #fig1 = mpimg.imread('input_figures/murakami_book_chapter.png') #plt.imshow(fig1, extent=[25,135,5.0,7.6], aspect='auto') plt.plot(seis_p/1.e9,seis_vs/1.e3,color='k',linestyle='-',marker='None',markerfacecolor='w',markersize=4,label='PREM',linewidth=3.0,mew=1.5) plt.plot(seis_p/1.e9,mat_vs_1/1.e3,color=colors.color(3),marker='v',markerfacecolor=colors.color(3), \ markersize=4, markeredgecolor=colors.color(3),markevery=2,linewidth=1.5,label='perovskite') plt.plot(seis_p/1.e9,mat_vs_2/1.e3,color=colors.color(1),linestyle='-', \ linewidth=1.5,marker='^',markerfacecolor=colors.color(1), markersize=4, \ markeredgecolor=colors.color(1),markevery=2,label='periclase') plt.plot(seis_p/1.e9,mat_vs_4/1.e3,color=colors.color(4),dashes=dashstyle3, \ linewidth=1.5,marker='o',markerfacecolor=colors.color(4), markersize=4, \ markeredgecolor=colors.color(4),markevery=2,label='92\% pv') plt.plot(seis_p/1.e9,mat_vs_3/1.e3,color='g',linestyle='-',dashes=dashstyle2, \ linewidth=1.5,marker='o',markerfacecolor='w', markersize=4, markeredgecolor='g',markevery=2, label='83\% pv') plt.title(" V.-R.-H. 
on moduli") plt.xlabel("Pressure (GPa)") plt.ylabel("Shear Velocity Vs (km/s)") plt.tight_layout() plt.savefig("example_incorrect_averaging.pdf",bbox_inches='tight') plt.show()
gpl-2.0
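The point of paper_incorrect_averaging.py is that a Voigt-Reuss-Hill style average applied directly to the shear velocities (the mat_vs_*_wr lines) is not the same as averaging moduli and density first and then converting to velocity. The self-contained comparison below reproduces that arithmetic with invented fractions, moduli and densities, so the numbers carry no physical meaning beyond illustrating the discrepancy.

import numpy as np

f1, f2 = 0.92, 0.08            # volume fractions of the two phases
G1, G2 = 172.9e9, 130.9e9      # shear moduli [Pa]
rho1, rho2 = 4100.0, 3600.0    # densities [kg/m^3]

vs1, vs2 = np.sqrt(G1 / rho1), np.sqrt(G2 / rho2)

# "incorrect": VRH-style average applied directly to the velocities
vs_on_velocities = 0.5 * ((f1 * vs1 + f2 * vs2) +
                          1.0 / (f1 / vs1 + f2 / vs2))

# average the moduli (Voigt-Reuss-Hill) and the density, then convert
G_vrh = 0.5 * ((f1 * G1 + f2 * G2) + 1.0 / (f1 / G1 + f2 / G2))
rho = f1 * rho1 + f2 * rho2
vs_on_moduli = np.sqrt(G_vrh / rho)

print(vs_on_velocities, vs_on_moduli)  # close, but not identical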
DailyActie/Surrogate-Model
01-codes/scikit-learn-master/sklearn/linear_model/tests/test_ransac.py
1
17468
import numpy as np from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_equal, assert_raises from scipy import sparse from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso from sklearn.linear_model.ransac import _dynamic_max_trials from sklearn.utils import check_random_state from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_warns # Generate coordinates of line X = np.arange(-200, 200) y = 0.2 * X + 20 data = np.column_stack([X, y]) # Add some faulty data outliers = np.array((10, 30, 200)) data[outliers[0], :] = (1000, 1000) data[outliers[1], :] = (-1000, -1000) data[outliers[2], :] = (-100, -50) X = data[:, 0][:, np.newaxis] y = data[:, 1] def test_ransac_inliers_outliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_is_data_valid(): def is_data_valid(X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False X = np.random.rand(10, 2) y = np.random.rand(10, 1) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_data_valid=is_data_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_is_model_valid(): def is_model_valid(estimator, X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_model_valid=is_model_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_max_trials(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=0, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=11, random_state=0) assert getattr(ransac_estimator, 'n_trials_', None) is None ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 2) def test_ransac_stop_n_inliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_n_inliers=2, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_stop_score(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_score=0, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_score(): X = np.arange(100)[:, None] y = np.zeros((100,)) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.score(X[2:], y[2:]), 1) assert_less(ransac_estimator.score(X[:2], y[:2]), 1) def test_ransac_predict(): X = np.arange(100)[:, None] y = 
np.zeros((100,)) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.predict(X), np.zeros(100)) def test_ransac_resid_thresh_no_inliers(): # When residual_threshold=0.0 there are no inliers and a # ValueError with a message should be raised base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.0, random_state=0) assert_raises_regexp(ValueError, "No inliers.*residual_threshold.*0\.0", ransac_estimator.fit, X, y) def test_ransac_sparse_coo(): X_sparse = sparse.coo_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csr(): X_sparse = sparse.csr_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csc(): X_sparse = sparse.csc_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_none_estimator(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0) ransac_estimator.fit(X, y) ransac_none_estimator.fit(X, y) assert_array_almost_equal(ransac_estimator.predict(X), ransac_none_estimator.predict(X)) def test_ransac_min_n_samples(): base_estimator = LinearRegression() ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2. 
/ X.shape[0], residual_threshold=5, random_state=0) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1, residual_threshold=5, random_state=0) ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2, residual_threshold=5, random_state=0) ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0, residual_threshold=5, random_state=0) ransac_estimator6 = RANSACRegressor(base_estimator, residual_threshold=5, random_state=0) ransac_estimator7 = RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0) ransac_estimator1.fit(X, y) ransac_estimator2.fit(X, y) ransac_estimator5.fit(X, y) ransac_estimator6.fit(X, y) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator2.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator5.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator6.predict(X)) assert_raises(ValueError, ransac_estimator3.fit, X, y) assert_raises(ValueError, ransac_estimator4.fit, X, y) assert_raises(ValueError, ransac_estimator7.fit, X, y) def test_ransac_multi_dimensional_targets(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) # XXX: Remove in 0.20 def test_ransac_residual_metric(): residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1) residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric2) # multi-dimensional ransac_estimator0.fit(X, yyy) assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy) assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_residual_loss(): loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1) loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1) loss_mono = lambda y_true, y_pred: np.abs(y_true - y_pred) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) 
ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.loss = loss_mono ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss="squared_loss") ransac_estimator3.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1) # e = 5%, min_samples = 2 assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2) # e = 10%, min_samples = 2 assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3) # e = 30%, min_samples = 2 assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7) # e = 50%, min_samples = 2 assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17) # e = 5%, min_samples = 8 assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5) # e = 10%, min_samples = 8 assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9) # e = 30%, min_samples = 8 assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78) # e = 50%, min_samples = 8 assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177) # e = 0%, min_samples = 10 assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0) assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf')) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=-0.1) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=1.1) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_fit_sample_weight(): ransac_estimator = RANSACRegressor(random_state=0) n_samples = y.shape[0] weights = np.ones(n_samples) ransac_estimator.fit(X, y, weights) # sanity check assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False # check that mask is correct assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) X_ = random_state.randint(0, 200, [10, 1]) y_ = np.ndarray.flatten(0.2 * X_ + 2) sample_weight = random_state.randint(0, 10, 10) outlier_X = random_state.randint(0, 1000, [1, 1]) outlier_weight = random_state.randint(0, 10, 1) outlier_y = random_state.randint(-1000, 0, 1) X_flat = np.append(np.repeat(X_, sample_weight, axis=0), np.repeat(outlier_X, outlier_weight, 
axis=0), axis=0) y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0), np.repeat(outlier_y, outlier_weight, axis=0), axis=0)) ransac_estimator.fit(X_flat, y_flat) ref_coef_ = ransac_estimator.estimator_.coef_ sample_weight = np.append(sample_weight, outlier_weight) X_ = np.append(X_, outlier_X, axis=0) y_ = np.append(y_, outlier_y) ransac_estimator.fit(X_, y_, sample_weight) assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_) # check that if base_estimator.fit doesn't support # sample_weight, raises error base_estimator = Lasso() ransac_estimator = RANSACRegressor(base_estimator) assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
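The values checked in test_ransac_dynamic_max_trials above follow the standard RANSAC trial-count bound, N >= log(1 - p) / log(1 - w**m), where w is the inlier ratio, m is min_samples and p is stop_probability. Below is a minimal sketch of that bound with a helper of my own (max_trials, not scikit-learn's private _dynamic_max_trials), checked against a few of the hand-calculated table entries quoted in the test.

# Illustrative sketch of the standard RANSAC stopping rule; not the
# scikit-learn implementation, only consistent with the values asserted above.
import numpy as np

def max_trials(n_inliers, n_samples, min_samples, probability):
    inlier_ratio = n_inliers / float(n_samples)
    nom = 1.0 - probability
    denom = 1.0 - inlier_ratio ** min_samples
    if nom == 0:
        return float('inf')   # p = 1 cannot be guaranteed in finitely many trials
    if denom == 0:
        return 1              # every sample is an inlier: one trial suffices
    return int(np.ceil(np.log(nom) / np.log(denom)))

assert max_trials(100, 100, 2, 0.99) == 1     # e = 0%,  min_samples = 2
assert max_trials(95, 100, 2, 0.99) == 2      # e = 5%,  min_samples = 2
assert max_trials(50, 100, 2, 0.99) == 17     # e = 50%, min_samples = 2
assert max_trials(50, 100, 8, 0.99) == 1177   # e = 50%, min_samples = 8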
mit
jwdebelius/break_4w
break4w/data_dictionary.py
1
22592
""" I need really good documentation for this module that I haven't begun to write yet. This will be really important? """ from collections import OrderedDict import datetime import pydoc import numpy as np import pandas as pd from pandas.api.types import CategoricalDtype from break4w.question import Question from break4w.categorical import Categorical from break4w.bool import Bool from break4w.continous import Continous import break4w._defaults as b4wdefaults type_lookup = {'continous': Continous, 'categorical': Categorical, 'multiple choice': Categorical, 'ordinal': Categorical, 'bool': Bool, 'boolean': Bool, 'yes/no': Bool, } class DataDictionary(OrderedDict): """ Generates a data dictionary object Parameters ---------- columns: list of dicts A list of dictionaries representing each column in the metadata. The dictionaries must contain a `name` key, describing the column name. The values in the dictionary should the variables needed for each type of question object in a data dictionary. types: list of strings A description of the type of question being asked. These come from a relatively controlled vocabulary and include types such as `"continous", "categorical", "bool"`. If the question type does not conform to the controlled vocabulary, the column will be read as a Question object with limited functionality. description: str A description of the data dictionary or study of no more than 80 characters. """ default_cols = ['name', 'description', 'type', 'dtype', 'order', 'units', 'ambigious', 'missing', 'notes'] def __init__(self, columns, types, description=None): """Initializes the dictionary object This is a very basic prototype of the data dictionary object """ self.log = [] if description is None: self.description = '' elif len(description) > 80: raise ValueError('The dictionary description cannot be more than ' '80 characters') else: self.description = description # Adds the question objects to the dictionary for col_, type_ in zip(*(columns, types)): self.add_question(question_data=col_, question_type=type_, record=False, check=False) self.columns = list(self.keys()) def __str__(self): """ Generates printed summary """ summary = ['Data Dictionary with %i columns' % len(self)] if len(self.description) > 0: summary.append('\t%s' % self.description) summary.append('-----------------------------------------------------' '-------------------------------') for col in self.values(): summary.append('%s (%s)\n\t%s' % (col.name, col.type, col.description)) summary.append('-----------------------------------------------------' '-------------------------------') return '\n'.join(summary) def _update_log(self, command, column=None, transform_type=None, transformation=None): """Used for internal tracking of the columns and data Every time a Question acts on data, a record should be made of the transformation. (See break4w.question.Question._update_log). However, this also tracks the transformation on the dictionary level. Parameters ---------- command : str A short textual description of the command performed. This may be the function name in text format. column : str, optional The column in the metadata being explored. transform_type: str, optional A more general description of the type of action that was performed. Ideally, this comes for a preset list of possible actions, and the descriptions are consistent. transformation: str, optional Explains exactly how values were changed. 
""" self.log.append({ 'timestamp': datetime.datetime.now(), 'column': column, 'command': command, 'transform_type': transform_type, 'transformation': transformation, }) def _pull_question_log(self, column=None): """Adds information from the specified column to the log.""" raise NotImplementedError def add_question(self, question_data, question_type='', check=True, record=True, var_delim=' | ', code_delim='=', null_value='None'): """ Adds a new question object to the data dictionary Parameters ---------- question_data: Dict, Question Describes the data dictionary entry for the question. This can be a break4w question object created directly, or a dictionary objecting with information like the name in the metadata representation, data type, a description, and specific information for the type of question. For instance, `question_type` specified the qustion was `"continous"`, the `question_data` must also describe units for the question. question_type: str, optional Describes the type of question object that should be selected for the question. If `question_data` is a `Question` object, then no `question_type` is needed. check: bool, optional Checks whether a name already exists in the question name space. If this is true, then the function will check if the column already exists in the dictionary. If the column does exist and check is true, an error will be raised. If check is not true, the data dictionary entry for the column will be overwritten and any information in that column will be lost. record, bool, optional Indicates where the addition should be logged. read_numeric_codes: bool, optional Whether columns should be read with a numerical delimiter (i.e "=") to parse a numeric value into a categorical one. For example, if numeric is true, then "0=female | 1=male" would be parsed that any data encoded as 0 maps to female, any data encoded as 1 maps to male, and the order of hte values is `[0, 1]` (corresponding to `['female', 'male']`). Otherwise, the line would be read literally, and the order is read as `["0=female", "1=male"]`. val_delim: str, optional The seperator between values in the "order" column. code_delim: str, optional The delimiter between a numericly coded categorical variable and the value it maps to. Raises ------ ValueError When the function is checking for the column and the column name is already in the dictionary. If this is the case, the dictionary entry should be adjusted using `update_question`, not `add_question`, since this function will otherwise over write the existing column. 
""" error1 = False # Converts to a Question object question_object = type_lookup.get(question_type.lower(), Question) if isinstance(question_data, pd.Series): question_data.dropna(inplace=True) question_data = question_object._read_series( question_data, var_delim=var_delim, code_delim=code_delim, null_value=null_value, ) elif isinstance(question_data, dict): question_data = question_object(**question_data) elif isinstance(question_data, Question): pass else: message = ('question_data must be a Question, dict, or' ' Series') if record: self._update_log('add column', column=None, transformation=message, transform_type='error') raise ValueError(message) name = question_data.name # Checks if the question is in the dictionary if (name in self.keys()) and check: error1 = True message = '%s already has a dictionary entry' % name transform_type = 'error' else: message = '%s was added to the dictionary' % name transform_type = None # Updates the log if record: self._update_log('add column', column=name, transformation=message, transform_type=transform_type) # Raises an error or updates the dictionary, as appropriate if error1: raise ValueError(message) else: self[name] = question_data self.columns = list(self.keys()) def get_question(self, name): """ Returns the data dictionary column Parameters ---------- name: str The name of the dictionary column to be returned Returns ------- Question The question object for the appropriate dictionary object Raises ------ ValueError When the column being asked for does not exist. """ if name not in self.keys(): message = 'There is no entry for %s' % name self._update_log(column=name, command='get question', transform_type='error', transformation=message) raise ValueError(message) self._update_log(column=name, command='get question') return self[name] def drop_question(self, name): """ Removes a dictionary entry for the specified column. Parameters ---------- name: str The name of the dictionary column to be returned """ if name in self.keys(): del self[name] self.columns = list(self.keys()) self._update_log(command='remove question', column=name) def update_question(self, update, name=None): """ Updates dictionary entry for the data Parameters ---------- update: Dict, Question Describes the data dictionary entry for the question. This can be a break4w question object created directly, or a dictionary objecting with information like the name in the metadata representation, data type, a description, and specific information for the type of question. For instance, `question_type` specified the qustion was `"continous"`, the `question_data` must also describe units for the question. name: str, optional The name of the dictionary column to be returned. If `update` is a Question object, this can be infered from the question. 
""" # Gets the dictionary of the new column and column name if isinstance(update, Question): update = vars(update) if name is None: name = update['name'] # Checks if the data is already in the dictionary if name not in self.keys(): message = ('%s is not a question in the current dictionary.\n' 'Have you tried adding the question?') % name self._update_log(command='update question', column=name, transform_type='error', transformation=message) raise ValueError(message) current = vars(self[name]) diff = {k: v for k, v in update.items() if (((k not in current) or (v != current[k])) and (k not in {'log'})) } change_keys = {} for k, v in diff.items(): if k in current: change_keys[k] = (current[k], v) else: change_keys[k] = ('add', v) setattr(self[name], k, v) if 'log' in update: self[name].log.extend(update['log']) self._update_log( command='update question', column=name, transform_type='update dictionary values', transformation=' | '.join(['%s : %s > %s' % (k, v[0], v[1]) for k, v in change_keys.items()])) def validate(self, map_, check_order=True): """ Checks columns appear in the mapping file in the appropriate order and conform to the standards set in the data dictionary. Parameters ---------- map_ : DataFrame A pandas object containing the metadata being analyzed. check_order: bool, optional Do the order of columns in the data dictionary and metadata have to match? """ pass_ = True failures = [] fail_message = [] self._validate_question_order(map_, check_order) for name, question in self.items(): if question.type == 'Question': continue try: question.validate(map_) except: pass_ = False failures.append( '\t%s - %s' % (name, question.log[-1]['transformation']) ) self.log.append(question.log[-1]) if pass_: self._update_log('validate', transform_type='pass', transformation='All columns passed') else: message = ('There were issues with the following columns:\n%s' % '\n'.join(failures)) message_l = (('There were issues with the following columns:\n%s' '\nPlease See the log for more details.') % '\n'.join([fail.split(' - ')[0].replace('\t', '') for fail in failures])) self._update_log('validate', transform_type='error', transformation=message_l) raise ValueError(message) def _validate_question_order(self, map_, check_order=True, record=True, verbose=False): """ Checks all the required questions are present in the mapping file and that they are in the correct order. Parameters ---------- map_ : DataFrame A pandas object containing the metadata being analyzed. check_order: bool, optional Do the order of columns in the data dictionary and metadata have to match? record: bool, optional Indicates where the addition should be logged. verbose: bool, optional Provides more detailed information about the error Raises ------ ValueError """ pass_ = True message = ('The columns in the mapping file match the columns in ' 'the data dictionary.') map_columns = list(map_.columns) dict_columns = list(self.keys()) if not set(map_columns) == set(dict_columns): pass_ = False in_map = list(set(map_columns) - set(dict_columns)) in_dict = list(set(dict_columns) - set(map_columns)) text = ('There are %i columns in the data dictionary ' 'not in the mapping file, and %i from the mapping' ' file not in the data dictionary.' 
% (len(in_dict), len(in_map))) if len(in_dict) > 0: not_map = ('In the dictionary but not in the map: \n\t%s\n' % '; '.join(in_dict)) else: not_map = '' if len(in_map) > 0: t_ = '\nIn the map but not in the dictionary:\n\t%s\n' not_dict = t_ % '; '.join(in_map) else: not_dict = '' if verbose: message = '%s%s%s' % (text, not_map, not_dict) # message = not_dict else: message = text elif not (map_columns == dict_columns) and check_order: pass_ = False message = ('The columns in the dictionary and map are not in' ' the same order.') if record and pass_: self._update_log(command='validate', transform_type='pass', transformation=message) elif record and not pass_: self._update_log(command='validate', transform_type='fail', transformation=message) raise ValueError(message) elif not pass_: raise ValueError(message) def to_dataframe(self, clean=False, val_delim=' | ', code_delim='='): u"""Converts data dictionary to a pandas dataframe Parameters ---------- clean: bool, optional Returns a subset of columns for the data dictionary. When True, the data dicitonary will return the following columns: * `name` -- the name of the column * `description` -- the 80 character description * `type` -- the type of question (Continous, Question, Categorical, or Boolean) * `dtype` -- the datatype for the pandas column. * `order` -- the order of data for categorical objects or range of values for continous values * `units` -- units for continous values * `ambigious` -- values for ambigious results * `missing` -- values for missing values * `notes` -- any notes passed into the data dictionary object to be retained val_delim: str, optional The seperator between values in the "order" column. code_delim: str, optional The delimiter between a numericly coded categorical variable and the value it maps to. Returns ------- DataFrame A dataframe mapping the variable name to its description, question type, datatype, and order. Example ------- """ cols = [] for col in self.values(): ser_ = col._to_series() # if isinstance(col, Continous): ser_.rename({'limits': 'order'}, inplace=True) cols.append(ser_) df_ = pd.concat(axis=1, sort=False, objs=cols).T if ('var_labels' in df_.columns): df_.loc[df_['var_labels'].notna(), 'order'] = \ df_.loc[df_['var_labels'].notna(), 'var_labels'] df_.drop(columns=['var_labels'], inplace=True) if clean: cols = [c for c in self.default_cols if c in df_] df_ = df_[cols] df_.drop(columns=df_.columns[df_.isna().all(axis=0)], inplace=True) return df_.set_index('name') def to_pandas_stata(self): """ Generates strings and dictionary compatible with writing to stata Returns ------- str A stata-compatible dataset description for `pandas.write_stata` dictionary A stata-compatible description for each variable, compatible with `pandas.write_stata`. """ variable_desc = {k: v.description for k,v in self.items()} return self.description, variable_desc def to_ddi_xml(self): pass @classmethod def read_dataframe(cls, df_, description=None, var_delim=' | ', code_delim='=', null_value='None'): """Builds the data dictionary from a dataframe Parameters ---------- df_ : DataFrame A pandas dataframe version of the data dictionary where the data is indexed by `name` description: str A description of the data dictionary or study of no more than 80 characters. read_codes: bool, optional Whether columns should be read with a numerical delimiter (i.e "=") to parse a numeric value into a categorical one. 
For example, if numeric is true, then "0=female | 1=male" would be parsed that any data encoded as 0 maps to female, any data encoded as 1 maps to male, and the order of hte values is `[0, 1]` (corresponding to `['female', 'male']`). Otherwise, the line would be read literally, and the order is read as `["0=female", "1=male"]`. val_delim: str, optional The seperator between values in the "order" column. code_delim: str, optional The delimiter between a numericly coded categorical variable and the value it maps to. Returns ------- DataDictionary A data dictionary object with the newly described study. Examples -------- """ types = [] cols = [] if 'name' not in df_.columns: df_.reset_index(inplace=True) for name_, var_ in df_.iterrows(): # Describes the question type type_ = var_['type'] qclass = type_lookup.get(type_.lower(), Question) var_.drop('type', inplace=True) # Updates the column and type objects types.append(type_) cols.append(qclass._read_series(var_.dropna(), var_delim=var_delim, code_delim=code_delim, null_value=null_value)) return cls(columns=cols, types=types, description=description) # @classmethod def to_usgs_xml(self): """Converts the data dictionary to a usgs xlm format""" pass @classmethod def read_stata(cls, iter_, ): pass
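The docstrings above describe building a DataDictionary from parallel columns/types lists. The following is a hedged usage sketch only: the exact keyword arguments accepted by each question class (for example units for a continuous question, order for a categorical one) are assumptions inferred from the docstrings, not verified against the break4w question classes.

# Hypothetical usage sketch based on the docstrings above; the per-type
# keyword arguments are assumptions, not a confirmed break4w API.
from break4w.data_dictionary import DataDictionary

columns = [
    {'name': 'age', 'description': 'Age at enrollment', 'units': 'years'},
    {'name': 'sex', 'description': 'Self-reported sex',
     'order': ['female', 'male']},
    {'name': 'smoker', 'description': 'Current smoker'},
]
types = ['continous', 'categorical', 'bool']   # note the module's spelling

data_dict = DataDictionary(columns, types,
                           description='Toy study dictionary')
print(data_dict)                    # printed summary from __str__
df = data_dict.to_dataframe(clean=True)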
bsd-2-clause
kewitz/mestrado
Eletromagnetismo Computacional II/MOM.carganofio.py
1
1063
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 1 15:09:54 2014

@author: leo
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d

# Defs
irange = lambda a: zip(range(len(a)), a)
pi = np.pi

# Parameters
L = np.float64(1.0)          # Wire length in m.
a = np.float64(1E-3)         # Conductor radius.
Delta = np.float64(.01)      # Segment length (delta).
V0 = np.float64(1.0)         # Wire potential in V.
Eps = np.float64(8.854E-12)  # Vacuum permittivity.

# Functions
l = lambda m, n: Delta/np.abs(y[m-1]-y[n-1]) if m != n else 2*np.log(Delta/a)
g = lambda m: 4*pi*Eps*V0

# Domains and constants
y = np.arange(0, L, Delta, dtype=np.float64)
ns = y.size
ms = ns
L = np.matrix(np.zeros((ms, ns)))  # Note: the name L is reused here for the moment matrix.
G = np.matrix(np.zeros((ns)))

# Processing
for i in range(ms):
    m = np.float64(i+1.0)
    G[0, i] = g(m)           # Assemble the voltage (excitation) vector.
    for j in range(ns):
        n = np.float64(j+1.0)
        L[i, j] = l(m, n)    # Assemble the coefficient (moment) matrix.

rho = np.linalg.solve(L, G.T)  # Solve for the charge-density vector.
Q = rho.sum() * Delta
print("Total charge on the conductor: %.3e C" % Q)

# Plots
plt.plot(y, rho)
plt.show()
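The double loop above fills the moment matrix entry by entry; the same matrix can be built with NumPy broadcasting. A sketch under the same definitions of y, Delta and a as in the script:

# Vectorized construction of the same moment matrix: off-diagonal entries are
# Delta / |y_m - y_n| and diagonal entries are 2*ln(Delta/a), matching the
# lambda `l` used in the loop above.
import numpy as np

def moment_matrix(y, Delta, a):
    dist = np.abs(y[:, None] - y[None, :])         # pairwise |y_m - y_n|
    with np.errstate(divide='ignore'):
        M = Delta / dist                           # off-diagonal terms (diag becomes inf)
    np.fill_diagonal(M, 2.0 * np.log(Delta / a))   # overwrite the self terms
    return M

# With the script's arrays:  L = moment_matrix(y, Delta, a)
#                            rho = np.linalg.solve(L, np.full(y.size, 4*np.pi*Eps*V0))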
mit
astroML/astroML
astroML/plotting/regression.py
2
3574
import numpy as np import matplotlib.pyplot as plt from scipy import optimize from astroML.linear_model import TLS_logL, LinearRegression # TLS: def get_m_b(beta): b = np.dot(beta, beta) / beta[1] m = -beta[0] / beta[1] return m, b def plot_regressions(ksi, eta, x, y, sigma_x, sigma_y, add_regression_lines=False, alpha_in=1, beta_in=0.5, basis='linear'): figure = plt.figure(figsize=(8, 6)) ax = figure.add_subplot(111) ax.scatter(x, y, alpha=0.5) ax.errorbar(x, y, xerr=sigma_x, yerr=sigma_y, alpha=0.3, ls='') ax.set_xlabel('x') ax.set_ylabel('y') x0 = np.linspace(np.min(x) - 0.5, np.max(x) + 0.5, 20) # True regression line if alpha_in is not None and beta_in is not None: if basis == 'linear': y0 = alpha_in + x0 * beta_in elif basis == 'poly': y0 = alpha_in + beta_in[0] * x0 + beta_in[1] * x0 * x0 + beta_in[2] * x0 * x0 * x0 ax.plot(x0, y0, color='black', label='True regression') else: y0 = None if add_regression_lines: for label, data, *target in [['fit no errors', x, y, 1], ['fit y errors only', x, y, sigma_y], ['fit x errors only', y, x, sigma_x]]: linreg = LinearRegression() linreg.fit(data[:, None], *target) if label == 'fit x errors only' and y0 is not None: x_fit = linreg.predict(y0[:, None]) ax.plot(x_fit, y0, label=label) else: y_fit = linreg.predict(x0[:, None]) ax.plot(x0, y_fit, label=label) # TLS X = np.vstack((x, y)).T dX = np.zeros((len(x), 2, 2)) dX[:, 0, 0] = sigma_x dX[:, 1, 1] = sigma_y def min_func(beta): return -TLS_logL(beta, X, dX) beta_fit = optimize.fmin(min_func, x0=[-1, 1]) m_fit, b_fit = get_m_b(beta_fit) x_fit = np.linspace(-10, 10, 20) ax.plot(x_fit, m_fit * x_fit + b_fit, label='TLS') ax.set_xlim(np.min(x)-0.5, np.max(x)+0.5) ax.set_ylim(np.min(y)-0.5, np.max(y)+0.5) ax.legend() def plot_regression_from_trace(fitted, observed, ax=None, chains=None, multidim_ind=None): traces = [fitted.trace, ] xi, yi, sigx, sigy = observed if multidim_ind is not None: xi = xi[multidim_ind] x = np.linspace(np.min(xi)-0.5, np.max(xi)+0.5, 50) for i, trace in enumerate(traces): if 'theta' in trace.varnames and 'slope' not in trace.varnames: trace.add_values({'slope': np.tan(trace['theta'])}) if multidim_ind is not None: trace_slope = trace['slope'][:, multidim_ind] else: trace_slope = trace['slope'][:, 0] if chains is not None: for chain in range(100, len(trace) * trace.nchains, chains): y = trace['inter'][chain] + trace_slope[chain] * x ax.plot(x, y, alpha=0.03, c='red') # plot the best-fit line only H2D, bins1, bins2 = np.histogram2d(trace_slope, trace['inter'], bins=50) w = np.where(H2D == H2D.max()) # choose the maximum posterior slope and intercept slope_best = bins1[w[0][0]] intercept_best = bins2[w[1][0]] print("beta:", slope_best, "alpha:", intercept_best) y = intercept_best + slope_best * x # y_pre = fitted.predict(x[:, None]) ax.plot(x, y, ':', label='fitted') ax.legend() break
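A hedged sketch of calling plot_regressions above on synthetic data with a known intercept and slope. It assumes astroML is installed and importable as astroML.plotting.regression, and it takes ksi/eta to be the true, error-free coordinates, which is an assumption based on the signature rather than documented behavior.

# Usage sketch only; parameter interpretation of ksi/eta is assumed.
import numpy as np
import matplotlib.pyplot as plt
from astroML.plotting.regression import plot_regressions

rng = np.random.RandomState(0)
ksi = np.linspace(0, 10, 50)                 # true x
eta = 1.0 + 0.5 * ksi                        # true y = alpha_in + beta_in * x
sigma_x = np.full(ksi.size, 0.2)
sigma_y = np.full(ksi.size, 0.3)
x = ksi + sigma_x * rng.randn(ksi.size)      # observed x with errors
y = eta + sigma_y * rng.randn(ksi.size)      # observed y with errors

plot_regressions(ksi, eta, x, y, sigma_x, sigma_y,
                 add_regression_lines=True, alpha_in=1.0, beta_in=0.5)
plt.show()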
bsd-2-clause
yuanagain/seniorthesis
venv/lib/python2.7/site-packages/matplotlib/tests/test_delaunay.py
7
7137
from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six from matplotlib.externals.six.moves import xrange import warnings import numpy as np from matplotlib.testing.decorators import image_comparison, knownfailureif from matplotlib.cbook import MatplotlibDeprecationWarning with warnings.catch_warnings(): # the module is deprecated. The tests should be removed when the module is. warnings.simplefilter('ignore', MatplotlibDeprecationWarning) from matplotlib.delaunay.triangulate import Triangulation from matplotlib import pyplot as plt import matplotlib as mpl def constant(x, y): return np.ones(x.shape, x.dtype) constant.title = 'Constant' def xramp(x, y): return x xramp.title = 'X Ramp' def yramp(x, y): return y yramp.title = 'Y Ramp' def exponential(x, y): x = x*9 y = y*9 x1 = x+1.0 x2 = x-2.0 x4 = x-4.0 x7 = x-7.0 y1 = x+1.0 y2 = y-2.0 y3 = y-3.0 y7 = y-7.0 f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) + 0.75 * np.exp(-x1*x1/49.0 - y1/10.0) + 0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) - 0.2 * np.exp(-x4*x4 -y7*y7)) return f exponential.title = 'Exponential and Some Gaussians' def cliff(x, y): f = np.tanh(9.0*(y-x) + 1.0)/9.0 return f cliff.title = 'Cliff' def saddle(x, y): f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2) return f saddle.title = 'Saddle' def gentle(x, y): f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0 return f gentle.title = 'Gentle Peak' def steep(x, y): f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0 return f steep.title = 'Steep Peak' def sphere(x, y): circle = 64-81*((x-0.5)**2 + (y-0.5)**2) f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0) return f sphere.title = 'Sphere' def trig(x, y): f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y) return f trig.title = 'Cosines and Sines' def gauss(x, y): x = 5.0-10.0*x y = 5.0-10.0*y g1 = np.exp(-x*x/2) g2 = np.exp(-y*y/2) f = g1 + 0.75*g2*(1 + g1) return f gauss.title = 'Gaussian Peak and Gaussian Ridges' def cloverleaf(x, y): ex = np.exp((10.0-20.0*x)/3.0) ey = np.exp((10.0-20.0*y)/3.0) logitx = 1.0/(1.0+ex) logity = 1.0/(1.0+ey) f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 * (ex-2.0*logitx)*(ey-2.0*logity)) return f cloverleaf.title = 'Cloverleaf' def cosine_peak(x, y): circle = np.hypot(80*x-40.0, 90*y-45.) 
f = np.exp(-0.04*circle) * np.cos(0.15*circle) return f cosine_peak.title = 'Cosine Peak' allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak] class LinearTester(object): name = 'Linear' def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250): self.xrange = xrange self.yrange = yrange self.nrange = nrange self.npoints = npoints rng = np.random.RandomState(1234567890) self.x = rng.uniform(xrange[0], xrange[1], size=npoints) self.y = rng.uniform(yrange[0], yrange[1], size=npoints) self.tri = Triangulation(self.x, self.y) def replace_data(self, dataset): self.x = dataset.x self.y = dataset.y self.tri = Triangulation(self.x, self.y) def interpolator(self, func): z = func(self.x, self.y) return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange) def plot(self, func, interp=True, plotter='imshow'): if interp: lpi = self.interpolator(func) z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange), self.xrange[0]:self.xrange[1]:complex(0,self.nrange)] else: y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange), self.xrange[0]:self.xrange[1]:complex(0,self.nrange)] z = func(x, y) z = np.where(np.isinf(z), 0.0, z) extent = (self.xrange[0], self.xrange[1], self.yrange[0], self.yrange[1]) fig = plt.figure() plt.hot() # Some like it hot if plotter == 'imshow': plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower') elif plotter == 'contour': Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange), self.xrange[0]:self.xrange[1]:complex(0,self.nrange)] plt.contour(np.ravel(X), np.ravel(Y), z, 20) x = self.x y = self.y lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j])) for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)]) ax = plt.gca() ax.add_collection(lc) if interp: title = '%s Interpolant' % self.name else: title = 'Reference' if hasattr(func, 'title'): plt.title('%s: %s' % (func.title, title)) else: plt.title(title) class NNTester(LinearTester): name = 'Natural Neighbors' def interpolator(self, func): z = func(self.x, self.y) return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange) def make_all_2d_testfuncs(allfuncs=allfuncs): def make_test(func): filenames = [ '%s-%s' % (func.__name__, x) for x in ['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']] # We only generate PNGs to save disk space -- we just assume # that any backend differences are caught by other tests. 
@image_comparison(filenames, extensions=['png'], freetype_version=('2.4.5', '2.4.9'), remove_text=True) def reference_test(): nnt.plot(func, interp=False, plotter='imshow') nnt.plot(func, interp=True, plotter='imshow') lpt.plot(func, interp=True, plotter='imshow') nnt.plot(func, interp=False, plotter='contour') nnt.plot(func, interp=True, plotter='contour') lpt.plot(func, interp=True, plotter='contour') tester = reference_test tester.__name__ = str('test_%s' % func.__name__) return tester nnt = NNTester(npoints=1000) lpt = LinearTester(npoints=1000) for func in allfuncs: globals()['test_%s' % func.__name__] = make_test(func) make_all_2d_testfuncs() # 1d and 0d grid tests ref_interpolator = Triangulation([0,10,10,0], [0,0,10,10]).linear_interpolator([1,10,5,2.0]) def test_1d_grid(): res = ref_interpolator[3:6:2j,1:1:1j] assert np.allclose(res, [[1.6],[1.9]], rtol=0) def test_0d_grid(): res = ref_interpolator[3:3:1j,1:1:1j] assert np.allclose(res, [[1.6]], rtol=0) @image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png']) def test_1d_plots(): x_range = slice(0.25,9.75,20j) x = np.mgrid[x_range] ax = plt.gca() for y in xrange(2,10,2): plt.plot(x, ref_interpolator[x_range,y:y:1j]) ax.set_xticks([]) ax.set_yticks([])
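The testers above exercise the long-deprecated matplotlib.delaunay module. As a point of comparison, here is a sketch of the same kind of linear interpolation of one of the test functions using the maintained matplotlib.tri API; this is a replacement illustration, not the code under test.

# Linear interpolation of the `trig` test function with matplotlib.tri.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.tri import Triangulation, LinearTriInterpolator

rng = np.random.RandomState(1234567890)
x = rng.uniform(0.0, 1.0, size=250)
y = rng.uniform(0.0, 1.0, size=250)
z = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)   # the `trig` function above

tri = Triangulation(x, y)
interp = LinearTriInterpolator(tri, z)
xi, yi = np.meshgrid(np.linspace(0, 1, 101), np.linspace(0, 1, 101))
zi = interp(xi, yi)                          # masked array over the grid
plt.imshow(zi, origin='lower', extent=(0, 1, 0, 1), interpolation='nearest')
plt.title('Cosines and Sines: tri.LinearTriInterpolator')
plt.show()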
mit
kobejean/tensorflow
tensorflow/examples/get_started/regression/test.py
41
4037
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A simple smoke test that runs these examples for 1 training iteration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import pandas as pd from six.moves import StringIO import tensorflow.examples.get_started.regression.imports85 as imports85 sys.modules["imports85"] = imports85 # pylint: disable=g-bad-import-order,g-import-not-at-top import tensorflow.contrib.data as data import tensorflow.examples.get_started.regression.dnn_regression as dnn_regression import tensorflow.examples.get_started.regression.linear_regression as linear_regression import tensorflow.examples.get_started.regression.linear_regression_categorical as linear_regression_categorical import tensorflow.examples.get_started.regression.custom_regression as custom_regression from tensorflow.python.platform import googletest from tensorflow.python.platform import test # pylint: disable=g-bad-import-order,g-import-not-at-top # pylint: disable=line-too-long FOUR_LINES = "\n".join([ "1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500", "2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950", "2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450", "2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250",]) # pylint: enable=line-too-long def four_lines_dataframe(): text = StringIO(FOUR_LINES) return pd.read_csv(text, names=imports85.types.keys(), dtype=imports85.types, na_values="?") def four_lines_dataset(*args, **kwargs): del args, kwargs return data.Dataset.from_tensor_slices(FOUR_LINES.split("\n")) class RegressionTest(googletest.TestCase): """Test the regression examples in this directory.""" @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset}) @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)}) @test.mock.patch.dict(linear_regression.__dict__, {"STEPS": 1}) def test_linear_regression(self): linear_regression.main([""]) @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset}) @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)}) @test.mock.patch.dict(linear_regression_categorical.__dict__, {"STEPS": 1}) def test_linear_regression_categorical(self): linear_regression_categorical.main([""]) @test.mock.patch.dict(data.__dict__, {"TextLineDataset": four_lines_dataset}) @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)}) @test.mock.patch.dict(dnn_regression.__dict__, {"STEPS": 1}) def test_dnn_regression(self): dnn_regression.main([""]) @test.mock.patch.dict(data.__dict__, 
{"TextLineDataset": four_lines_dataset}) @test.mock.patch.dict(imports85.__dict__, {"_get_imports85": (lambda: None)}) @test.mock.patch.dict(custom_regression.__dict__, {"STEPS": 1}) def test_custom_regression(self): custom_regression.main([""]) if __name__ == "__main__": googletest.main()
apache-2.0
BhallaLab/moose-full
moose-examples/snippets/MULTI/midchan.py
2
13452
# midchan.py --- # Upi Bhalla, NCBS Bangalore 2014. # # Commentary: # # This loads in a medium-detail model incorporating # reac-diff and elec signaling in neurons. The reac-diff model # has just Ca and CaM in it, and there are no-cross-compartment # reactions though Ca diffuses everywhere. The elec model controls the # Ca levels in the chem compartments. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, Fifth # Floor, Boston, MA 02110-1301, USA. # # Code: import sys sys.path.append('../../python') import os os.environ['NUMPTHREADS'] = '1' import math import numpy import matplotlib.pyplot as plt import moose import proto18 EREST_ACT = -70e-3 def loadElec(): library = moose.Neutral( '/library' ) moose.setCwe( '/library' ) proto18.make_Ca() proto18.make_Ca_conc() proto18.make_K_AHP() proto18.make_K_C() proto18.make_Na() proto18.make_K_DR() proto18.make_K_A() proto18.make_glu() proto18.make_NMDA() proto18.make_Ca_NMDA() proto18.make_NMDA_Ca_conc() proto18.make_axon() moose.setCwe( '/library' ) model = moose.Neutral( '/model' ) cellId = moose.loadModel( 'ca1_asym.p', '/model/elec', "Neutral" ) return cellId def loadChem( diffLength ): chem = moose.Neutral( '/model/chem' ) neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' ) neuroCompt.separateSpines = 1 neuroCompt.geometryPolicy = 'cylinder' spineCompt = moose.SpineMesh( '/model/chem/compartment_1' ) moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' ) psdCompt = moose.PsdMesh( '/model/chem/compartment_2' ) #print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' ) modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' ) #modelId = moose.loadModel( 'psd_merged31d.g', '/model/chem', 'ee' ) neuroCompt.name = 'dend' spineCompt.name = 'spine' psdCompt.name = 'psd' def makeNeuroMeshModel(): diffLength = 10e-6 # Aim for 2 soma compartments. 
elec = loadElec() loadChem( diffLength ) neuroCompt = moose.element( '/model/chem/dend' ) neuroCompt.diffLength = diffLength neuroCompt.cellPortion( elec, '/model/elec/#' ) for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ): if (x.diffConst > 0): x.diffConst = 1e-11 for x in moose.wildcardFind( '/model/chem/##/Ca' ): x.diffConst = 1e-10 # Put in dend solvers ns = neuroCompt.numSegments ndc = neuroCompt.numDiffCompts print 'ns = ', ns, ', ndc = ', ndc assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num ) assert( ns == 36 ) # assert( ndc == 278 ) # nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' ) nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' ) nmstoich = moose.Stoich( '/model/chem/dend/stoich' ) nmstoich.compartment = neuroCompt nmstoich.ksolve = nmksolve nmstoich.dsolve = nmdsolve nmstoich.path = "/model/chem/dend/##" print 'done setting path, numPools = ', nmdsolve.numPools assert( nmdsolve.numPools == 1 ) assert( nmdsolve.numAllVoxels == ndc ) assert( nmstoich.numAllPools == 1 ) # oddly, numLocalFields does not work. ca = moose.element( '/model/chem/dend/DEND/Ca' ) assert( ca.numData == ndc ) # Put in spine solvers. Note that these get info from the neuroCompt spineCompt = moose.element( '/model/chem/spine' ) sdc = spineCompt.mesh.num print 'sdc = ', sdc assert( sdc == 13 ) smksolve = moose.Ksolve( '/model/chem/spine/ksolve' ) smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' ) smstoich = moose.Stoich( '/model/chem/spine/stoich' ) smstoich.compartment = spineCompt smstoich.ksolve = smksolve smstoich.dsolve = smdsolve smstoich.path = "/model/chem/spine/##" print 'spine num Pools = ', smstoich.numAllPools assert( smstoich.numAllPools == 3 ) assert( smdsolve.numPools == 3 ) assert( smdsolve.numAllVoxels == sdc ) # Put in PSD solvers. 
Note that these get info from the neuroCompt psdCompt = moose.element( '/model/chem/psd' ) pdc = psdCompt.mesh.num assert( pdc == 13 ) pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' ) pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' ) pmstoich = moose.Stoich( '/model/chem/psd/stoich' ) pmstoich.compartment = psdCompt pmstoich.ksolve = pmksolve pmstoich.dsolve = pmdsolve pmstoich.path = "/model/chem/psd/##" assert( pmstoich.numAllPools == 3 ) assert( pmdsolve.numPools == 3 ) assert( pmdsolve.numAllVoxels == pdc ) foo = moose.element( '/model/chem/psd/Ca' ) print 'PSD: numfoo = ', foo.numData print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels # Put in junctions between the diffusion solvers nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve ) """ CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' ) print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' ) print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume """ ################################################################## # set up adaptors aCa = moose.Adaptor( '/model/chem/spine/adaptCa', sdc ) adaptCa = moose.vec( '/model/chem/spine/adaptCa' ) chemCa = moose.vec( '/model/chem/spine/Ca' ) #print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa ) assert( len( adaptCa ) == sdc ) assert( len( chemCa ) == sdc ) for i in range( sdc ): elecCa = moose.element( '/model/elec/spine_head_14_' + str(i+1) + '/NMDA_Ca_conc' ) #print elecCa moose.connect( elecCa, 'concOut', adaptCa[i], 'input', 'Single' ) moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' ) adaptCa.inputOffset = 0.0 # adaptCa.outputOffset = 0.00008 # 80 nM offset in chem. adaptCa.scale = 1e-4 # 520 to 0.0052 mM #print adaptCa.outputOffset moose.le( '/model/chem/dend/DEND' ) compts = neuroCompt.elecComptList begin = neuroCompt.startVoxelInCompt end = neuroCompt.endVoxelInCompt aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', len( compts)) adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' ) chemCa = moose.vec( '/model/chem/dend/DEND/Ca' ) #print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa ) assert( len( chemCa ) == ndc ) for i in zip( compts, adaptCa, begin, end ): name = i[0].path + '/Ca_conc' if ( moose.exists( name ) ): elecCa = moose.element( name ) #print i[2], i[3], ' ', elecCa #print i[1] moose.connect( elecCa, 'concOut', i[1], 'input', 'Single' ) for j in range( i[2], i[3] ): moose.connect( i[1], 'output', chemCa[j], 'setConc', 'Single' ) adaptCa.inputOffset = 0.0 # adaptCa.outputOffset = 0.00008 # 80 nM offset in chem. adaptCa.scale = 20e-6 # 10 arb units to 2 uM. 
def addPlot( objpath, field, plot ): #assert moose.exists( objpath ) if moose.exists( objpath ): tab = moose.Table( '/graphs/' + plot ) obj = moose.element( objpath ) if obj.className == 'Neutral': print "addPlot failed: object is a Neutral: ", objpath return moose.element( '/' ) else: #print "object was found: ", objpath, obj.className moose.connect( tab, 'requestOut', obj, field ) return tab else: print "addPlot failed: object not found: ", objpath return moose.element( '/' ) def makeElecPlots(): graphs = moose.Neutral( '/graphs' ) elec = moose.Neutral( '/graphs/elec' ) addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' ) addPlot( '/model/elec/spine_head_14_4', 'getVm', 'elec/spineVm' ) addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'elec/somaCa' ) addPlot( '/model/elec/lat_11_2/Ca_conc', 'getCa', 'elec/lat11Ca' ) addPlot( '/model/elec/spine_head_14_4/NMDA_Ca_conc', 'getCa', 'elec/spine4Ca' ) addPlot( '/model/elec/spine_head_14_12/NMDA_Ca_conc', 'getCa', 'elec/spine12Ca' ) def makeChemPlots(): graphs = moose.Neutral( '/graphs' ) chem = moose.Neutral( '/graphs/chem' ) addPlot( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' ) addPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' ) addPlot( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' ) addPlot( '/model/chem/spine/Ca[3]', 'getConc', 'chem/spine4Ca' ) addPlot( '/model/chem/spine/Ca[11]', 'getConc', 'chem/spine12Ca' ) addPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' ) addPlot( '/model/chem/dend/DEND/Ca[20]', 'getConc', 'chem/dendCa20' ) def testNeuroMeshMultiscale(): elecDt = 50e-6 chemDt = 0.005 ePlotDt = 0.5e-3 cPlotDt = 0.005 plotName = 'nm.plot' makeNeuroMeshModel() print "after model is completely done" for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ): print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb makeChemPlots() makeElecPlots() moose.setClock( 0, elecDt ) moose.setClock( 1, elecDt ) moose.setClock( 2, elecDt ) moose.setClock( 4, chemDt ) moose.setClock( 5, chemDt ) moose.setClock( 6, chemDt ) moose.setClock( 7, cPlotDt ) moose.setClock( 8, ePlotDt ) moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' ) moose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' ) moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' ) moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process') #moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' ) #moose.useClock( 4, '/model/chem/##[ISA=Adaptor]', 'process' ) moose.useClock( 4, '/model/chem/#/dsolve', 'process' ) moose.useClock( 5, '/model/chem/#/ksolve', 'process' ) moose.useClock( 6, '/model/chem/spine/adaptCa', 'process' ) moose.useClock( 6, '/model/chem/dend/DEND/adaptCa', 'process' ) moose.useClock( 7, '/graphs/chem/#', 'process' ) moose.useClock( 8, '/graphs/elec/#', 'process' ) #hsolve = moose.HSolve( '/model/elec/hsolve' ) #moose.useClock( 1, '/model/elec/hsolve', 'process' ) #hsolve.dt = elecDt #hsolve.target = '/model/elec/compt' #moose.reinit() moose.element( '/model/elec/soma' ).inject = 2e-10 moose.element( '/model/chem/psd/Ca' ).concInit = 0.001 moose.element( '/model/chem/spine/Ca' ).concInit = 0.002 moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003 moose.reinit() moose.start( 0.25 ) # moose.element( '/model/elec/soma' ).inject = 0 # moose.start( 0.25 ) plt.ion() fig = plt.figure( figsize=(8,8) ) chem = fig.add_subplot( 311 ) chem.set_ylim( 0, 0.002 ) plt.ylabel( 'Conc (mM)' ) plt.xlabel( 'time (seconds)' ) 
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ): pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt line1, = chem.plot( pos, x.vector, label=x.name ) plt.legend() elec = fig.add_subplot( 312 ) plt.ylabel( 'Vm (V)' ) plt.xlabel( 'time (seconds)' ) for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ): pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt line1, = elec.plot( pos, x.vector, label=x.name ) plt.legend() lenplot = fig.add_subplot( 313 ) plt.ylabel( 'Ca (mM )' ) plt.xlabel( 'Voxel#)' ) spineCa = moose.vec( '/model/chem/spine/Ca' ) dendCa = moose.vec( '/model/chem/dend/DEND/Ca' ) line1, = lenplot.plot( range( len( spineCa ) ), spineCa.conc, label='spine' ) line2, = lenplot.plot( range( len( dendCa ) ), dendCa.conc, label='dend' ) ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConc]') ] line3, = lenplot.plot( range( len( ca ) ), ca, label='elec' ) spineCaM = moose.vec( '/model/chem/spine/Ca_CaM' ) line4, = lenplot.plot( range( len( spineCaM ) ), spineCaM.conc, label='spineCaM' ) psdCaM = moose.vec( '/model/chem/psd/Ca_CaM' ) line5, = lenplot.plot( range( len( psdCaM ) ), psdCaM.conc, label='psdCaM' ) plt.legend() fig.canvas.draw() raw_input() ''' for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ): t = numpy.arange( 0, x.vector.size, 1 ) pylab.plot( t, x.vector, label=x.name ) pylab.legend() pylab.show() ''' print 'All done' def main(): testNeuroMeshMultiscale() if __name__ == '__main__': main() # # minimal.py ends here.
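The adaptors set up above map an electrical Ca readout into a chemical-pool concentration through an offset and a scale. The toy below illustrates that linear mapping in plain Python; the formula used here is an assumption for illustration, not a restatement of the MOOSE Adaptor implementation.

# Pure-Python toy of the adaptor idea; the mapping
# output = outputOffset + scale * (input - inputOffset) is assumed.
def adapt(value, scale, input_offset=0.0, output_offset=0.0):
    return output_offset + scale * (value - input_offset)

# e.g. with the spine settings above (inputOffset = 0.0, scale = 1e-4),
# an electrical Ca readout of 0.5 maps to 5e-5 in the chemical units.
print(adapt(0.5, scale=1e-4))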
gpl-2.0
timqian/sms-tools
software/transformations_interface/hpsTransformations_function.py
23
6610
# function call to the transformation functions of relevance for the hpsModel import numpy as np import matplotlib.pyplot as plt from scipy.signal import get_window import sys, os sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/')) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/')) import hpsModel as HPS import hpsTransformations as HPST import harmonicTransformations as HT import utilFunctions as UF def analysis(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100, minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01, stocf=0.1): """ Analyze a sound with the harmonic plus stochastic model inputFile: input sound file (monophonic with sampling rate of 44100) window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris) M: analysis window size N: fft size (power of two, bigger or equal than M) t: magnitude threshold of spectral peaks minSineDur: minimum duration of sinusoidal tracks nH: maximum number of harmonics minf0: minimum fundamental frequency in sound maxf0: maximum fundamental frequency in sound f0et: maximum error accepted in f0 detection algorithm harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation stocf: decimation factor used for the stochastic approximation returns inputFile: input file name; fs: sampling rate of input file, hfreq, hmag: harmonic frequencies, magnitude; mYst: stochastic residual """ # size of fft used in synthesis Ns = 512 # hop size (has to be 1/4 of Ns) H = 128 # read input sound (fs, x) = UF.wavread(inputFile) # compute analysis window w = get_window(window, M) # compute the harmonic plus stochastic model of the whole sound hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf) # synthesize the harmonic plus stochastic model without original phases y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, np.array([]), mYst, Ns, H, fs) # write output sound outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel.wav' UF.wavwrite(y,fs, outputFile) # create figure to plot plt.figure(figsize=(12, 9)) # frequency range to plot maxplotfreq = 15000.0 # plot the input sound plt.subplot(3,1,1) plt.plot(np.arange(x.size)/float(fs), x) plt.axis([0, x.size/float(fs), min(x), max(x)]) plt.ylabel('amplitude') plt.xlabel('time (sec)') plt.title('input sound: x') # plot spectrogram stochastic compoment plt.subplot(3,1,2) numFrames = int(mYst[:,0].size) sizeEnv = int(mYst[0,:].size) frmTime = H*np.arange(numFrames)/float(fs) binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1])) plt.autoscale(tight=True) # plot harmonic on top of stochastic spectrogram if (hfreq.shape[1] > 0): harms = hfreq*np.less(hfreq,maxplotfreq) harms[harms==0] = np.nan numFrames = int(harms[:,0].size) frmTime = H*np.arange(numFrames)/float(fs) plt.plot(frmTime, harms, color='k', ms=3, alpha=1) plt.xlabel('time (sec)') plt.ylabel('frequency (Hz)') plt.autoscale(tight=True) plt.title('harmonics + stochastic spectrogram') # plot the output sound plt.subplot(3,1,3) plt.plot(np.arange(y.size)/float(fs), y) plt.axis([0, y.size/float(fs), min(y), max(y)]) plt.ylabel('amplitude') plt.xlabel('time (sec)') plt.title('output sound: y') plt.tight_layout() plt.show(block=False) return inputFile, fs, hfreq, hmag, mYst def 
transformation_synthesis(inputFile, fs, hfreq, hmag, mYst, freqScaling = np.array([0, 1.2, 2.01, 1.2, 2.679, .7, 3.146, .7]), freqStretching = np.array([0, 1, 2.01, 1, 2.679, 1.5, 3.146, 1.5]), timbrePreservation = 1, timeScaling = np.array([0, 0, 2.138, 2.138-1.0, 3.146, 3.146])): """ transform the analysis values returned by the analysis function and synthesize the sound inputFile: name of input file fs: sampling rate of input file hfreq, hmag: harmonic frequencies and magnitudes mYst: stochastic residual freqScaling: frequency scaling factors, in time-value pairs (value of 1 no scaling) freqStretching: frequency stretching factors, in time-value pairs (value of 1 no stretching) timbrePreservation: 1 preserves original timbre, 0 it does not timeScaling: time scaling factors, in time-value pairs """ # size of fft used in synthesis Ns = 512 # hop size (has to be 1/4 of Ns) H = 128 # frequency scaling of the harmonics hfreqt, hmagt = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs) # time scaling the sound yhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreqt, hmagt, mYst, timeScaling) # synthesis from the trasformed hps representation y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs) # write output sound outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModelTransformation.wav' UF.wavwrite(y,fs, outputFile) # create figure to plot plt.figure(figsize=(12, 6)) # frequency range to plot maxplotfreq = 15000.0 # plot spectrogram of transformed stochastic compoment plt.subplot(2,1,1) numFrames = int(ystocEnv[:,0].size) sizeEnv = int(ystocEnv[0,:].size) frmTime = H*np.arange(numFrames)/float(fs) binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1])) plt.autoscale(tight=True) # plot transformed harmonic on top of stochastic spectrogram if (yhfreq.shape[1] > 0): harms = yhfreq*np.less(yhfreq,maxplotfreq) harms[harms==0] = np.nan numFrames = int(harms[:,0].size) frmTime = H*np.arange(numFrames)/float(fs) plt.plot(frmTime, harms, color='k', ms=3, alpha=1) plt.xlabel('time (sec)') plt.ylabel('frequency (Hz)') plt.autoscale(tight=True) plt.title('harmonics + stochastic spectrogram') # plot the output sound plt.subplot(2,1,2) plt.plot(np.arange(y.size)/float(fs), y) plt.axis([0, y.size/float(fs), min(y), max(y)]) plt.ylabel('amplitude') plt.xlabel('time (sec)') plt.title('output sound: y') plt.tight_layout() plt.show() if __name__ == "__main__": # analysis inputFile, fs, hfreq, hmag, mYst = analysis() # transformation and synthesis transformation_synthesis(inputFile, fs, hfreq, hmag, mYst) plt.show()
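A hedged usage sketch of the two functions above with a custom time-scaling envelope. It assumes the default sound file paths resolve (i.e. the script is run from the sms-tools transformations_interface directory), and the time-value pair layout ([t0, v0, t1, v1, ...]) follows the docstrings; the specific numbers are illustrative only.

# Analyze the default sound, then slow the whole phrase down by a factor of 2.
import numpy as np
import matplotlib.pyplot as plt

inputFile, fs, hfreq, hmag, mYst = analysis()

no_scaling   = np.array([0, 1, 3.146, 1])          # leave frequencies untouched
no_stretch   = np.array([0, 1, 3.146, 1])          # no frequency stretching
slow_down_2x = np.array([0, 0, 3.146, 2 * 3.146])  # map input time t to output time 2t

transformation_synthesis(inputFile, fs, hfreq, hmag, mYst,
                         freqScaling=no_scaling,
                         freqStretching=no_stretch,
                         timbrePreservation=1,
                         timeScaling=slow_down_2x)
plt.show()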
agpl-3.0
adammenges/statsmodels
statsmodels/regression/linear_model.py
16
93645
# TODO: Determine which tests are valid for GLSAR, and under what conditions # TODO: Fix issue with constant and GLS # TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None # TODO: GLS: default if sigma is none should be two-step GLS # TODO: Check nesting when performing model based tests, lr, wald, lm """ This module implements standard regression models: Generalized Least Squares (GLS) Ordinary Least Squares (OLS) Weighted Least Squares (WLS) Generalized Least Squares with autoregressive error terms GLSAR(p) Models are specified with an endogenous response variable and an exogenous design matrix and are fit using their `fit` method. Subclasses that have more complicated covariance matrices should write over the 'whiten' method as the fit method prewhitens the response by calling 'whiten'. General reference for regression models: D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression Analysis." 2nd. Ed., Wiley, 1992. Econometrics references for regression models: R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford, 2004. W. Green. "Econometric Analysis," 5th ed., Pearson, 2003. """ from __future__ import print_function from statsmodels.compat.python import lrange, lzip, range __docformat__ = 'restructuredtext en' __all__ = ['GLS', 'WLS', 'OLS', 'GLSAR'] import numpy as np import pandas as pd from scipy.linalg import toeplitz from scipy import stats from scipy import optimize from statsmodels.compat.numpy import np_matrix_rank from statsmodels.tools.data import _is_using_pandas from statsmodels.tools.tools import add_constant, chain_dot, pinv_extended from statsmodels.tools.decorators import (resettable_cache, cache_readonly, cache_writable) import statsmodels.base.model as base import statsmodels.base.wrapper as wrap from statsmodels.emplike.elregress import _ELRegOpts import warnings from statsmodels.tools.sm_exceptions import InvalidTestWarning # need import in module instead of lazily to copy `__doc__` from . import _prediction as pred def _get_sigma(sigma, nobs): """ Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its Cholesky decomposition. Handles dimensions and checks integrity. If sigma is None, returns None, None. Otherwise returns sigma, cholsigmainv. """ if sigma is None: return None, None sigma = np.asarray(sigma).squeeze() if sigma.ndim == 0: sigma = np.repeat(sigma, nobs) if sigma.ndim == 1: if sigma.shape != (nobs,): raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d " "array of shape %s x %s" % (nobs, nobs, nobs)) cholsigmainv = 1/np.sqrt(sigma) else: if sigma.shape != (nobs, nobs): raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d " "array of shape %s x %s" % (nobs, nobs, nobs)) cholsigmainv = np.linalg.cholesky(np.linalg.pinv(sigma)).T return sigma, cholsigmainv class RegressionModel(base.LikelihoodModel): """ Base class for linear regression models. Should not be directly called. Intended for subclassing. """ def __init__(self, endog, exog, **kwargs): super(RegressionModel, self).__init__(endog, exog, **kwargs) self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights']) def initialize(self): self.wexog = self.whiten(self.exog) self.wendog = self.whiten(self.endog) # overwrite nobs from class Model: self.nobs = float(self.wexog.shape[0]) self._df_model = None self._df_resid = None self.rank = None @property def df_model(self): """ The model degree of freedom, defined as the rank of the regressor matrix minus 1 if a constant is included. 
""" if self._df_model is None: if self.rank is None: self.rank = np_matrix_rank(self.exog) self._df_model = float(self.rank - self.k_constant) return self._df_model @df_model.setter def df_model(self, value): self._df_model = value @property def df_resid(self): """ The residual degree of freedom, defined as the number of observations minus the rank of the regressor matrix. """ if self._df_resid is None: if self.rank is None: self.rank = np_matrix_rank(self.exog) self._df_resid = self.nobs - self.rank return self._df_resid @df_resid.setter def df_resid(self, value): self._df_resid = value def whiten(self, X): raise NotImplementedError("Subclasses should implement.") def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs): """ Full fit of the model. The results include an estimate of covariance matrix, (whitened) residuals and an estimate of scale. Parameters ---------- method : str, optional Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse to solve the least squares problem. "qr" uses the QR factorization. cov_type : str, optional See `regression.linear_model.RegressionResults` for a description of the available covariance estimators cov_kwds : list or None, optional See `linear_model.RegressionResults.get_robustcov_results` for a description required keywords for alternative covariance estimators use_t : bool, optional Flag indicating to use the Student's t distribution when computing p-values. Default behavior depends on cov_type. See `linear_model.RegressionResults.get_robustcov_results` for implementation details. Returns ------- A RegressionResults class instance. See Also --------- regression.linear_model.RegressionResults regression.linear_model.RegressionResults.get_robustcov_results Notes ----- The fit method uses the pseudoinverse of the design/exogenous variables to solve the least squares minimization. """ if method == "pinv": if ((not hasattr(self, 'pinv_wexog')) or (not hasattr(self, 'normalized_cov_params')) or (not hasattr(self, 'rank'))): self.pinv_wexog, singular_values = pinv_extended(self.wexog) self.normalized_cov_params = np.dot(self.pinv_wexog, np.transpose(self.pinv_wexog)) # Cache these singular values for use later. self.wexog_singular_values = singular_values self.rank = np_matrix_rank(np.diag(singular_values)) beta = np.dot(self.pinv_wexog, self.wendog) elif method == "qr": if ((not hasattr(self, 'exog_Q')) or (not hasattr(self, 'exog_R')) or (not hasattr(self, 'normalized_cov_params')) or (getattr(self, 'rank', None) is None)): Q, R = np.linalg.qr(self.wexog) self.exog_Q, self.exog_R = Q, R self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R)) # Cache singular values from R. 
self.wexog_singular_values = np.linalg.svd(R, 0, 0) self.rank = np_matrix_rank(R) else: Q, R = self.exog_Q, self.exog_R # used in ANOVA self.effects = effects = np.dot(Q.T, self.wendog) beta = np.linalg.solve(R, effects) if self._df_model is None: self._df_model = float(self.rank - self.k_constant) if self._df_resid is None: self.df_resid = self.nobs - self.rank if isinstance(self, OLS): lfit = OLSResults(self, beta, normalized_cov_params=self.normalized_cov_params, cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t) else: lfit = RegressionResults(self, beta, normalized_cov_params=self.normalized_cov_params, cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t, **kwargs) return RegressionResultsWrapper(lfit) def fit_regularized(self, method="coord_descent", maxiter=1000, alpha=0., L1_wt=1., start_params=None, cnvrg_tol=1e-8, zero_tol=1e-8, **kwargs): """ Return a regularized fit to a linear regression model. Parameters ---------- method : string Only the coordinate descent algorithm is implemented. maxiter : integer The maximum number of iteration cycles (an iteration cycle involves running coordinate descent on all variables). alpha : scalar or array-like The penalty weight. If a scalar, the same penalty weight applies to all variables in the model. If a vector, it must have the same length as `params`, and contains a penalty weight for each coefficient. L1_wt : scalar The fraction of the penalty given to the L1 penalty term. Must be between 0 and 1 (inclusive). If 0, the fit is ridge regression. If 1, the fit is the lasso. start_params : array-like Starting values for ``params``. cnvrg_tol : scalar If ``params`` changes by less than this amount (in sup-norm) in once iteration cycle, the algorithm terminates with convergence. zero_tol : scalar Any estimated coefficient smaller than this value is replaced with zero. Returns ------- A RegressionResults object, of the same type returned by ``fit``. Notes ----- The approach closely follows that implemented in the glmnet package in R. The penalty is the "elastic net" penalty, which is a convex combination of L1 and L2 penalties. The function that is minimized is: ..math:: 0.5*RSS/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1) where RSS is the usual regression sum of squares, n is the sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms. Post-estimation results are based on the same data used to select variables, hence may be subject to overfitting biases. References ---------- Friedman, Hastie, Tibshirani (2008). Regularization paths for generalized linear models via coordinate descent. Journal of Statistical Software 33(1), 1-22 Feb 2010. """ k_exog = self.wexog.shape[1] if np.isscalar(alpha): alpha = alpha * np.ones(k_exog, dtype=np.float64) # Below we work with RSS + penalty, so we need to rescale. alpha *= 2 * self.wexog.shape[0] if start_params is None: params = np.zeros(k_exog, dtype=np.float64) else: params = start_params.copy() converged = False xxprod = 2*(self.wexog**2).sum(0) # Coordinate descent for itr in range(maxiter): params_save = params.copy() for k in range(self.wexog.shape[1]): params[k] = 0. wendog_adj = self.wendog - np.dot(self.wexog, params) xyprod = 2*np.dot(self.wexog[:,k], wendog_adj) den = xxprod[k] + alpha[k] * (1 - L1_wt) a = alpha[k] * L1_wt if a >= np.abs(xyprod): params[k] = 0. 
elif xyprod > 0: params[k] = (xyprod - a) / den else: params[k] = (xyprod + a) / den # Check for convergence pchange = np.max(np.abs(params - params_save)) if pchange < cnvrg_tol: converged = True break # Set approximate zero coefficients to be exactly zero params *= np.abs(params) >= zero_tol # Fit the reduced model to get standard errors and other # post-estimation results. ii = np.flatnonzero(params) cov = np.zeros((k_exog, k_exog), dtype=np.float64) if len(ii) > 0: model = self.__class__(self.wendog, self.wexog[:,ii]) rslt = model.fit() cov[np.ix_(ii, ii)] = rslt.normalized_cov_params lfit = RegressionResults(self, params, normalized_cov_params=cov) lfit.converged = converged return RegressionResultsWrapper(lfit) def predict(self, params, exog=None): """ Return linear predicted values from a design matrix. Parameters ---------- params : array-like Parameters of a linear model exog : array-like, optional. Design / exogenous data. Model exog is used if None. Returns ------- An array of fitted values Notes ----- If the model has not yet been fit, params is not optional. """ #JP: this doesn't look correct for GLMAR #SS: it needs its own predict method if exog is None: exog = self.exog return np.dot(exog, params) class GLS(RegressionModel): __doc__ = """ Generalized least squares model with a general covariance structure. %(params)s sigma : scalar or array `sigma` is the weighting matrix of the covariance. The default is None for no scaling. If `sigma` is a scalar, it is assumed that `sigma` is an n x n diagonal matrix with the given scalar, `sigma` as the value of each diagonal element. If `sigma` is an n-length vector, then `sigma` is assumed to be a diagonal matrix with the given `sigma` on the diagonal. This should be the same as WLS. %(extra_params)s **Attributes** pinv_wexog : array `pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`. cholsimgainv : array The transpose of the Cholesky decomposition of the pseudoinverse. df_model : float p - 1, where p is the number of regressors including the intercept. of freedom. df_resid : float Number of observations n less the number of parameters p. llf : float The value of the likelihood function of the fitted model. nobs : float The number of observations n. normalized_cov_params : array p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}` results : RegressionResults instance A property that returns the RegressionResults class if fit. sigma : array `sigma` is the n x n covariance structure of the error terms. wexog : array Design matrix whitened by `cholsigmainv` wendog : array Response variable whitened by `cholsigmainv` Notes ----- If sigma is a function of the data making one of the regressors a constant, then the current postestimation statistics will not be correct. Examples -------- >>> import numpy as np >>> import statsmodels.api as sm >>> data = sm.datasets.longley.load() >>> data.exog = sm.add_constant(data.exog) >>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid >>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit() >>> rho = res_fit.params `rho` is a consistent estimator of the correlation of the residuals from an OLS fit of the longley data. It is assumed that this is the true rho of the AR process data. >>> from scipy.linalg import toeplitz >>> order = toeplitz(np.arange(16)) >>> sigma = rho**order `sigma` is an n x n matrix of the autocorrelation structure of the data. 
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma) >>> gls_results = gls_model.fit() >>> print(gls_results.summary())) """ % {'params' : base._model_params_doc, 'extra_params' : base._missing_param_doc + base._extra_param_doc} def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None, **kwargs): #TODO: add options igls, for iterative fgls if sigma is None #TODO: default if sigma is none should be two-step GLS sigma, cholsigmainv = _get_sigma(sigma, len(endog)) super(GLS, self).__init__(endog, exog, missing=missing, hasconst=hasconst, sigma=sigma, cholsigmainv=cholsigmainv, **kwargs) #store attribute names for data arrays self._data_attr.extend(['sigma', 'cholsigmainv']) def whiten(self, X): """ GLS whiten method. Parameters ----------- X : array-like Data to be whitened. Returns ------- np.dot(cholsigmainv,X) See Also -------- regression.GLS """ X = np.asarray(X) if self.sigma is None or self.sigma.shape == (): return X elif self.sigma.ndim == 1: if X.ndim == 1: return X * self.cholsigmainv else: return X * self.cholsigmainv[:, None] else: return np.dot(self.cholsigmainv, X) def loglike(self, params): """ Returns the value of the Gaussian log-likelihood function at params. Given the whitened design matrix, the log-likelihood is evaluated at the parameter vector `params` for the dependent variable `endog`. Parameters ---------- params : array-like The parameter estimates Returns ------- loglike : float The value of the log-likelihood function for a GLS Model. Notes ----- The log-likelihood function for the normal distribution is .. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}\\log\\left(\\left|\\Sigma\\right|\\right) Y and Y-hat are whitened. """ #TODO: combine this with OLS/WLS loglike and add _det_sigma argument nobs2 = self.nobs / 2.0 SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0) llf = -np.log(SSR) * nobs2 # concentrated likelihood llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant if np.any(self.sigma): #FIXME: robust-enough check? unneeded if _det_sigma gets defined if self.sigma.ndim==2: det = np.linalg.slogdet(self.sigma) llf -= .5*det[1] else: llf -= 0.5*np.sum(np.log(self.sigma)) # with error covariance matrix return llf class WLS(RegressionModel): __doc__ = """ A regression model with diagonal but non-identity covariance structure. The weights are presumed to be (proportional to) the inverse of the variance of the observations. That is, if the variables are to be transformed by 1/sqrt(W) you must supply weights = 1/W. %(params)s weights : array-like, optional 1d array of weights. If you supply 1/W then the variables are pre- multiplied by 1/sqrt(W). If no weights are supplied the default value is 1 and WLS reults are the same as OLS. %(extra_params)s Attributes ---------- weights : array The stored weights supplied as an argument. 
See regression.GLS Examples --------- >>> import numpy as np >>> import statsmodels.api as sm >>> Y = [1,3,4,5,2,3,4] >>> X = range(1,8) >>> X = sm.add_constant(X) >>> wls_model = sm.WLS(Y,X, weights=list(range(1,8))) >>> results = wls_model.fit() >>> results.params array([ 2.91666667, 0.0952381 ]) >>> results.tvalues array([ 2.0652652 , 0.35684428]) >>> print(results.t_test([1, 0])) <T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5> >>> print(results.f_test([0, 1])) <F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1> Notes ----- If the weights are a function of the data, then the post estimation statistics such as fvalue and mse_model might not be correct, as the package does not yet support no-constant regression. """ % {'params' : base._model_params_doc, 'extra_params' : base._missing_param_doc + base._extra_param_doc} def __init__(self, endog, exog, weights=1., missing='none', hasconst=None, **kwargs): weights = np.array(weights) if weights.shape == (): if (missing == 'drop' and 'missing_idx' in kwargs and kwargs['missing_idx'] is not None): # patsy may have truncated endog weights = np.repeat(weights, len(kwargs['missing_idx'])) else: weights = np.repeat(weights, len(endog)) # handle case that endog might be of len == 1 if len(weights) == 1: weights = np.array([weights.squeeze()]) else: weights = weights.squeeze() super(WLS, self).__init__(endog, exog, missing=missing, weights=weights, hasconst=hasconst, **kwargs) nobs = self.exog.shape[0] weights = self.weights # Experimental normalization of weights weights = weights / np.sum(weights) * nobs if weights.size != nobs and weights.shape[0] != nobs: raise ValueError('Weights must be scalar or same length as design') def whiten(self, X): """ Whitener for WLS model, multiplies each column by sqrt(self.weights) Parameters ---------- X : array-like Data to be whitened Returns ------- sqrt(weights)*X """ #print(self.weights.var())) X = np.asarray(X) if X.ndim == 1: return X * np.sqrt(self.weights) elif X.ndim == 2: return np.sqrt(self.weights)[:, None]*X def loglike(self, params): """ Returns the value of the gaussian log-likelihood function at params. Given the whitened design matrix, the log-likelihood is evaluated at the parameter vector `params` for the dependent variable `Y`. Parameters ---------- params : array-like The parameter estimates. Returns ------- llf : float The value of the log-likelihood function for a WLS Model. Notes -------- .. math:: -\\frac{n}{2}\\log\\left(Y-\\hat{Y}\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}log\\left(\\left|W\\right|\\right) where :math:`W` is a diagonal matrix """ nobs2 = self.nobs / 2.0 SSR = np.sum((self.wendog - np.dot(self.wexog,params))**2, axis=0) llf = -np.log(SSR) * nobs2 # concentrated likelihood llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant llf += 0.5 * np.sum(np.log(self.weights)) return llf class OLS(WLS): __doc__ = """ A simple ordinary least squares model. %(params)s %(extra_params)s Attributes ---------- weights : scalar Has an attribute weights = array(1.0) due to inheritance from WLS. 
See Also -------- GLS Examples -------- >>> import numpy as np >>> >>> import statsmodels.api as sm >>> >>> Y = [1,3,4,5,2,3,4] >>> X = range(1,8) >>> X = sm.add_constant(X) >>> >>> model = sm.OLS(Y,X) >>> results = model.fit() >>> results.params array([ 2.14285714, 0.25 ]) >>> results.tvalues array([ 1.87867287, 0.98019606]) >>> print(results.t_test([1, 0]))) <T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5> >>> print(results.f_test(np.identity(2))) <F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2> Notes ----- No constant is added by the model unless you are using formulas. """ % {'params' : base._model_params_doc, 'extra_params' : base._missing_param_doc + base._extra_param_doc} #TODO: change example to use datasets. This was the point of datasets! def __init__(self, endog, exog=None, missing='none', hasconst=None, **kwargs): super(OLS, self).__init__(endog, exog, missing=missing, hasconst=hasconst, **kwargs) if "weights" in self._init_keys: self._init_keys.remove("weights") def loglike(self, params): """ The likelihood function for the clasical OLS model. Parameters ---------- params : array-like The coefficients with which to estimate the log-likelihood. Returns ------- The concentrated likelihood function evaluated at params. """ nobs2 = self.nobs / 2.0 return -nobs2*np.log(2*np.pi)-nobs2*np.log(1/(2*nobs2) *\ np.dot(np.transpose(self.endog - np.dot(self.exog, params)), (self.endog - np.dot(self.exog,params)))) -\ nobs2 def whiten(self, Y): """ OLS model whitener does nothing: returns Y. """ return Y class GLSAR(GLS): __doc__ = """ A regression model with an AR(p) covariance structure. %(params)s rho : int Order of the autoregressive covariance %(extra_params)s Examples -------- >>> import statsmodels.api as sm >>> X = range(1,8) >>> X = sm.add_constant(X) >>> Y = [1,3,4,5,8,10,9] >>> model = sm.GLSAR(Y, X, rho=2) >>> for i in range(6): ... results = model.fit() ... print("AR coefficients: {0}".format(model.rho)) ... rho, sigma = sm.regression.yule_walker(results.resid, ... order=model.order) ... model = sm.GLSAR(Y, X, rho) ... AR coefficients: [ 0. 0.] AR coefficients: [-0.52571491 -0.84496178] AR coefficients: [-0.6104153 -0.86656458] AR coefficients: [-0.60439494 -0.857867 ] AR coefficients: [-0.6048218 -0.85846157] AR coefficients: [-0.60479146 -0.85841922] >>> results.params array([-0.66661205, 1.60850853]) >>> results.tvalues array([ -2.10304127, 21.8047269 ]) >>> print(results.t_test([1, 0])) <T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3> >>> print(results.f_test(np.identity(2))) <F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2> Or, equivalently >>> model2 = sm.GLSAR(Y, X, rho=2) >>> res = model2.iterative_fit(maxiter=6) >>> model2.rho array([-0.60479146, -0.85841922]) Notes ----- GLSAR is considered to be experimental. 
The linear autoregressive process of order p--AR(p)--is defined as: TODO """ % {'params' : base._model_params_doc, 'extra_params' : base._missing_param_doc + base._extra_param_doc} def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs): #this looks strange, interpreting rho as order if it is int if isinstance(rho, np.int): self.order = rho self.rho = np.zeros(self.order, np.float64) else: self.rho = np.squeeze(np.asarray(rho)) if len(self.rho.shape) not in [0,1]: raise ValueError("AR parameters must be a scalar or a vector") if self.rho.shape == (): self.rho.shape = (1,) self.order = self.rho.shape[0] if exog is None: #JP this looks wrong, should be a regression on constant #results for rho estimate now identical to yule-walker on y #super(AR, self).__init__(endog, add_constant(endog)) super(GLSAR, self).__init__(endog, np.ones((endog.shape[0],1)), missing=missing, **kwargs) else: super(GLSAR, self).__init__(endog, exog, missing=missing, **kwargs) def iterative_fit(self, maxiter=3, rtol=1e-4, **kwds): """ Perform an iterative two-stage procedure to estimate a GLS model. The model is assumed to have AR(p) errors, AR(p) parameters and regression coefficients are estimated iteratively. Parameters ---------- maxiter : integer, optional the number of iterations rtol : float, optional Relative tolerance between estimated coefficients to stop the estimation. Stops if max(abs(last - current) / abs(last)) < rtol """ # TODO: update this after going through example. converged = False i = -1 # need to initialize for maxiter < 1 (skip loop) history = {'params': [], 'rho':[self.rho]} for i in range(maxiter - 1): if hasattr(self, 'pinv_wexog'): del self.pinv_wexog self.initialize() results = self.fit() history['params'].append(results.params) if i == 0: last = results.params else: diff = np.max(np.abs(last - results.params) / np.abs(last)) if diff < rtol: converged = True break last = results.params self.rho, _ = yule_walker(results.resid, order=self.order, df=None) history['rho'].append(self.rho) # why not another call to self.initialize # Use kwarg to insert history if not converged and maxiter > 0: # maxiter <= 0 just does OLS if hasattr(self, 'pinv_wexog'): del self.pinv_wexog self.initialize() # if converged then this is a duplicate fit, because we didn't update rho results = self.fit(history=history, **kwds) results.iter = i + 1 # add last fit to history, not if duplicate fit if not converged: results.history['params'].append(results.params) results.iter += 1 results.converged = converged return results def whiten(self, X): """ Whiten a series of columns according to an AR(p) covariance structure. This drops initial p observations. Parameters ---------- X : array-like The data to be whitened, Returns ------- whitened array """ #TODO: notation for AR process X = np.asarray(X, np.float64) _X = X.copy() #the following loops over the first axis, works for 1d and nd for i in range(self.order): _X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)] return _X[self.order:] def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True): """ Estimate AR(p) parameters from a sequence X using Yule-Walker equation. Unbiased or maximum-likelihood estimator (mle) See, for example: http://en.wikipedia.org/wiki/Autoregressive_moving_average_model Parameters ---------- X : array-like 1d array order : integer, optional The order of the autoregressive process. Default is 1. 
method : string, optional Method can be "unbiased" or "mle" and this determines denominator in estimate of autocorrelation function (ACF) at lag k. If "mle", the denominator is n=X.shape[0], if "unbiased" the denominator is n-k. The default is unbiased. df : integer, optional Specifies the degrees of freedom. If `df` is supplied, then it is assumed the X has `df` degrees of freedom rather than `n`. Default is None. inv : bool If inv is True the inverse of R is also returned. Default is False. demean : bool True, the mean is subtracted from `X` before estimation. Returns ------- rho The autoregressive coefficients sigma TODO Examples -------- >>> import statsmodels.api as sm >>> from statsmodels.datasets.sunspots import load >>> data = load() >>> rho, sigma = sm.regression.yule_walker(data.endog, order=4, method="mle") >>> rho array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365]) >>> sigma 16.808022730464351 """ #TODO: define R better, look back at notes and technical notes on YW. #First link here is useful #http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm method = str(method).lower() if method not in ["unbiased", "mle"]: raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'") X = np.array(X, dtype=np.float64) if demean: X -= X.mean() # automatically demean's X n = df or X.shape[0] if method == "unbiased": # this is df_resid ie., n - p denom = lambda k: n - k else: denom = lambda k: n if X.ndim > 1 and X.shape[1] != 1: raise ValueError("expecting a vector to estimate AR parameters") r = np.zeros(order+1, np.float64) r[0] = (X**2).sum() / denom(0) for k in range(1,order+1): r[k] = (X[0:-k]*X[k:]).sum() / denom(k) R = toeplitz(r[:-1]) rho = np.linalg.solve(R, r[1:]) sigmasq = r[0] - (r[1:]*rho).sum() if inv==True: return rho, np.sqrt(sigmasq), np.linalg.inv(R) else: return rho, np.sqrt(sigmasq) class RegressionResults(base.LikelihoodModelResults): """ This class summarizes the fit of a linear regression model. It handles the output of contrasts, estimates of covariance, etc. Returns ------- **Attributes** aic Aikake's information criteria. For a model with a constant :math:`-2llf + 2(df_model + 1)`. For a model without a constant :math:`-2llf + 2(df_model)`. bic Bayes' information criteria For a model with a constant :math:`-2llf + \log(n)(df_model+1)`. For a model without a constant :math:`-2llf + \log(n)(df_model)` bse The standard errors of the parameter estimates. pinv_wexog See specific model class docstring centered_tss The total (weighted) sum of squares centered about the mean. cov_HC0 Heteroscedasticity robust covariance matrix. See HC0_se below. cov_HC1 Heteroscedasticity robust covariance matrix. See HC1_se below. cov_HC2 Heteroscedasticity robust covariance matrix. See HC2_se below. cov_HC3 Heteroscedasticity robust covariance matrix. See HC3_se below. cov_type Parameter covariance estimator used for standard errors and t-stats df_model Model degress of freedom. The number of regressors `p`. Does not include the constant if one is present df_resid Residual degrees of freedom. `n - p - 1`, if a constant is present. `n - p` if a constant is not included. ess Explained sum of squares. If a constant is present, the centered total sum of squares minus the sum of squared residuals. If there is no constant, the uncentered total sum of squares is used. fvalue F-statistic of the fully specified model. Calculated as the mean squared error of the model divided by the mean squared error of the residuals. 
f_pvalue p-value of the F-statistic fittedvalues The predicted the values for the original (unwhitened) design. het_scale adjusted squared residuals for heteroscedasticity robust standard errors. Is only available after `HC#_se` or `cov_HC#` is called. See HC#_se for more information. history Estimation history for iterative estimators HC0_se White's (1980) heteroskedasticity robust standard errors. Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1) where e_i = resid[i] HC0_se is a cached property. When HC0_se or cov_HC0 is called the RegressionResults instance will then have another attribute `het_scale`, which is in this case is just resid**2. HC1_se MacKinnon and White's (1985) alternative heteroskedasticity robust standard errors. Defined as sqrt(diag(n/(n-p)*HC_0) HC1_see is a cached property. When HC1_se or cov_HC1 is called the RegressionResults instance will then have another attribute `het_scale`, which is in this case is n/(n-p)*resid**2. HC2_se MacKinnon and White's (1985) alternative heteroskedasticity robust standard errors. Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1) where h_ii = x_i(X.T X)^(-1)x_i.T HC2_see is a cached property. When HC2_se or cov_HC2 is called the RegressionResults instance will then have another attribute `het_scale`, which is in this case is resid^(2)/(1-h_ii). HC3_se MacKinnon and White's (1985) alternative heteroskedasticity robust standard errors. Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1) where h_ii = x_i(X.T X)^(-1)x_i.T HC3_see is a cached property. When HC3_se or cov_HC3 is called the RegressionResults instance will then have another attribute `het_scale`, which is in this case is resid^(2)/(1-h_ii)^(2). model A pointer to the model instance that called fit() or results. mse_model Mean squared error the model. This is the explained sum of squares divided by the model degrees of freedom. mse_resid Mean squared error of the residuals. The sum of squared residuals divided by the residual degrees of freedom. mse_total Total mean squared error. Defined as the uncentered total sum of squares divided by n the number of observations. nobs Number of observations n. normalized_cov_params See specific model class docstring params The linear coefficients that minimize the least squares criterion. This is usually called Beta for the classical linear model. pvalues The two-tailed p values for the t-stats of the params. resid The residuals of the model. resid_pearson `wresid` normalized to have unit variance. rsquared R-squared of a model with an intercept. This is defined here as 1 - `ssr`/`centered_tss` if the constant is included in the model and 1 - `ssr`/`uncentered_tss` if the constant is omitted. rsquared_adj Adjusted R-squared. This is defined here as 1 - (`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no constant is included. scale A scale factor for the covariance matrix. Default value is ssr/(n-p). Note that the square root of `scale` is often called the standard error of the regression. ssr Sum of squared (whitened) residuals. uncentered_tss Uncentered sum of squares. Sum of the squared values of the (whitened) endogenous response variable. wresid The residuals of the transformed/whitened regressand and regressor(s) """ _cache = {} # needs to be a class attribute for scale setter? 
def __init__(self, model, params, normalized_cov_params=None, scale=1., cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs): super(RegressionResults, self).__init__(model, params, normalized_cov_params, scale) self._cache = resettable_cache() if hasattr(model, 'wexog_singular_values'): self._wexog_singular_values = model.wexog_singular_values else: self._wexog_singular_values = None self.df_model = model.df_model self.df_resid = model.df_resid if cov_type == 'nonrobust': self.cov_type = 'nonrobust' self.cov_kwds = {'description' : 'Standard Errors assume that the ' + 'covariance matrix of the errors is correctly ' + 'specified.'} if use_t is None: self.use_t = True # TODO: class default else: if cov_kwds is None: cov_kwds = {} if 'use_t' in cov_kwds: # TODO: we want to get rid of 'use_t' in cov_kwds use_t_2 = cov_kwds.pop('use_t') if use_t is None: use_t = use_t_2 # TODO: warn or not? self.get_robustcov_results(cov_type=cov_type, use_self=True, use_t=use_t, **cov_kwds) for key in kwargs: setattr(self, key, kwargs[key]) def __str__(self): self.summary() def conf_int(self, alpha=.05, cols=None): """ Returns the confidence interval of the fitted parameters. Parameters ---------- alpha : float, optional The `alpha` level for the confidence interval. ie., The default `alpha` = .05 returns a 95% confidence interval. cols : array-like, optional `cols` specifies which confidence intervals to return Notes ----- The confidence interval is based on Student's t-distribution. """ # keep method for docstring for now ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols) return ci @cache_readonly def nobs(self): return float(self.model.wexog.shape[0]) @cache_readonly def fittedvalues(self): return self.model.predict(self.params, self.model.exog) @cache_readonly def wresid(self): return self.model.wendog - self.model.predict(self.params, self.model.wexog) @cache_readonly def resid(self): return self.model.endog - self.model.predict(self.params, self.model.exog) #TODO: fix writable example @cache_writable() def scale(self): wresid = self.wresid return np.dot(wresid, wresid) / self.df_resid @cache_readonly def ssr(self): wresid = self.wresid return np.dot(wresid, wresid) @cache_readonly def centered_tss(self): model = self.model weights = getattr(model, 'weights', None) if weights is not None: return np.sum(weights*(model.endog - np.average(model.endog, weights=weights))**2) else: # this is probably broken for GLS centered_endog = model.wendog - model.wendog.mean() return np.dot(centered_endog, centered_endog) @cache_readonly def uncentered_tss(self): wendog = self.model.wendog return np.dot(wendog, wendog) @cache_readonly def ess(self): if self.k_constant: return self.centered_tss - self.ssr else: return self.uncentered_tss - self.ssr @cache_readonly def rsquared(self): if self.k_constant: return 1 - self.ssr/self.centered_tss else: return 1 - self.ssr/self.uncentered_tss @cache_readonly def rsquared_adj(self): return 1 - np.divide(self.nobs - self.k_constant, self.df_resid) * (1 - self.rsquared) @cache_readonly def mse_model(self): return self.ess/self.df_model @cache_readonly def mse_resid(self): return self.ssr/self.df_resid @cache_readonly def mse_total(self): if self.k_constant: return self.centered_tss / (self.df_resid + self.df_model) else: return self.uncentered_tss / (self.df_resid + self.df_model) @cache_readonly def fvalue(self): if hasattr(self, 'cov_type') and self.cov_type != 'nonrobust': # with heteroscedasticity or correlation robustness k_params = 
self.normalized_cov_params.shape[0] mat = np.eye(k_params) const_idx = self.model.data.const_idx # TODO: What if model includes implcit constant, e.g. all dummies but no constant regressor? # TODO: Restats as LM test by projecting orthogonalizing to constant? if self.model.data.k_constant == 1: # if constant is implicit, return nan see #2444 if const_idx is None: return np.nan idx = lrange(k_params) idx.pop(const_idx) mat = mat[idx] # remove constant ft = self.f_test(mat) # using backdoor to set another attribute that we already have self._cache['f_pvalue'] = ft.pvalue return ft.fvalue else: # for standard homoscedastic case return self.mse_model/self.mse_resid @cache_readonly def f_pvalue(self): return stats.f.sf(self.fvalue, self.df_model, self.df_resid) @cache_readonly def bse(self): return np.sqrt(np.diag(self.cov_params())) @cache_readonly def aic(self): return -2 * self.llf + 2 * (self.df_model + self.k_constant) @cache_readonly def bic(self): return (-2 * self.llf + np.log(self.nobs) * (self.df_model + self.k_constant)) @cache_readonly def eigenvals(self): """ Return eigenvalues sorted in decreasing order. """ if self._wexog_singular_values is not None: eigvals = self._wexog_singular_values ** 2 else: eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T, self.model.wexog)) return np.sort(eigvals)[::-1] @cache_readonly def condition_number(self): """ Return condition number of exogenous matrix. Calculated as ratio of largest to smallest eigenvalue. """ eigvals = self.eigenvals return np.sqrt(eigvals[0]/eigvals[-1]) #TODO: make these properties reset bse def _HCCM(self, scale): H = np.dot(self.model.pinv_wexog, scale[:,None]*self.model.pinv_wexog.T) return H @cache_readonly def cov_HC0(self): """ See statsmodels.RegressionResults """ self.het_scale = self.wresid**2 cov_HC0 = self._HCCM(self.het_scale) return cov_HC0 @cache_readonly def cov_HC1(self): """ See statsmodels.RegressionResults """ self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2) cov_HC1 = self._HCCM(self.het_scale) return cov_HC1 @cache_readonly def cov_HC2(self): """ See statsmodels.RegressionResults """ # probably could be optimized h = np.diag(chain_dot(self.model.wexog, self.normalized_cov_params, self.model.wexog.T)) self.het_scale = self.wresid**2/(1-h) cov_HC2 = self._HCCM(self.het_scale) return cov_HC2 @cache_readonly def cov_HC3(self): """ See statsmodels.RegressionResults """ h = np.diag(chain_dot(self.model.wexog, self.normalized_cov_params, self.model.wexog.T)) self.het_scale=(self.wresid/(1-h))**2 cov_HC3 = self._HCCM(self.het_scale) return cov_HC3 @cache_readonly def HC0_se(self): """ See statsmodels.RegressionResults """ return np.sqrt(np.diag(self.cov_HC0)) @cache_readonly def HC1_se(self): """ See statsmodels.RegressionResults """ return np.sqrt(np.diag(self.cov_HC1)) @cache_readonly def HC2_se(self): """ See statsmodels.RegressionResults """ return np.sqrt(np.diag(self.cov_HC2)) @cache_readonly def HC3_se(self): """ See statsmodels.RegressionResults """ return np.sqrt(np.diag(self.cov_HC3)) @cache_readonly def resid_pearson(self): """ Residuals, normalized to have unit variance. 
Returns ------- An array wresid/sqrt(scale) """ if not hasattr(self, 'resid'): raise ValueError('Method requires residuals.') eps = np.finfo(self.wresid.dtype).eps if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean(): # don't divide if scale is zero close to numerical precision from warnings import warn warn("All residuals are 0, cannot compute normed residuals.", RuntimeWarning) return self.wresid else: return self.wresid / np.sqrt(self.scale) def _is_nested(self, restricted): """ Parameters ---------- restricted : Result instance The restricted model is assumed to be nested in the current model. The result instance of the restricted model is required to have two attributes, residual sum of squares, `ssr`, residual degrees of freedom, `df_resid`. Returns ------- nested : bool True if nested, otherwise false Notes ----- A most nests another model if the regressors in the smaller model are spanned by the regressors in the larger model and the regressand is identical. """ if self.model.nobs != restricted.model.nobs: return False full_rank = self.model.rank restricted_rank = restricted.model.rank if full_rank <= restricted_rank: return False restricted_exog = restricted.model.wexog full_wresid = self.wresid scores = restricted_exog * full_wresid[:,None] score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2)) # TODO: Could be improved, and may fail depending on scale of regressors return np.allclose(score_l2,0) def compare_lm_test(self, restricted, demean=True, use_lr=False): """Use Lagrange Multiplier test to test whether restricted model is correct Parameters ---------- restricted : Result instance The restricted model is assumed to be nested in the current model. The result instance of the restricted model is required to have two attributes, residual sum of squares, `ssr`, residual degrees of freedom, `df_resid`. demean : bool Flag indicating whether the demean the scores based on the residuals from the restricted model. If True, the covariance of the scores are used and the LM test is identical to the large sample version of the LR test. Returns ------- lm_value : float test statistic, chi2 distributed p_value : float p-value of the test statistic df_diff : int degrees of freedom of the restriction, i.e. difference in df between models Notes ----- TODO: explain LM text """ import statsmodels.stats.sandwich_covariance as sw from numpy.linalg import inv if not self._is_nested(restricted): raise ValueError("Restricted model is not nested by full model.") wresid = restricted.wresid wexog = self.model.wexog scores = wexog * wresid[:,None] n = self.nobs df_full = self.df_resid df_restr = restricted.df_resid df_diff = (df_restr - df_full) s = scores.mean(axis=0) if use_lr: scores = wexog * self.wresid[:,None] demean = False if demean: scores = scores - scores.mean(0)[None,:] # Form matters here. If homoskedastics can be sigma^2 (X'X)^-1 # If Heteroskedastic then the form below is fine # If HAC then need to use HAC # If Cluster, shoudl use cluster cov_type = getattr(self, 'cov_type', 'nonrobust') if cov_type == 'nonrobust': sigma2 = np.mean(wresid**2) XpX = np.dot(wexog.T,wexog) / n Sinv = inv(sigma2 * XpX) elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'): Sinv = inv(np.dot(scores.T,scores) / n) elif cov_type == 'HAC': print("HAC") maxlags = self.cov_kwds['maxlags'] Sinv = inv(sw.S_hac_simple(scores, maxlags) / n) elif cov_type == 'cluster': #cluster robust standard errors groups = self.cov_kwds['groups'] # TODO: Might need demean option in S_crosssection by group? 
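            # Within-cluster sums of the scores form the "meat" of the covariance,
            # so the LM statistic remains valid under arbitrary correlation inside
            # each cluster (given a large number of clusters).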
Sinv = inv(sw.S_crosssection(scores, groups)) else: raise ValueError('Only nonrobust, HC, HAC and cluster are ' + 'currently connected') lm_value = n * chain_dot(s,Sinv,s.T) p_value = stats.chi2.sf(lm_value, df_diff) return lm_value, p_value, df_diff def compare_f_test(self, restricted): """use F test to test whether restricted model is correct Parameters ---------- restricted : Result instance The restricted model is assumed to be nested in the current model. The result instance of the restricted model is required to have two attributes, residual sum of squares, `ssr`, residual degrees of freedom, `df_resid`. Returns ------- f_value : float test statistic, F distributed p_value : float p-value of the test statistic df_diff : int degrees of freedom of the restriction, i.e. difference in df between models Notes ----- See mailing list discussion October 17, This test compares the residual sum of squares of the two models. This is not a valid test, if there is unspecified heteroscedasticity or correlation. This method will issue a warning if this is detected but still return the results under the assumption of homoscedasticity and no autocorrelation (sphericity). """ has_robust1 = getattr(self, 'cov_type', 'nonrobust') != 'nonrobust' has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') != 'nonrobust') if has_robust1 or has_robust2: warnings.warn('F test for comparison is likely invalid with ' + 'robust covariance, proceeding anyway', InvalidTestWarning) ssr_full = self.ssr ssr_restr = restricted.ssr df_full = self.df_resid df_restr = restricted.df_resid df_diff = (df_restr - df_full) f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full p_value = stats.f.sf(f_value, df_diff, df_full) return f_value, p_value, df_diff def compare_lr_test(self, restricted, large_sample=False): """ Likelihood ratio test to test whether restricted model is correct Parameters ---------- restricted : Result instance The restricted model is assumed to be nested in the current model. The result instance of the restricted model is required to have two attributes, residual sum of squares, `ssr`, residual degrees of freedom, `df_resid`. large_sample : bool Flag indicating whether to use a heteroskedasticity robust version of the LR test, which is a modified LM test. Returns ------- lr_stat : float likelihood ratio, chisquare distributed with df_diff degrees of freedom p_value : float p-value of the test statistic df_diff : int degrees of freedom of the restriction, i.e. difference in df between models Notes ----- The exact likelihood ratio is valid for homoskedastic data, and is defined as .. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}} {\\mathcal{L}_{alternative}}\\right) where :math:`\mathcal{L}` is the likelihood of the model. With :math:`D` distributed as chisquare with df equal to difference in number of parameters or equivalently difference in residual degrees of freedom. The large sample version of the likelihood ratio is defined as .. math:: D=n s^{\\prime}S^{-1}s where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}` .. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null} is the average score of the model evaluated using the residuals from null model and the regressors from the alternative model and :math:`S` is the covariance of the scores, :math:`s_{i}`. The covariance of the scores is estimated using the same estimator as in the alternative model. This test compares the loglikelihood of the two models. This may not be a valid test, if there is unspecified heteroscedasticity or correlation. 
This method will issue a warning if this is detected but still return the results without taking unspecified heteroscedasticity or correlation into account. This test compares the loglikelihood of the two models. This may not be a valid test, if there is unspecified heteroscedasticity or correlation. This method will issue a warning if this is detected but still return the results without taking unspecified heteroscedasticity or correlation into account. is the average score of the model evaluated using the residuals from null model and the regressors from the alternative model and :math:`S` is the covariance of the scores, :math:`s_{i}`. The covariance of the scores is estimated using the same estimator as in the alternative model. TODO: put into separate function, needs tests """ # See mailing list discussion October 17, if large_sample: return self.compare_lm_test(restricted, use_lr=True) has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust') has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust') != 'nonrobust') if has_robust1 or has_robust2: warnings.warn('Likelihood Ratio test is likely invalid with ' + 'robust covariance, proceeding anyway', InvalidTestWarning) llf_full = self.llf llf_restr = restricted.llf df_full = self.df_resid df_restr = restricted.df_resid lrdf = (df_restr - df_full) lrstat = -2*(llf_restr - llf_full) lr_pvalue = stats.chi2.sf(lrstat, lrdf) return lrstat, lr_pvalue, lrdf def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds): """create new results instance with robust covariance as default Parameters ---------- cov_type : string the type of robust sandwich estimator to use. see Notes below use_t : bool If true, then the t distribution is used for inference. If false, then the normal distribution is used. If `use_t` is None, then an appropriate default is used, which is `true` if the cov_type is nonrobust, and `false` in all other cases. kwds : depends on cov_type Required or optional arguments for robust covariance calculation. see Notes below Returns ------- results : results instance This method creates a new results instance with the requested robust covariance as the default covariance of the parameters. Inferential statistics like p-values and hypothesis tests will be based on this covariance matrix. Notes ----- The following covariance types and required or optional arguments are currently available: - 'fixed scale' and optional keyword argument 'scale' which uses a predefined scale estimate with default equal to one. - 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments: heteroscedasticity robust covariance - 'HAC' and keywords - `maxlag` integer (required) : number of lags to use - `kernel` string (optional) : kernel, default is Bartlett - `use_correction` bool (optional) : If true, use small sample correction - 'cluster' and required keyword `groups`, integer group indicator - `groups` array_like, integer (required) : index of clusters or groups - `use_correction` bool (optional) : If True the sandwich covariance is calulated with a small sample correction. If False the the sandwich covariance is calulated without small sample correction. - `df_correction` bool (optional) If True (default), then the degrees of freedom for the inferential statistics and hypothesis tests, such as pvalues, f_pvalue, conf_int, and t_test and f_test, are based on the number of groups minus one instead of the total number of observations minus the number of explanatory variables. `df_resid` of the results instance is adjusted. 
If False, then `df_resid` of the results instance is not adjusted. - 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and autocorrelation robust standard errors in panel data keywords - `time` array_like (required) : index of time periods - `maxlag` integer (required) : number of lags to use - `kernel` string (optional) : kernel, default is Bartlett - `use_correction` False or string in ['hac', 'cluster'] (optional) : If False the the sandwich covariance is calulated without small sample correction. If `use_correction = 'cluster'` (default), then the same small sample correction as in the case of 'covtype='cluster'' is used. - `df_correction` bool (optional) adjustment to df_resid, see cov_type 'cluster' above #TODO: we need more options here - 'hac-panel' heteroscedasticity and autocorrelation robust standard errors in panel data. The data needs to be sorted in this case, the time series for each panel unit or cluster need to be stacked. keywords - `time` array_like (required) : index of time periods - `maxlag` integer (required) : number of lags to use - `kernel` string (optional) : kernel, default is Bartlett - `use_correction` False or string in ['hac', 'cluster'] (optional) : If False the the sandwich covariance is calulated without small sample correction. - `df_correction` bool (optional) adjustment to df_resid, see cov_type 'cluster' above #TODO: we need more options here Reminder: `use_correction` in "nw-groupsum" and "nw-panel" is not bool, needs to be in [False, 'hac', 'cluster'] TODO: Currently there is no check for extra or misspelled keywords, except in the case of cov_type `HCx` """ import statsmodels.stats.sandwich_covariance as sw # TODO: make separate function that returns a robust cov plus info use_self = kwds.pop('use_self', False) if use_self: res = self else: res = self.__class__(self.model, self.params, normalized_cov_params=self.normalized_cov_params, scale=self.scale) res.cov_type = cov_type # use_t might already be defined by the class, and already set if use_t is None: use_t = self.use_t res.cov_kwds = {'use_t':use_t} # store for information res.use_t = use_t adjust_df = False if cov_type in ['cluster', 'nw-panel', 'nw-groupsum']: df_correction = kwds.get('df_correction', None) # TODO: check also use_correction, do I need all combinations? if df_correction is not False: # i.e. in [None, True]: # user didn't explicitely set it to False adjust_df = True res.cov_kwds['adjust_df'] = adjust_df # verify and set kwds, and calculate cov # TODO: this should be outsourced in a function so we can reuse it in # other models # TODO: make it DRYer repeated code for checking kwds if cov_type in ['fixed scale', 'fixed_scale']: res.cov_kwds['description'] = ('Standard Errors are based on ' + 'fixed scale') res.cov_kwds['scale'] = scale = kwds.get('scale', 1.) 
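            # With 'fixed scale' the parameter covariance is simply
            # scale * normalized_cov_params (the whitened (X'X)^{-1}), i.e. the
            # user-supplied scale replaces the estimated error variance.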
res.cov_params_default = scale * res.normalized_cov_params elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'): if kwds: raise ValueError('heteroscedasticity robust covarians ' + 'does not use keywords') res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' + 'robust ' + '(' + cov_type + ')') # TODO cannot access cov without calling se first getattr(self, cov_type.upper() + '_se') res.cov_params_default = getattr(self, 'cov_' + cov_type.upper()) elif cov_type == 'HAC': maxlags = kwds['maxlags'] # required?, default in cov_hac_simple res.cov_kwds['maxlags'] = maxlags use_correction = kwds.get('use_correction', False) res.cov_kwds['use_correction'] = use_correction res.cov_kwds['description'] = ('Standard Errors are heteroscedasticity ' + 'and autocorrelation robust (HAC) using %d lags and %s small ' + 'sample correction') % (maxlags, ['without', 'with'][use_correction]) res.cov_params_default = sw.cov_hac_simple(self, nlags=maxlags, use_correction=use_correction) elif cov_type == 'cluster': #cluster robust standard errors, one- or two-way groups = kwds['groups'] if not hasattr(groups, 'shape'): groups = np.asarray(groups).T if groups.ndim >= 2: groups = groups.squeeze() res.cov_kwds['groups'] = groups use_correction = kwds.get('use_correction', True) res.cov_kwds['use_correction'] = use_correction if groups.ndim == 1: if adjust_df: # need to find number of groups # duplicate work self.n_groups = n_groups = len(np.unique(groups)) res.cov_params_default = sw.cov_cluster(self, groups, use_correction=use_correction) elif groups.ndim == 2: if hasattr(groups, 'values'): groups = groups.values if adjust_df: # need to find number of groups # duplicate work n_groups0 = len(np.unique(groups[:,0])) n_groups1 = len(np.unique(groups[:, 1])) self.n_groups = (n_groups0, n_groups1) n_groups = min(n_groups0, n_groups1) # use for adjust_df # Note: sw.cov_cluster_2groups has 3 returns res.cov_params_default = sw.cov_cluster_2groups(self, groups, use_correction=use_correction)[0] else: raise ValueError('only two groups are supported') res.cov_kwds['description'] = ('Standard Errors are robust to' + 'cluster correlation ' + '(' + cov_type + ')') elif cov_type == 'nw-panel': #cluster robust standard errors res.cov_kwds['time'] = time = kwds['time'] #TODO: nlags is currently required #nlags = kwds.get('nlags', True) #res.cov_kwds['nlags'] = nlags #TODO: `nlags` or `maxlags` res.cov_kwds['maxlags'] = maxlags = kwds['maxlags'] use_correction = kwds.get('use_correction', 'hac') res.cov_kwds['use_correction'] = use_correction weights_func = kwds.get('weights_func', sw.weights_bartlett) res.cov_kwds['weights_func'] = weights_func # TODO: clumsy time index in cov_nw_panel tt = (np.nonzero(np.diff(time) < 0)[0] + 1).tolist() groupidx = lzip([0] + tt, tt + [len(time)]) self.n_groups = n_groups = len(groupidx) res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx, weights_func=weights_func, use_correction=use_correction) res.cov_kwds['description'] = ('Standard Errors are robust to' + 'cluster correlation ' + '(' + cov_type + ')') elif cov_type == 'nw-groupsum': # Driscoll-Kraay standard errors res.cov_kwds['time'] = time = kwds['time'] #TODO: nlags is currently required #nlags = kwds.get('nlags', True) #res.cov_kwds['nlags'] = nlags #TODO: `nlags` or `maxlags` res.cov_kwds['maxlags'] = maxlags = kwds['maxlags'] use_correction = kwds.get('use_correction', 'cluster') res.cov_kwds['use_correction'] = use_correction weights_func = kwds.get('weights_func', sw.weights_bartlett) 
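            # Driscoll-Kraay ('nw-groupsum'): scores are summed across the
            # cross-section within each time period and a HAC covariance
            # (Bartlett kernel by default) is applied to that aggregated series,
            # giving robustness to both cross-sectional and serial correlation.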
res.cov_kwds['weights_func'] = weights_func if adjust_df: # need to find number of groups tt = (np.nonzero(np.diff(time) < 0)[0] + 1) self.n_groups = n_groups = len(tt) + 1 res.cov_params_default = sw.cov_nw_groupsum(self, maxlags, time, weights_func=weights_func, use_correction=use_correction) res.cov_kwds['description'] = ( 'Driscoll and Kraay Standard Errors are robust to ' + 'cluster correlation ' + '(' + cov_type + ')') else: raise ValueError('cov_type not recognized. See docstring for ' + 'available options and spelling') if adjust_df: # Note: df_resid is used for scale and others, add new attribute res.df_resid_inference = n_groups - 1 return res def get_prediction(self, exog=None, transform=True, weights=None, row_labels=None, **kwds): return pred.get_prediction(self, exog=exog, transform=transform, weights=weights, row_labels=row_labels, **kwds) get_prediction.__doc__ = pred.get_prediction.__doc__ def summary(self, yname=None, xname=None, title=None, alpha=.05): """Summarize the Regression Results Parameters ----------- yname : string, optional Default is `y` xname : list of strings, optional Default is `var_##` for ## in p the number of regressors title : string, optional Title for the top table. If not None, then this replaces the default title alpha : float significance level for the confidence intervals Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. See Also -------- statsmodels.iolib.summary.Summary : class to hold summary results """ #TODO: import where we need it (for now), add as cached attributes from statsmodels.stats.stattools import (jarque_bera, omni_normtest, durbin_watson) jb, jbpv, skew, kurtosis = jarque_bera(self.wresid) omni, omnipv = omni_normtest(self.wresid) eigvals = self.eigenvals condno = self.condition_number self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis, omni=omni, omnipv=omnipv, condno=condno, mineigval=eigvals[-1]) #TODO not used yet #diagn_left_header = ['Models stats'] #diagn_right_header = ['Residual stats'] #TODO: requiring list/iterable is a bit annoying #need more control over formatting #TODO: default don't work if it's not identically spelled top_left = [('Dep. Variable:', None), ('Model:', None), ('Method:', ['Least Squares']), ('Date:', None), ('Time:', None), ('No. Observations:', None), ('Df Residuals:', None), #[self.df_resid]), #TODO: spelling ('Df Model:', None), #[self.df_model]) ] if hasattr(self, 'cov_type'): top_left.append(('Covariance Type:', [self.cov_type])) top_right = [('R-squared:', ["%#8.3f" % self.rsquared]), ('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]), ('F-statistic:', ["%#8.4g" % self.fvalue] ), ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]), ('Log-Likelihood:', None), #["%#6.4g" % self.llf]), ('AIC:', ["%#8.4g" % self.aic]), ('BIC:', ["%#8.4g" % self.bic]) ] diagn_left = [('Omnibus:', ["%#6.3f" % omni]), ('Prob(Omnibus):', ["%#6.3f" % omnipv]), ('Skew:', ["%#6.3f" % skew]), ('Kurtosis:', ["%#6.3f" % kurtosis]) ] diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]), ('Jarque-Bera (JB):', ["%#8.3f" % jb]), ('Prob(JB):', ["%#8.3g" % jbpv]), ('Cond. 
No.', ["%#8.3g" % condno]) ] if title is None: title = self.model.__class__.__name__ + ' ' + "Regression Results" #create summary table instance from statsmodels.iolib.summary import Summary smry = Summary() smry.add_table_2cols(self, gleft=top_left, gright=top_right, yname=yname, xname=xname, title=title) smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha, use_t=self.use_t) smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right, yname=yname, xname=xname, title="") #add warnings/notes, added to text format only etext =[] if hasattr(self, 'cov_type'): etext.append(self.cov_kwds['description']) if self.model.exog.shape[0] < self.model.exog.shape[1]: wstr = "The input rank is higher than the number of observations." etext.append(wstr) if eigvals[-1] < 1e-10: wstr = "The smallest eigenvalue is %6.3g. This might indicate " wstr += "that there are\n" wstr += "strong multicollinearity problems or that the design " wstr += "matrix is singular." wstr = wstr % eigvals[-1] etext.append(wstr) elif condno > 1000: #TODO: what is recommended wstr = "The condition number is large, %6.3g. This might " wstr += "indicate that there are\n" wstr += "strong multicollinearity or other numerical " wstr += "problems." wstr = wstr % condno etext.append(wstr) if etext: etext = ["[{0}] {1}".format(i + 1, text) for i, text in enumerate(etext)] etext.insert(0, "Warnings:") smry.add_extra_txt(etext) return smry #top = summary_top(self, gleft=topleft, gright=diagn_left, #[], # yname=yname, xname=xname, # title=self.model.__class__.__name__ + ' ' + # "Regression Results") #par = summary_params(self, yname=yname, xname=xname, alpha=.05, # use_t=False) # #diagn = summary_top(self, gleft=diagn_left, gright=diagn_right, # yname=yname, xname=xname, # title="Linear Model") # #return summary_return([top, par, diagn], return_fmt=return_fmt) def summary2(self, yname=None, xname=None, title=None, alpha=.05, float_format="%.4f"): """Experimental summary function to summarize the regression results Parameters ----------- xname : List of strings of length equal to the number of parameters Names of the independent variables (optional) yname : string Name of the dependent variable (optional) title : string, optional Title for the top table. If not None, then this replaces the default title alpha : float significance level for the confidence intervals float_format: string print format for floats in parameters summary Returns ------- smry : Summary instance this holds the summary tables and text, which can be printed or converted to various output formats. 
See Also -------- statsmodels.iolib.summary.Summary : class to hold summary results """ # Diagnostics from statsmodels.stats.stattools import (jarque_bera, omni_normtest, durbin_watson) from statsmodels.compat.collections import OrderedDict jb, jbpv, skew, kurtosis = jarque_bera(self.wresid) omni, omnipv = omni_normtest(self.wresid) dw = durbin_watson(self.wresid) eigvals = self.eigenvals condno = self.condition_number eigvals = np.sort(eigvals) #in increasing order diagnostic = OrderedDict([ ('Omnibus:', "%.3f" % omni), ('Prob(Omnibus):', "%.3f" % omnipv), ('Skew:', "%.3f" % skew), ('Kurtosis:', "%.3f" % kurtosis), ('Durbin-Watson:', "%.3f" % dw), ('Jarque-Bera (JB):', "%.3f" % jb), ('Prob(JB):', "%.3f" % jbpv), ('Condition No.:', "%.0f" % condno) ]) # Summary from statsmodels.iolib import summary2 smry = summary2.Summary() smry.add_base(results=self, alpha=alpha, float_format=float_format, xname=xname, yname=yname, title=title) smry.add_dict(diagnostic) # Warnings if eigvals[-1] < 1e-10: warn = "The smallest eigenvalue is %6.3g. This might indicate that\ there are strong multicollinearity problems or that the design\ matrix is singular." % eigvals[-1] smry.add_text(warn) if condno > 1000: warn = "* The condition number is large (%.g). This might indicate \ strong multicollinearity or other numerical problems." % condno smry.add_text(warn) return smry class OLSResults(RegressionResults): """ Results class for for an OLS model. Most of the methods and attributes are inherited from RegressionResults. The special methods that are only available for OLS are: - get_influence - outlier_test - el_test - conf_int_el See Also -------- RegressionResults """ def get_influence(self): """ get an instance of Influence with influence and outlier measures Returns ------- infl : Influence instance the instance has methods to calculate the main influence and outlier measures for the OLS regression See also -------- :class:`statsmodels.stats.outliers_influence.OLSInfluence` """ from statsmodels.stats.outliers_influence import OLSInfluence return OLSInfluence(self) def outlier_test(self, method='bonf', alpha=.05): """ Test observations for outliers according to method Parameters ---------- method : str - `bonferroni` : one-step correction - `sidak` : one-step correction - `holm-sidak` : - `holm` : - `simes-hochberg` : - `hommel` : - `fdr_bh` : Benjamini/Hochberg - `fdr_by` : Benjamini/Yekutieli See `statsmodels.stats.multitest.multipletests` for details. alpha : float familywise error rate Returns ------- table : ndarray or DataFrame Returns either an ndarray or a DataFrame if labels is not None. Will attempt to get labels from model_results if available. The columns are the Studentized residuals, the unadjusted p-value, and the corrected p-value according to method. Notes ----- The unadjusted p-value is stats.t.sf(abs(resid), df) where df = df_resid - 1. """ from statsmodels.stats.outliers_influence import outlier_test return outlier_test(self, method, alpha) def el_test(self, b0_vals, param_nums, return_weights=0, ret_params=0, method='nm', stochastic_exog=1, return_params=0): """ Tests single or joint hypotheses of the regression parameters using Empirical Likelihood. Parameters ---------- b0_vals : 1darray The hypothesized value of the parameter to be tested param_nums : 1darray The parameter number to be tested print_weights : bool If true, returns the weights that optimize the likelihood ratio at b0_vals. 
Default is False ret_params : bool If true, returns the parameter vector that maximizes the likelihood ratio at b0_vals. Also returns the weights. Default is False method : string Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The optimization method that optimizes over nuisance parameters. Default is 'nm' stochastic_exog : bool When TRUE, the exogenous variables are assumed to be stochastic. When the regressors are nonstochastic, moment conditions are placed on the exogenous variables. Confidence intervals for stochastic regressors are at least as large as non-stochastic regressors. Default = TRUE Returns ------- res : tuple The p-value and -2 times the log-likelihood ratio for the hypothesized values. Examples -------- >>> import statsmodels.api as sm >>> data = sm.datasets.stackloss.load() >>> endog = data.endog >>> exog = sm.add_constant(data.exog) >>> model = sm.OLS(endog, exog) >>> fitted = model.fit() >>> fitted.params >>> array([-39.91967442, 0.7156402 , 1.29528612, -0.15212252]) >>> fitted.rsquared >>> 0.91357690446068196 >>> # Test that the slope on the first variable is 0 >>> fitted.test_beta([0], [1]) >>> (1.7894660442330235e-07, 27.248146353709153) """ params = np.copy(self.params) opt_fun_inst = _ELRegOpts() # to store weights if len(param_nums) == len(params): llr = opt_fun_inst._opt_nuis_regress([], param_nums=param_nums, endog=self.model.endog, exog=self.model.exog, nobs=self.model.nobs, nvar=self.model.exog.shape[1], params=params, b0_vals=b0_vals, stochastic_exog=stochastic_exog) pval = 1 - stats.chi2.cdf(llr, len(param_nums)) if return_weights: return llr, pval, opt_fun_inst.new_weights else: return llr, pval x0 = np.delete(params, param_nums) args = (param_nums, self.model.endog, self.model.exog, self.model.nobs, self.model.exog.shape[1], params, b0_vals, stochastic_exog) if method == 'nm': llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0, maxfun=10000, maxiter=10000, full_output=1, disp=0, args=args)[1] if method == 'powell': llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0, full_output=1, disp=0, args=args)[1] pval = 1 - stats.chi2.cdf(llr, len(param_nums)) if ret_params: return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params elif return_weights: return llr, pval, opt_fun_inst.new_weights else: return llr, pval def conf_int_el(self, param_num, sig=.05, upper_bound=None, lower_bound=None, method='nm', stochastic_exog=1): """ Computes the confidence interval for the parameter given by param_num using Empirical Likelihood Parameters ---------- param_num : float The parameter for which the confidence interval is desired sig : float The significance level. Default is .05 upper_bound : float The maximum value the upper limit can be. Default is the 99.9% confidence value under OLS assumptions. lower_bound : float The minimum value the lower limit can be. Default is the 99.9% confidence value under OLS assumptions. method : string Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The optimization method that optimizes over nuisance parameters. Default is 'nm' Returns ------- ci : tuple The confidence interval See Also -------- el_test Notes ----- This function uses brentq to find the value of beta where test_beta([beta], param_num)[1] is equal to the critical value. The function returns the results of each iteration of brentq at each value of beta. The current function value of the last printed optimization should be the critical value at the desired significance level. For alpha=.05, the value is 3.841459. 
To ensure optimization terminated successfully, it is suggested to do el_test([lower_limit], [param_num]) If the optimization does not terminate successfully, consider switching optimization algorithms. If optimization is still not successful, try changing the values of start_int_params. If the current function value repeatedly jumps from a number between 0 and the critical value and a very large number (>50), the starting parameters of the interior minimization need to be changed. """ r0 = stats.chi2.ppf(1 - sig, 1) if upper_bound is None: upper_bound = self.conf_int(.01)[param_num][1] if lower_bound is None: lower_bound = self.conf_int(.01)[param_num][0] f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]), method=method, stochastic_exog=stochastic_exog)[0]-r0 lowerl = optimize.brenth(f, lower_bound, self.params[param_num]) upperl = optimize.brenth(f, self.params[param_num], upper_bound) # ^ Seems to be faster than brentq in most cases return (lowerl, upperl) class RegressionResultsWrapper(wrap.ResultsWrapper): _attrs = { 'chisq' : 'columns', 'sresid' : 'rows', 'weights' : 'rows', 'wresid' : 'rows', 'bcov_unscaled' : 'cov', 'bcov_scaled' : 'cov', 'HC0_se' : 'columns', 'HC1_se' : 'columns', 'HC2_se' : 'columns', 'HC3_se' : 'columns', 'norm_resid' : 'rows', } _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs, _attrs) _methods = {} _wrap_methods = wrap.union_dicts( base.LikelihoodResultsWrapper._wrap_methods, _methods) wrap.populate_wrapper(RegressionResultsWrapper, RegressionResults) if __name__ == "__main__": import statsmodels.api as sm data = sm.datasets.longley.load() data.exog = add_constant(data.exog, prepend=False) ols_results = OLS(data.endog, data.exog).fit() #results gls_results = GLS(data.endog, data.exog).fit() #results print(ols_results.summary()) tables = ols_results.summary(returns='tables') csv = ols_results.summary(returns='csv') """ Summary of Regression Results ======================================= | Dependent Variable: ['y']| | Model: OLS| | Method: Least Squares| | Date: Tue, 29 Jun 2010| | Time: 22:32:21| | # obs: 16.0| | Df residuals: 9.0| | Df model: 6.0| =========================================================================== | coefficient std. error t-statistic prob.| --------------------------------------------------------------------------- | x1 15.0619 84.9149 0.1774 0.8631| | x2 -0.0358 0.0335 -1.0695 0.3127| | x3 -2.0202 0.4884 -4.1364 0.002535| | x4 -1.0332 0.2143 -4.8220 0.0009444| | x5 -0.0511 0.2261 -0.2261 0.8262| | x6 1829.1515 455.4785 4.0159 0.003037| | const -3482258.6346 890420.3836 -3.9108 0.003560| =========================================================================== | Models stats Residual stats | --------------------------------------------------------------------------- | R-squared: 0.995479 Durbin-Watson: 2.55949 | | Adjusted R-squared: 0.992465 Omnibus: 0.748615 | | F-statistic: 330.285 Prob(Omnibus): 0.687765 | | Prob (F-statistic): 4.98403e-10 JB: 0.352773 | | Log likelihood: -109.617 Prob(JB): 0.838294 | | AIC criterion: 233.235 Skew: 0.419984 | | BIC criterion: 238.643 Kurtosis: 2.43373 | --------------------------------------------------------------------------- """
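The OLS-only helpers documented above (get_influence, outlier_test, and the empirical-likelihood pair el_test / conf_int_el) are easier to follow with a usage sketch. This is a minimal example on made-up data, assuming a recent statsmodels install; the variable names are illustrative and not taken from the original module.

# Minimal usage sketch for the OLS-only extras documented above.
# Toy data; names here are illustrative.
import numpy as np
import statsmodels.api as sm
from scipy import stats

rng = np.random.RandomState(0)
x = sm.add_constant(rng.normal(size=(50, 2)))
y = np.dot(x, [1.0, 2.0, -0.5]) + rng.normal(size=50)

res = sm.OLS(y, x).fit()

# Influence and outlier diagnostics (OLSInfluence / outlier_test above).
infl = res.get_influence()
print(infl.summary_frame().head())
print(res.outlier_test(method='bonf', alpha=0.05)[:5])

# conf_int_el inverts el_test: it searches for the parameter values where
# -2 log LR hits the chi-square critical value, e.g. 3.841459 at alpha=.05.
print(stats.chi2.ppf(1 - 0.05, 1))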
bsd-3-clause
Lucas-Armand/genetic-algorithm
dev/8ºSemana/testes of speed.py
5
3255
# -*- coding: utf-8 -*- import os import csv import random import numpy as np import timeit import time as Time import matplotlib.pyplot as plt import matplotlib.animation as animation from itertools import product, combinations class Block: def __init__(self,point,a,b,c,weight,btype): self.p=point self.a=a self.b=b self.c=c self.w=weight self.t=btype def csv_read(name): #Metodo de leitura, transforma um arquivo CSV em um vetor CSV=open(name,'r') dados=CSV.read() dados=dados.replace(',','.') dados=dados.replace(';',',') CSV.close() CSV=open("temp.csv",'w') CSV.write(dados) CSV.close() CSV=open("temp.csv",'r') dados=csv.reader(CSV) v=[] for i in dados: I=[] for j in i: try: j = float(j) except: pass I.append(j) v.append(I) CSV.close() os.remove("temp.csv") return (v) def defineGeometry(name): vect = csv_read(name) blockNumber ={} for i in vect: a = i[1] b = i[2] c = i[3] point = [i[4],i[5],i[6]] weight = i[7] btype = i[-1] block = Block(point,a,b,c,weight,btype) blockNumber[i[0]] = block return blockNumber bNumb=defineGeometry('GeometriaNavio.csv') # Define vicinity #deck vicinity={1:[2]} for i in range(2,16): vicinity[i] = [i-1,i+1] vicinity[16] = [15] #side vicinity[17] = [18,19] vicinity[18] = [17,20] for i in range(19,31): v = i-1 if i%2==0 else i+1 vicinity[i] = [v,i-2,i+2] vicinity[31] = [29,32] vicinity[32] = [30,31] #bott vicinity[33] = [34,35] vicinity[34] = [33,36] for i in range(35,63): v = i-1 if i%2==0 else i+1 vicinity[i] = [v,i-2,i+2] vicinity[63] = [61,64] vicinity[64] = [63,62] #coff vicinity[65] = [66] for i in range(66,70): vicinity[i] = [i-1,i+1] vicinity[70] = [69] alfa = 10 beta = 1 built = [] time = 0 append = built.append def order(x): return vicinity[x] def time(bNumb,vicinity,chromo): t_time = Time.time() alfa = 1 built = [] time = 0 append = built.append def time_vector(x,y): for i in y: if i in built: time = alfa break try:time except: time = 10*alfa append(x) return time vic = [vicinity[x] for x in chromo] time = sum((time_vector(x,y) for x,y in zip(chromo,vic))) return time chromo = [44, 39, 56, 47, 49, 37, 42, 46, 51, 58, 60, 62, 52, 41, 35, 33, 50, 61, 54, 34, 59, 43, 48, 45, 55, 53, 38, 57, 64, 67, 68, 63, 40, 36, 21, 66, 22, 6, 20, 65, 18, 5, 17, 69, 28, 27, 70, 29, 1, 12, 30, 13, 14, 26, 31, 24, 19, 2, 3, 4, 25, 11, 32, 10, 15, 16, 9, 23, 7, 8] import cProfile cProfile.run('time(bNumb,vicinity,chromo)') ## ##print timeit.timeit(setup='from __main__ import chromo;'+ ## 'from __main__ import bNumb;'+ ## 'from __main__ import time;'+ ## 'from __main__ import vicinity ' ## ,stmt='time(bNumb,vicinity,chromo)') #print t.timeit(number = 1000000)
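A hedged aside on the profiling above: cProfile usually attributes most of the cost of time() to the `i in built` membership tests against a growing list. The micro-benchmark below (sizes are invented, not taken from the CSV geometry data) contrasts list and set membership with timeit, in the same spirit as the commented-out timeit call.

# Hypothetical micro-benchmark: membership tests against a list vs. a set,
# which is where a profile of time() above tends to point.
import timeit

setup_list = "built = list(range(10000))"
setup_set = "built = set(range(10000))"
stmt = "9999 in built"

print("list membership:", timeit.timeit(stmt, setup=setup_list, number=100000))
print("set membership: ", timeit.timeit(stmt, setup=setup_set, number=100000))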
gpl-3.0
jchodera/assaytools
scripts/xml2png4scans.py
2
5177
# This script takes xml data file output from the Tecan Infinite m1000 Pro plate reader # and makes quick and dirty images of the raw data. #But with scans and not just singlet reads. # The same procedure can be used to make matrices suitable for analysis using # matrix = dataframe.values # Made by Sonya Hanson, with some help from things that worked in xml2png.py # Friday, June 20,2014 # Usage: python xml2png4scans.py *.xml ############ For future to combine with xml2png.py # # for i, sect in enumerate(Sections): # reads = sect.xpath("*/Well") # parameters = root.xpath(path)[0] # if reads[0].attrib['Type'] == "Scan": # ############## import numpy as np import matplotlib.pyplot as plt from lxml import etree import pandas as pd import matplotlib.cm as cm import seaborn import sys import os # Define extract function that extracts parameters def extract(taglist): result = [] for p in taglist: print "Attempting to extract tag '%s'..." % p try: param = parameters.xpath("*[@Name='" + p + "']")[0] result.append( p + '=' + param.attrib['Value']) except: # tag not found result.append(None) return result def process_files(xml_files): so_many = len(xml_files) print "****This script is about to make png files for %s xml files. ****" % so_many for file in xml_files: # Parse XML file. root = etree.parse(file) # Remove extension from xml filename. file_name = os.path.splitext(file)[0] # Extract plate type and barcode. plate = root.xpath("/*/Header/Parameters/Parameter[@Name='Plate']")[0] plate_type = plate.attrib['Value'] bar = root.xpath("/*/Plate/BC")[0] barcode = bar.text # Define Sections. Sections = root.xpath("/*/Section") much = len(Sections) print "****The xml file " + file + " has %s data sections:****" % much for sect in Sections: print sect.attrib['Name'] data = [] for i, sect in enumerate(Sections): # Extract Parameters for this section. path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters" parameters = root.xpath(path)[0] # Parameters are extracted slightly differently depending on Absorbance or Fluorescence read. if parameters[0].attrib['Value'] == "Absorbance": result = extract(["Mode", "Wavelength Start", "Wavelength End", "Wavelength Step Size"]) title = '%s, %s, %s, %s' % tuple(result) else: result = extract(["Gain", "Excitation Wavelength", "Emission Wavelength", "Part of Plate", "Mode"]) title = '%s, %s, %s, \n %s, %s' % tuple(result) print "****The %sth section has the parameters:****" %i print title # Extract Reads for this section. Sections = root.xpath("/*/Section") reads = root.xpath("/*/*/*/Well") wellIDs = [read.attrib['Pos'] for read in reads] data = [(float(s.text), float(s.attrib['WL']), r.attrib['Pos']) for r in reads for s in r] dataframe = pd.DataFrame(data, columns=['fluorescence','wavelength (nm)','Well']) dataframe_pivot = pd.pivot_table(dataframe, index = 'wavelength (nm)', columns = ['Well']) # Make plot, complete with separate png for each section. 
section_name = sect.attrib['Name'] fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 12)) for i in range(1,12): dataframe_pivot.fluorescence.get('A' + str(i)).plot(ax=axes[0,0], title='A', c=cm.hsv(i*15)) for i in range(1,12): dataframe_pivot.fluorescence.get('B' + str(i)).plot(ax=axes[0,1], title='B', c=cm.hsv(i*15)) for i in range(1,12): dataframe_pivot.fluorescence.get('C' + str(i)).plot(ax=axes[0,2], title='C', c=cm.hsv(i*15)) for i in range(1,12): dataframe_pivot.fluorescence.get('D' + str(i)).plot(ax=axes[1,0], title='D', c=cm.hsv(i*15)) for i in range(1,12): dataframe_pivot.fluorescence.get('E' + str(i)).plot(ax=axes[1,1], title='E', c=cm.hsv(i*15)) for i in range(1,12): dataframe_pivot.fluorescence.get('F' + str(i)).plot(ax=axes[1,2], title='F', c=cm.hsv(i*15)) for i in range(1,12): dataframe_pivot.fluorescence.get('G' + str(i)).plot(ax=axes[2,0], title='G', c=cm.hsv(i*15)) for i in range(1,12): dataframe_pivot.fluorescence.get('H' + str(i)).plot(ax=axes[2,1], title='H', c=cm.hsv(i*15)) fig.suptitle('%s \n %s \n Barcode = %s' % (title, plate_type, barcode), fontsize=14) fig.subplots_adjust(hspace=0.3) plt.savefig('%s_%s.png' % (file_name, section_name)) return def entry_point(): xml_files = sys.argv[1:] process_files(xml_files) if __name__ == '__main__': xml_files = sys.argv[1:] process_files(xml_files)
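The core of the script above is the XPath walk over Well/S elements followed by a pandas pivot. The sketch below reproduces just that step on a tiny made-up XML fragment; the element names mirror the code, but the real Infinite M1000 schema is not shown here, so treat them as assumptions.

# Sketch of the XPath + pivot step on a made-up two-well XML fragment.
from lxml import etree
import pandas as pd

xml = b"""<Section Name="demo">
  <Data><Well Pos="A1"><S WL="350">1.0</S><S WL="360">1.5</S></Well>
        <Well Pos="A2"><S WL="350">0.7</S><S WL="360">0.9</S></Well></Data>
</Section>"""

root = etree.fromstring(xml)
reads = root.xpath("//Well")
data = [(float(s.text), float(s.attrib['WL']), w.attrib['Pos'])
        for w in reads for s in w]
df = pd.DataFrame(data, columns=['fluorescence', 'wavelength (nm)', 'Well'])
pivot = pd.pivot_table(df, index='wavelength (nm)', columns=['Well'])
print(pivot)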
lgpl-2.1
kazemakase/scikit-learn
examples/plot_multilabel.py
87
4279
# Authors: Vlad Niculae, Mathieu Blondel # License: BSD 3 clause """ ========================= Multilabel classification ========================= This example simulates a multi-label document classification problem. The dataset is generated randomly based on the following process: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is more than 2, and that the document length is never zero. Likewise, we reject classes which have already been chosen. The documents that are assigned to both classes are plotted surrounded by two colored circles. The classification is performed by projecting to the first two principal components found by PCA and CCA for visualisation purposes, followed by using the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two SVCs with linear kernels to learn a discriminative model for each class. Note that PCA is used to perform an unsupervised dimensionality reduction, while CCA is used to perform a supervised one. Note: in the plot, "unlabeled samples" does not mean that we don't know the labels (as in semi-supervised learning) but that the samples simply do *not* have a label. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_multilabel_classification from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.preprocessing import LabelBinarizer from sklearn.decomposition import PCA from sklearn.cross_decomposition import CCA def plot_hyperplane(clf, min_x, max_x, linestyle, label): # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough yy = a * xx - (clf.intercept_[0]) / w[1] plt.plot(xx, yy, linestyle, label=label) def plot_subfigure(X, Y, subplot, title, transform): if transform == "pca": X = PCA(n_components=2).fit_transform(X) elif transform == "cca": X = CCA(n_components=2).fit(X, Y).transform(X) else: raise ValueError min_x = np.min(X[:, 0]) max_x = np.max(X[:, 0]) min_y = np.min(X[:, 1]) max_y = np.max(X[:, 1]) classif = OneVsRestClassifier(SVC(kernel='linear')) classif.fit(X, Y) plt.subplot(2, 2, subplot) plt.title(title) zero_class = np.where(Y[:, 0]) one_class = np.where(Y[:, 1]) plt.scatter(X[:, 0], X[:, 1], s=40, c='gray') plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b', facecolors='none', linewidths=2, label='Class 1') plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange', facecolors='none', linewidths=2, label='Class 2') plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--', 'Boundary\nfor class 1') plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.', 'Boundary\nfor class 2') plt.xticks(()) plt.yticks(()) plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x) plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y) if subplot == 2: plt.xlabel('First principal component') plt.ylabel('Second principal component') plt.legend(loc="upper left") plt.figure(figsize=(8, 6)) X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=True, return_indicator=True, random_state=1) plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca") X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=False, 
return_indicator=True, random_state=1) plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca") plt.subplots_adjust(.04, .02, .97, .94, .09, .2) plt.show()
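The docstring above describes the generative process behind the dataset (label count from a Poisson, classes and words from multinomials, with rejection sampling). A small numpy sketch of that story, separate from make_multilabel_classification itself, makes the sampling steps concrete; the constants are arbitrary.

# Toy re-implementation of the generative story in the docstring above
# (a sketch, not sklearn's make_multilabel_classification internals).
import numpy as np

rng = np.random.RandomState(0)
n_classes, n_words, length, n_labels = 3, 20, 30, 1
theta = np.full(n_classes, 1.0 / n_classes)            # class prior
theta_c = rng.dirichlet(np.ones(n_words), n_classes)   # per-class word dists


def sample_document():
    n = n_classes + 1
    while n > n_classes:            # rejection sampling on the label count
        n = rng.poisson(n_labels)
    labels = rng.choice(n_classes, size=n, replace=False, p=theta)
    k = 0
    while k == 0:                   # reject zero-length documents
        k = rng.poisson(length)
    words = np.zeros(n_words)
    for _ in range(k):
        # unlabeled documents draw words from an arbitrary class here
        c = rng.choice(labels) if n else rng.choice(n_classes)
        words += rng.multinomial(1, theta_c[c])
    return words, labels


x, y = sample_document()
print(int(x.sum()), sorted(y))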
bsd-3-clause
andaag/scikit-learn
examples/plot_johnson_lindenstrauss_bound.py
134
7452
""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.)) plt.xlabel("Squared distances rate: projected / original") 
plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
bsd-3-clause
smorante/continuous-goal-directed-actions
demonstration-feature-selection/src/alternatives/main_dtw_mds_dbscan.py
2
3384
# -*- coding: utf-8 -*- """ Author: Santiago Morante Robotics Lab. Universidad Carlos III de Madrid """ ########################## DTW #################################### import libmddtw import matplotlib.pyplot as plt from dtw import dtw ########################## MDS #################################### import numpy as np from sklearn.metrics import euclidean_distances import libmds ########################## DBSCAN #################################### import libdbscan from sklearn.preprocessing import StandardScaler # to normalize import glob from sklearn import preprocessing EXPERIMENT = "experiment-1" PATH = "../datasets/" + EXPERIMENT +"/raw/*.csv" def normalize(X): return StandardScaler().fit_transform(X) def standardize(X): return preprocessing.scale(X) def main(): demons=[] demoNames = sorted(glob.glob(PATH)) print demoNames for elem in demoNames: tmp = np.loadtxt(elem) tmp_clean = tmp[:,1:] tmp_clean = standardize(tmp_clean) demons.append(tmp_clean) dist=np.zeros((len(demoNames),len(demoNames))) ########################################################################## ########################## DTW #################################### ########################################################################## # fill distance matrix for i in range(len(demoNames)): for j in range(len(demoNames)): mddtw = libmddtw.Mddtw() x,y = mddtw.collapseRows(demons[i],demons[j]) #fig = plt.figure() #plt.plot(x) #plt.plot(y) singleDist, singleCost, singlePath = mddtw.compute(demons[i],demons[j]) dist[i][j]=singleDist # print 'Minimum distance found:', singleDist #fig = plt.figure() # plt.imshow(cost.T, origin='lower', cmap=plt.cm.gray, interpolation='nearest') # plt.plot(path[0], path[1], 'w') # plt.xlim((-0.5, cost.shape[0]-0.5)) # plt.ylim((-0.5, cost.shape[1]-0.5)) # print "dist", dist ########################################################################### ########################### MDS #################################### ########################################################################### md = libmds.Mds() md.create(n_components=2, metric=True, max_iter=3000, eps=1e-12, random_state=None, dissimilarity="precomputed", n_jobs=-1, n_init=100) points = md.compute(dist) print "points", points # md.plot() ########################################################################## ########################## DBSCAN #################################### ########################################################################## # normalize normalizedPoints = normalize(points) # ########################## dbscan db = libdbscan.Dbscan() db.create(eps=1.5, min_samples=2) db.compute(normalizedPoints) db.plot() print "[INFO] Detected outliers: ", db.getOutliers() ############################################################################## ############################################################################## if __name__ == "__main__": main()
mit
sandeepgupta2k4/tensorflow
tensorflow/examples/learn/iris.py
35
1654
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf


def main(unused_argv):
  # Load dataset.
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  # Build 3 layer DNN with 10, 20, 10 units respectively.
  feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
      x_train)
  classifier = tf.contrib.learn.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

  # Fit and predict.
  classifier.fit(x_train, y_train, steps=200)
  predictions = list(classifier.predict(x_test, as_iterable=True))
  score = metrics.accuracy_score(y_test, predictions)
  print('Accuracy: {0:f}'.format(score))


if __name__ == '__main__':
  tf.app.run()
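tf.contrib.learn was deprecated and removed after TensorFlow 1.x, so the example above only runs on old TensorFlow releases. As a hedged alternative (not part of the original), the same 10-20-10 network can be sketched with scikit-learn's MLPClassifier:

# Hedged alternative sketch: the same 3-layer (10, 20, 10) classifier with
# scikit-learn instead of the removed tf.contrib.learn API.
from sklearn import datasets, metrics, model_selection
from sklearn.neural_network import MLPClassifier

iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)

clf = MLPClassifier(hidden_layer_sizes=(10, 20, 10), max_iter=2000,
                    random_state=42)
clf.fit(x_train, y_train)
print('Accuracy: {0:f}'.format(
    metrics.accuracy_score(y_test, clf.predict(x_test))))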
apache-2.0
raincoatrun/ThinkStats2
code/chap13soln.py
68
2961
"""This file contains code for use with "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function import pandas import numpy as np import thinkplot import thinkstats2 import survival def CleanData(resp): """Cleans respondent data. resp: DataFrame """ resp.cmdivorcx.replace([9998, 9999], np.nan, inplace=True) resp['notdivorced'] = resp.cmdivorcx.isnull().astype(int) resp['duration'] = (resp.cmdivorcx - resp.cmmarrhx) / 12.0 resp['durationsofar'] = (resp.cmintvw - resp.cmmarrhx) / 12.0 month0 = pandas.to_datetime('1899-12-15') dates = [month0 + pandas.DateOffset(months=cm) for cm in resp.cmbirth] resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10 def ResampleDivorceCurve(resps): """Plots divorce curves based on resampled data. resps: list of respondent DataFrames """ for _ in range(41): samples = [thinkstats2.ResampleRowsWeighted(resp) for resp in resps] sample = pandas.concat(samples, ignore_index=True) PlotDivorceCurveByDecade(sample, color='#225EA8', alpha=0.1) thinkplot.Show(xlabel='years', axis=[0, 28, 0, 1]) def ResampleDivorceCurveByDecade(resps): """Plots divorce curves for each birth cohort. resps: list of respondent DataFrames """ for i in range(41): samples = [thinkstats2.ResampleRowsWeighted(resp) for resp in resps] sample = pandas.concat(samples, ignore_index=True) groups = sample.groupby('decade') if i == 0: survival.AddLabelsByDecade(groups, alpha=0.7) EstimateSurvivalByDecade(groups, alpha=0.1) thinkplot.Save(root='survival7', xlabel='years', axis=[0, 28, 0, 1]) def EstimateSurvivalByDecade(groups, **options): """Groups respondents by decade and plots survival curves. groups: GroupBy object """ thinkplot.PrePlot(len(groups)) for name, group in groups: print(name, len(group)) _, sf = EstimateSurvival(group) thinkplot.Plot(sf, **options) def EstimateSurvival(resp): """Estimates the survival curve. resp: DataFrame of respondents returns: pair of HazardFunction, SurvivalFunction """ complete = resp[resp.notdivorced == 0].duration ongoing = resp[resp.notdivorced == 1].durationsofar hf = survival.EstimateHazardFunction(complete, ongoing) sf = hf.MakeSurvival() return hf, sf def main(): resp6 = survival.ReadFemResp2002() CleanData(resp6) married6 = resp6[resp6.evrmarry==1] resp7 = survival.ReadFemResp2010() CleanData(resp7) married7 = resp7[resp7.evrmarry==1] ResampleDivorceCurveByDecade([married6, married7]) if __name__ == '__main__': main()
gpl-3.0
mistercrunch/panoramix
superset/db_engine_specs/hive.py
1
20297
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import os import re import tempfile import time from datetime import datetime from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING from urllib import parse import numpy as np import pandas as pd import pyarrow as pa import pyarrow.parquet as pq from flask import current_app, g from sqlalchemy import Column, text from sqlalchemy.engine.base import Engine from sqlalchemy.engine.reflection import Inspector from sqlalchemy.engine.url import make_url, URL from sqlalchemy.orm import Session from sqlalchemy.sql.expression import ColumnClause, Select from superset.db_engine_specs.base import BaseEngineSpec from superset.db_engine_specs.presto import PrestoEngineSpec from superset.exceptions import SupersetException from superset.extensions import cache_manager from superset.models.sql_lab import Query from superset.sql_parse import ParsedQuery, Table from superset.utils import core as utils if TYPE_CHECKING: # prevent circular imports from superset.models.core import Database QueryStatus = utils.QueryStatus logger = logging.getLogger(__name__) def upload_to_s3(filename: str, upload_prefix: str, table: Table) -> str: """ Upload the file to S3. :param filename: The file to upload :param upload_prefix: The S3 prefix :param table: The table that will be created :returns: The S3 location of the table """ # Optional dependency import boto3 # pylint: disable=import-error bucket_path = current_app.config["CSV_TO_HIVE_UPLOAD_S3_BUCKET"] if not bucket_path: logger.info("No upload bucket specified") raise Exception( "No upload bucket specified. You can specify one in the config file." ) s3 = boto3.client("s3") location = os.path.join("s3a://", bucket_path, upload_prefix, table.table) s3.upload_file( filename, bucket_path, os.path.join(upload_prefix, table.table, os.path.basename(filename)), ) return location class HiveEngineSpec(PrestoEngineSpec): """Reuses PrestoEngineSpec functionality.""" engine = "hive" engine_name = "Apache Hive" max_column_name_length = 767 allows_alias_to_source_column = True allows_hidden_ordeby_agg = False # When running `SHOW FUNCTIONS`, what is the name of the column with the # function names? 
_show_functions_column = "tab_name" # pylint: disable=line-too-long _time_grain_expressions = { None: "{col}", "PT1S": "from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:mm:ss')", "PT1M": "from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:mm:00')", "PT1H": "from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:00:00')", "P1D": "from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd 00:00:00')", "P1W": "date_format(date_sub({col}, CAST(7-from_unixtime(unix_timestamp({col}),'u') as int)), 'yyyy-MM-dd 00:00:00')", "P1M": "from_unixtime(unix_timestamp({col}), 'yyyy-MM-01 00:00:00')", "P0.25Y": "date_format(add_months(trunc({col}, 'MM'), -(month({col})-1)%3), 'yyyy-MM-dd 00:00:00')", "P1Y": "from_unixtime(unix_timestamp({col}), 'yyyy-01-01 00:00:00')", "P1W/1970-01-03T00:00:00Z": "date_format(date_add({col}, INT(6-from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')", "1969-12-28T00:00:00Z/P1W": "date_format(date_add({col}, -INT(from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')", } # Scoping regex at class level to avoid recompiling # 17/02/07 19:36:38 INFO ql.Driver: Total jobs = 5 jobs_stats_r = re.compile(r".*INFO.*Total jobs = (?P<max_jobs>[0-9]+)") # 17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5 launching_job_r = re.compile( ".*INFO.*Launching Job (?P<job_number>[0-9]+) out of " "(?P<max_jobs>[0-9]+)" ) # 17/02/07 19:36:58 INFO exec.Task: 2017-02-07 19:36:58,152 Stage-18 # map = 0%, reduce = 0% stage_progress_r = re.compile( r".*INFO.*Stage-(?P<stage_number>[0-9]+).*" r"map = (?P<map_progress>[0-9]+)%.*" r"reduce = (?P<reduce_progress>[0-9]+)%.*" ) @classmethod def patch(cls) -> None: from pyhive import hive from TCLIService import ( constants as patched_constants, TCLIService as patched_TCLIService, ttypes as patched_ttypes, ) from superset.db_engines import hive as patched_hive hive.TCLIService = patched_TCLIService hive.constants = patched_constants hive.ttypes = patched_ttypes hive.Cursor.fetch_logs = patched_hive.fetch_logs @classmethod def get_all_datasource_names( cls, database: "Database", datasource_type: str ) -> List[utils.DatasourceName]: return BaseEngineSpec.get_all_datasource_names(database, datasource_type) @classmethod def fetch_data( cls, cursor: Any, limit: Optional[int] = None ) -> List[Tuple[Any, ...]]: import pyhive from TCLIService import ttypes state = cursor.poll() if state.operationState == ttypes.TOperationState.ERROR_STATE: raise Exception("Query error", state.errorMessage) try: return super().fetch_data(cursor, limit) except pyhive.exc.ProgrammingError: return [] @classmethod def df_to_sql( cls, database: "Database", table: Table, df: pd.DataFrame, to_sql_kwargs: Dict[str, Any], ) -> None: """ Upload data from a Pandas DataFrame to a database. The data is stored via the binary Parquet format which is both less problematic and more performant than a text file. More specifically storing a table as a CSV text file has severe limitations including the fact that the Hive CSV SerDe does not support multiline fields. Note this method does not create metadata for the table. :param database: The database to upload the data to :param: table The table to upload the data to :param df: The dataframe with data to be uploaded :param to_sql_kwargs: The kwargs to be passed to pandas.DataFrame.to_sql` method """ engine = cls.get_engine(database) if to_sql_kwargs["if_exists"] == "append": raise SupersetException("Append operation not currently supported") if to_sql_kwargs["if_exists"] == "fail": # Ensure table doesn't already exist. 
if table.schema: table_exists = not database.get_df( f"SHOW TABLES IN {table.schema} LIKE '{table.table}'" ).empty else: table_exists = not database.get_df( f"SHOW TABLES LIKE '{table.table}'" ).empty if table_exists: raise SupersetException("Table already exists") elif to_sql_kwargs["if_exists"] == "replace": engine.execute(f"DROP TABLE IF EXISTS {str(table)}") def _get_hive_type(dtype: np.dtype) -> str: hive_type_by_dtype = { np.dtype("bool"): "BOOLEAN", np.dtype("float64"): "DOUBLE", np.dtype("int64"): "BIGINT", np.dtype("object"): "STRING", } return hive_type_by_dtype.get(dtype, "STRING") schema_definition = ", ".join( f"`{name}` {_get_hive_type(dtype)}" for name, dtype in df.dtypes.items() ) with tempfile.NamedTemporaryFile( dir=current_app.config["UPLOAD_FOLDER"], suffix=".parquet" ) as file: pq.write_table(pa.Table.from_pandas(df), where=file.name) engine.execute( text( f""" CREATE TABLE {str(table)} ({schema_definition}) STORED AS PARQUET LOCATION :location """ ), location=upload_to_s3( filename=file.name, upload_prefix=current_app.config[ "CSV_TO_HIVE_UPLOAD_DIRECTORY_FUNC" ](database, g.user, table.schema), table=table, ), ) @classmethod def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]: tt = target_type.upper() if tt == utils.TemporalType.DATE: return f"CAST('{dttm.date().isoformat()}' AS DATE)" if tt == utils.TemporalType.TIMESTAMP: return f"""CAST('{dttm .isoformat(sep=" ", timespec="microseconds")}' AS TIMESTAMP)""" return None @classmethod def adjust_database_uri( cls, uri: URL, selected_schema: Optional[str] = None ) -> None: if selected_schema: uri.database = parse.quote(selected_schema, safe="") @classmethod def _extract_error_message(cls, ex: Exception) -> str: msg = str(ex) match = re.search(r'errorMessage="(.*?)(?<!\\)"', msg) if match: msg = match.group(1) return msg @classmethod def progress(cls, log_lines: List[str]) -> int: total_jobs = 1 # assuming there's at least 1 job current_job = 1 stages: Dict[int, float] = {} for line in log_lines: match = cls.jobs_stats_r.match(line) if match: total_jobs = int(match.groupdict()["max_jobs"]) or 1 match = cls.launching_job_r.match(line) if match: current_job = int(match.groupdict()["job_number"]) total_jobs = int(match.groupdict()["max_jobs"]) or 1 stages = {} match = cls.stage_progress_r.match(line) if match: stage_number = int(match.groupdict()["stage_number"]) map_progress = int(match.groupdict()["map_progress"]) reduce_progress = int(match.groupdict()["reduce_progress"]) stages[stage_number] = (map_progress + reduce_progress) / 2 logger.info( "Progress detail: {}, " # pylint: disable=logging-format-interpolation "current job {}, " "total jobs: {}".format(stages, current_job, total_jobs) ) stage_progress = sum(stages.values()) / len(stages.values()) if stages else 0 progress = 100 * (current_job - 1) / total_jobs + stage_progress / total_jobs return int(progress) @classmethod def get_tracking_url(cls, log_lines: List[str]) -> Optional[str]: lkp = "Tracking URL = " for line in log_lines: if lkp in line: return line.split(lkp)[1] return None @classmethod def handle_cursor( # pylint: disable=too-many-locals cls, cursor: Any, query: Query, session: Session ) -> None: """Updates progress information""" from pyhive import hive unfinished_states = ( hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE, ) polled = cursor.poll() last_log_line = 0 tracking_url = None job_id = None query_id = query.id while polled.operationState in unfinished_states: query = 
session.query(type(query)).filter_by(id=query_id).one() if query.status == QueryStatus.STOPPED: cursor.cancel() break log = cursor.fetch_logs() or "" if log: log_lines = log.splitlines() progress = cls.progress(log_lines) logger.info( "Query %s: Progress total: %s", str(query_id), str(progress) ) needs_commit = False if progress > query.progress: query.progress = progress needs_commit = True if not tracking_url: tracking_url = cls.get_tracking_url(log_lines) if tracking_url: job_id = tracking_url.split("/")[-2] logger.info( "Query %s: Found the tracking url: %s", str(query_id), tracking_url, ) tracking_url = current_app.config["TRACKING_URL_TRANSFORMER"] logger.info( "Query %s: Transformation applied: %s", str(query_id), tracking_url, ) query.tracking_url = tracking_url logger.info("Query %s: Job id: %s", str(query_id), str(job_id)) needs_commit = True if job_id and len(log_lines) > last_log_line: # Wait for job id before logging things out # this allows for prefixing all log lines and becoming # searchable in something like Kibana for l in log_lines[last_log_line:]: logger.info("Query %s: [%s] %s", str(query_id), str(job_id), l) last_log_line = len(log_lines) if needs_commit: session.commit() time.sleep(current_app.config["HIVE_POLL_INTERVAL"]) polled = cursor.poll() @classmethod def get_columns( cls, inspector: Inspector, table_name: str, schema: Optional[str] ) -> List[Dict[str, Any]]: return inspector.get_columns(table_name, schema) @classmethod def where_latest_partition( # pylint: disable=too-many-arguments cls, table_name: str, schema: Optional[str], database: "Database", query: Select, columns: Optional[List[Dict[str, str]]] = None, ) -> Optional[Select]: try: col_names, values = cls.latest_partition( table_name, schema, database, show_first=True ) except Exception: # pylint: disable=broad-except # table is not partitioned return None if values is not None and columns is not None: for col_name, value in zip(col_names, values): for clm in columns: if clm.get("name") == col_name: query = query.where(Column(col_name) == value) return query return None @classmethod def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[ColumnClause]: return BaseEngineSpec._get_fields(cols) # pylint: disable=protected-access @classmethod def latest_sub_partition( cls, table_name: str, schema: Optional[str], database: "Database", **kwargs: Any ) -> str: # TODO(bogdan): implement` pass @classmethod def _latest_partition_from_df(cls, df: pd.DataFrame) -> Optional[List[str]]: """Hive partitions look like ds={partition name}""" if not df.empty: return [df.ix[:, 0].max().split("=")[1]] return None @classmethod def _partition_query( # pylint: disable=too-many-arguments cls, table_name: str, database: "Database", limit: int = 0, order_by: Optional[List[Tuple[str, bool]]] = None, filters: Optional[Dict[Any, Any]] = None, ) -> str: return f"SHOW PARTITIONS {table_name}" @classmethod def select_star( # pylint: disable=too-many-arguments cls, database: "Database", table_name: str, engine: Engine, schema: Optional[str] = None, limit: int = 100, show_cols: bool = False, indent: bool = True, latest_partition: bool = True, cols: Optional[List[Dict[str, Any]]] = None, ) -> str: return super( # pylint: disable=bad-super-call PrestoEngineSpec, cls ).select_star( database, table_name, engine, schema, limit, show_cols, indent, latest_partition, cols, ) @classmethod def modify_url_for_impersonation( cls, url: URL, impersonate_user: bool, username: Optional[str] ) -> None: """ Modify the SQL Alchemy URL object with the user 
to impersonate if applicable. :param url: SQLAlchemy URL object :param impersonate_user: Flag indicating if impersonation is enabled :param username: Effective username """ # Do nothing in the URL object since instead this should modify # the configuraiton dictionary. See get_configuration_for_impersonation @classmethod def update_impersonation_config( cls, connect_args: Dict[str, Any], uri: str, username: Optional[str], ) -> None: """ Update a configuration dictionary that can set the correct properties for impersonating users :param connect_args: :param uri: URI string :param impersonate_user: Flag indicating if impersonation is enabled :param username: Effective username :return: None """ url = make_url(uri) backend_name = url.get_backend_name() # Must be Hive connection, enable impersonation, and set optional param # auth=LDAP|KERBEROS # this will set hive.server2.proxy.user=$effective_username on connect_args['configuration'] if backend_name == "hive" and username is not None: configuration = connect_args.get("configuration", {}) configuration["hive.server2.proxy.user"] = username connect_args["configuration"] = configuration @staticmethod def execute( # type: ignore cursor, query: str, async_: bool = False ): # pylint: disable=arguments-differ kwargs = {"async": async_} cursor.execute(query, **kwargs) @classmethod @cache_manager.cache.memoize() def get_function_names(cls, database: "Database") -> List[str]: """ Get a list of function names that are able to be called on the database. Used for SQL Lab autocomplete. :param database: The database to get functions for :return: A list of function names useable in the database """ df = database.get_df("SHOW FUNCTIONS") if cls._show_functions_column in df: return df[cls._show_functions_column].tolist() columns = df.columns.values.tolist() logger.error( "Payload from `SHOW FUNCTIONS` has the incorrect format. " "Expected column `%s`, found: %s.", cls._show_functions_column, ", ".join(columns), exc_info=True, ) # if the results have a single column, use that if len(columns) == 1: return df[columns[0]].tolist() # otherwise, return no function names to prevent errors return [] @classmethod def is_readonly_query(cls, parsed_query: ParsedQuery) -> bool: """Pessimistic readonly, 100% sure statement won't mutate anything""" return ( super().is_readonly_query(parsed_query) or parsed_query.is_set() or parsed_query.is_show() )
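The progress() method above reduces Hive log lines to a single 0-100 number: finished jobs contribute whole 1/total_jobs chunks, and the current job contributes the average of its stages' map/reduce percentages. The standalone sketch below re-applies the same regexes and arithmetic to two invented log lines; it does not import Superset.

# Standalone sketch of the progress arithmetic above, on invented log lines.
import re

launching_job_r = re.compile(
    r".*INFO.*Launching Job (?P<job_number>[0-9]+) out of (?P<max_jobs>[0-9]+)")
stage_progress_r = re.compile(
    r".*INFO.*Stage-(?P<stage_number>[0-9]+).*"
    r"map = (?P<map_progress>[0-9]+)%.*reduce = (?P<reduce_progress>[0-9]+)%.*")

log_lines = [
    "17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5",
    "17/02/07 19:36:58 INFO exec.Task: Stage-18 map = 100%,  reduce = 50%",
]

total_jobs, current_job, stages = 1, 1, {}
for line in log_lines:
    m = launching_job_r.match(line)
    if m:
        current_job = int(m.group("job_number"))
        total_jobs = int(m.group("max_jobs")) or 1
        stages = {}
    m = stage_progress_r.match(line)
    if m:
        stages[int(m.group("stage_number"))] = (
            int(m.group("map_progress")) + int(m.group("reduce_progress"))) / 2

stage_progress = sum(stages.values()) / len(stages) if stages else 0
print(int(100 * (current_job - 1) / total_jobs + stage_progress / total_jobs))
# 20% for finishing job 1 of 5, plus one fifth of the 75% stage average -> 35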
apache-2.0
aurelieladier/openturns
validation/src/optimal_lhs/validate_MC_small.py
7
1877
#! /usr/bin/env python import openturns as ot import matplotlib import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from openturns.viewer import View import time ot.Log.Show(ot.Log.INFO) # Bounds are [0,1]^dimension dimension = 2 bounds = ot.Interval(dimension) nSimu = 10000 c2 = ot.SpaceFillingC2() # Size of sample size = 10 print("dimension=%d, size=%d"%(dimension, size)) for nSimu in [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200, 102400, 204800, 409600]: ot.RandomGenerator.SetSeed(0) # Factory: lhs generates lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setAlwaysShuffle(True) # randomized mc = ot.MonteCarloLHS(lhsDesign, nSimu, c2) tic = time.time() design = mc.generate() result = mc.getResult() toc = time.time() print("%d %f %f"%(nSimu,result.getOptimalValue(), toc-tic)) pp = PdfPages('small_mc_OTLHS.pdf') # plot criterion & save it crit = result.drawHistoryCriterion() fig = View(crit, plot_kwargs={'color':'blue'}).getFigure() pp.savefig(fig) plt.close(fig) minDist = ot.SpaceFillingMinDist() # Factory: lhs generates lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setAlwaysShuffle(True) # randomized mc = ot.MonteCarloLHS(lhsDesign, nSimu, minDist) tic = time.time() design = mc.generate() result = mc.getResult() toc = time.time() print("cpu time=%f"%(toc-tic)) print("dimension=%d, size=%d,mc=%s"%(dimension, size, mc)) print("optimal value="+ str(result.getOptimalValue())+" c2="+str(result.getC2())+" phiP="+str(result.getPhiP())+" minDist="+str(result.getMinDist())) # plot criterion & save it crit = result.drawHistoryCriterion() fig = View(crit, plot_kwargs={'color':'blue'}).getFigure() pp.savefig(fig) plt.close(fig) pp.close()
lgpl-3.0
ueshin/apache-spark
python/pyspark/pandas/tests/test_spark_functions.py
11
2127
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np

from pyspark.pandas.spark import functions as SF
from pyspark.pandas.utils import spark_column_equals
from pyspark.sql import functions as F
from pyspark.sql.types import (
    ByteType,
    FloatType,
    IntegerType,
    LongType,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase


class SparkFunctionsTests(PandasOnSparkTestCase):
    def test_lit(self):
        self.assertTrue(spark_column_equals(SF.lit(np.int64(1)), F.lit(1).astype(LongType())))
        self.assertTrue(spark_column_equals(SF.lit(np.int32(1)), F.lit(1).astype(IntegerType())))
        self.assertTrue(spark_column_equals(SF.lit(np.int8(1)), F.lit(1).astype(ByteType())))
        self.assertTrue(spark_column_equals(SF.lit(np.byte(1)), F.lit(1).astype(ByteType())))
        self.assertTrue(
            spark_column_equals(SF.lit(np.float32(1)), F.lit(float(1)).astype(FloatType()))
        )
        self.assertTrue(spark_column_equals(SF.lit(1), F.lit(1)))


if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_spark_functions import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
btabibian/scikit-learn
examples/hetero_feature_union.py
81
6241
""" ============================================= Feature Union with Heterogeneous Data Sources ============================================= Datasets can often contain components of that require different feature extraction and processing pipelines. This scenario might occur when: 1. Your dataset consists of heterogeneous data types (e.g. raster images and text captions) 2. Your dataset is stored in a Pandas DataFrame and different columns require different processing pipelines. This example demonstrates how to use :class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing different types of features. We use the 20-newsgroups dataset and compute standard bag-of-words features for the subject line and body in separate pipelines as well as ad hoc features on the body. We combine them (with weights) using a FeatureUnion and finally train a classifier on the combined set of features. The choice of features is not particularly helpful, but serves to illustrate the technique. """ # Author: Matt Terry <[email protected]> # # License: BSD 3 clause from __future__ import print_function import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.datasets import fetch_20newsgroups from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import classification_report from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.svm import SVC class ItemSelector(BaseEstimator, TransformerMixin): """For data grouped by feature, select subset of data at a provided key. The data is expected to be stored in a 2D data structure, where the first index is over features and the second is over samples. i.e. >> len(data[key]) == n_samples Please note that this is the opposite convention to scikit-learn feature matrixes (where the first index corresponds to sample). ItemSelector only requires that the collection implement getitem (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas DataFrame, numpy record array, etc. >> data = {'a': [1, 5, 2, 5, 2, 8], 'b': [9, 4, 1, 4, 1, 3]} >> ds = ItemSelector(key='a') >> data['a'] == ds.transform(data) ItemSelector is not designed to handle data grouped by sample. (e.g. a list of dicts). If your data is structured this way, consider a transformer along the lines of `sklearn.feature_extraction.DictVectorizer`. Parameters ---------- key : hashable, required The key corresponding to the desired value in a mappable. """ def __init__(self, key): self.key = key def fit(self, x, y=None): return self def transform(self, data_dict): return data_dict[self.key] class TextStats(BaseEstimator, TransformerMixin): """Extract features from each document for DictVectorizer""" def fit(self, x, y=None): return self def transform(self, posts): return [{'length': len(text), 'num_sentences': text.count('.')} for text in posts] class SubjectBodyExtractor(BaseEstimator, TransformerMixin): """Extract the subject & body from a usenet post in a single pass. Takes a sequence of strings and produces a dict of sequences. Keys are `subject` and `body`. 
""" def fit(self, x, y=None): return self def transform(self, posts): features = np.recarray(shape=(len(posts),), dtype=[('subject', object), ('body', object)]) for i, text in enumerate(posts): headers, _, bod = text.partition('\n\n') bod = strip_newsgroup_footer(bod) bod = strip_newsgroup_quoting(bod) features['body'][i] = bod prefix = 'Subject:' sub = '' for line in headers.split('\n'): if line.startswith(prefix): sub = line[len(prefix):] break features['subject'][i] = sub return features pipeline = Pipeline([ # Extract the subject & body ('subjectbody', SubjectBodyExtractor()), # Use FeatureUnion to combine the features from subject and body ('union', FeatureUnion( transformer_list=[ # Pipeline for pulling features from the post's subject line ('subject', Pipeline([ ('selector', ItemSelector(key='subject')), ('tfidf', TfidfVectorizer(min_df=50)), ])), # Pipeline for standard bag-of-words model for body ('body_bow', Pipeline([ ('selector', ItemSelector(key='body')), ('tfidf', TfidfVectorizer()), ('best', TruncatedSVD(n_components=50)), ])), # Pipeline for pulling ad hoc features from post's body ('body_stats', Pipeline([ ('selector', ItemSelector(key='body')), ('stats', TextStats()), # returns a list of dicts ('vect', DictVectorizer()), # list of dicts -> feature matrix ])), ], # weight components in FeatureUnion transformer_weights={ 'subject': 0.8, 'body_bow': 0.5, 'body_stats': 1.0, }, )), # Use a SVC classifier on the combined features ('svc', SVC(kernel='linear')), ]) # limit the list of categories to make running this example faster. categories = ['alt.atheism', 'talk.religion.misc'] train = fetch_20newsgroups(random_state=1, subset='train', categories=categories, ) test = fetch_20newsgroups(random_state=1, subset='test', categories=categories, ) pipeline.fit(train.data, train.target) y = pipeline.predict(test.data) print(classification_report(y, test.target))
bsd-3-clause
wbengine/SPMILM
egs/1-billion/run_trf_2.py
1
6271
import os import sys import numpy as np import matplotlib.pyplot as plt sys.path.insert(0, os.getcwd() + '/../../tools/') import wb import trf # revise this function to config the dataset used to train different model def data(tskdir): train = tskdir + 'data/train.txt' valid = tskdir + 'data/valid.txt' test = tskdir + 'data/test.txt' return data_verfy([train, valid, test]) + data_wsj92nbest() def data_verfy(paths): for w in paths: if not os.path.isfile(w): print('[ERROR] no such file: ' + w) return paths def data_wsj92nbest(): root = './data/WSJ92-test-data/' nbest = root + '1000best.sent' trans = root + 'transcript.txt' ac = root + '1000best.acscore' lm = root + '1000best.lmscore' return data_verfy([nbest, trans, ac, lm]) def evaulate_trf(model, vocab, read_model, tsize, fres): res_name = '{}:'.format(int(tsize)) + os.path.split(read_model)[-1] tskdir = '{}/'.format(tsize) # rescore config = ' -vocab {} '.format(vocab) config += ' -read {}.model '.format(read_model) config += ' -nbest {} '.format(data(tskdir)[3]) config += ' -lmscore {0}.lmscore'.format(read_model) model.use(config) # WER [read_nbest, read_templ, read_acscore, read_lmscore] = data(tskdir)[3:7] read_lmscore = read_model + '.lmscore' [wer, lmscale, acscale] = wb.TuneWER(read_nbest, read_templ, wb.LoadScore(read_lmscore), wb.LoadScore(read_acscore), np.linspace(0.1,0.9,9)) print('wer={:.4f} lmscale={:.2f} acscale={:.2f}'.format(wer, lmscale, acscale)) # calculate the ppl on wsj test templ_txt = model.workdir + os.path.split(read_templ)[-1] + '.rmlabel' wb.file_rmlabel(read_templ, templ_txt) PPL_templ = model.ppl(vocab, read_model+'.model', templ_txt) LL_templ = -wb.PPL2LL(PPL_templ, templ_txt) # output the result fres.Add(res_name, ['LL-wsj', 'PPL-wsj'], [LL_templ, PPL_templ]) fres.AddWER(res_name, wer) def main(): if len(sys.argv) == 1: print('\"python run.py -train\" train LSTM\n', '\"python run.py -rescore\" rescore nbest\n', '\"python run.py -wer\" compute WER' ) for tsize in [2]: bindir = '../../tools/trf/bin/' tskdir = '{}/'.format(tsize) workdir = tskdir + 'trflm/' fres = wb.FRes('result.txt') model = trf.model(bindir, workdir) class_num = 200 train = workdir + 'train.id' valid = workdir + 'valid.id' test = workdir + 'test.id' vocab = workdir + 'vocab_c{}.list'.format(class_num) order = 4 feat = 'g4_w_c_ws_cs_wsh_csh_tied.fs' #feat = 'g4_w_c_ws_cs_cpw.fs' maxlen = 100 tmax = 50000 t0 = 2000 minibatch = 100 gamma_lambda = '1000,0' gamma_zeta = '0,0.6' reg = 1e-5 thread = 8 write_model = workdir + 'trf_c{}_{}'.format(class_num, feat[0:-3]) write_name = '{}:{}'.format(tsize, os.path.split(write_model)[1]) if '-class' in sys.argv: # just cluster for each tsks. 
model.prepare(data(tskdir)[0], data(tskdir)[1], data(tskdir)[2], class_num) if '-train' in sys.argv or '-all' in sys.argv: config = '-vocab {} -train {} -valid {} -test {} '.format(vocab, train, valid, test) config += ' -order {} -feat {} '.format(order, feat) config += ' -len {} '.format(maxlen) config += ' -write {0}.model -log {0}.log '.format(write_model) config += ' -t0 {} -iter {}'.format(t0, tmax) config += ' -gamma-lambda {} -gamma-zeta {}'.format(gamma_lambda, gamma_zeta) config += ' -L2 {} '.format(reg) config += ' -mini-batch {} '.format(minibatch) config += ' -thread {} '.format(thread) config += ' -print-per-iter 10 -write-at-iter 10000:10000:{}'.format(tmax) model.prepare(data(tskdir)[0], data(tskdir)[1], data(tskdir)[2], class_num) model.train(config) # output LL = model.get_last_value(write_model + '.log') fres.AddLL(write_name, LL, data(tskdir)[0:3]) if '-plot' in sys.argv: baseline = fres.Get('{}:KN5'.format(tsize)) trf.PlotLog([write_model], [baseline]) if '-rescore' in sys.argv or '-all' in sys.argv: config = ' -vocab {} '.format(vocab) config += ' -read {}.model '.format(write_model) config += ' -nbest {} '.format(data(tskdir)[3]) config += ' -lmscore {0}.lmscore -lmscore-test-id {0}.test-id '.format(write_model) model.use(config) if '-wer' in sys.argv or '-all' in sys.argv: [read_nbest, read_templ, read_acscore, read_lmscore] = data(tskdir)[3:7] read_lmscore = write_model + '.lmscore' [wer, lmscale, acscale] = wb.TuneWER(read_nbest, read_templ, wb.LoadScore(read_lmscore), wb.LoadScore(read_acscore), np.linspace(0.1,0.9,9)) print('wer={:.4f} lmscale={:.2f} acscale={:.2f}'.format(wer, lmscale, acscale)) # calculate the ppl on wsj test write_templ_id = workdir + os.path.split(read_templ)[1] + '.id' v = trf.ReadVocab(vocab) trf.NbestToID(read_templ, write_templ_id, v) config = ' -vocab {} '.format(vocab) config += ' -read {}.model '.format(write_model) config += ' -test {} '.format(write_templ_id) LL_templ = model.use(config) PPL_templ = wb.LL2PPL(-LL_templ, write_templ_id) # output the result fres.Add(write_name, ['LL-wsj', 'PPL-wsj'], [LL_templ, PPL_templ]) fres.AddWER(write_name, wer) if '-inter' in sys.argv: # calculate the WER for intermediate models for n in np.linspace(10000, 40000, 4): inter_model = workdir + 'trf_c{}_{}.n{}'.format(class_num, feat[0:-3], int(n)) evaulate_trf(model, vocab, inter_model, tsize, fres) if __name__ == '__main__': main()
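wb.TuneWER above sweeps an LM scale against fixed acoustic scores when rescoring the n-best lists; its internals are not shown here, so the sketch below only mirrors the idea with invented scores: combine the two scores per hypothesis, pick the best hypothesis per utterance, and keep the scale with the fewest errors.

# Toy sketch of the lmscale sweep behind wb.TuneWER; scores are invented.
import numpy as np

# two utterances, two hypotheses each: (acoustic score, LM score, is_correct)
nbest = [
    [(-120.0, -10.0, True), (-118.0, -14.0, False)],
    [(-200.0, -22.0, False), (-203.0, -18.0, True)],
]

best_scale, best_err = None, None
for lmscale in np.linspace(0.1, 0.9, 9):
    errors = 0
    for hyps in nbest:
        total = [ac + lmscale * lm for ac, lm, _ in hyps]
        errors += 0 if hyps[int(np.argmax(total))][2] else 1
    if best_err is None or errors < best_err:
        best_scale, best_err = lmscale, errors
print("lmscale=%.1f errors=%d" % (best_scale, best_err))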
apache-2.0
plissonf/scikit-learn
examples/svm/plot_svm_scale_c.py
223
5375
""" ============================================== Scaling the regularization parameter for SVCs ============================================== The following example illustrates the effect of scaling the regularization parameter when using :ref:`svm` for :ref:`classification <svm_classification>`. For SVC classification, we are interested in a risk minimization for the equation: .. math:: C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w) where - :math:`C` is used to set the amount of regularization - :math:`\mathcal{L}` is a `loss` function of our samples and our model parameters. - :math:`\Omega` is a `penalty` function of our model parameters If we consider the loss function to be the individual error per sample, then the data-fit term, or the sum of the error for each sample, will increase as we add more samples. The penalization term, however, will not increase. When using, for example, :ref:`cross validation <cross_validation>`, to set the amount of regularization with `C`, there will be a different amount of samples between the main problem and the smaller problems within the folds of the cross validation. Since our loss function is dependent on the amount of samples, the latter will influence the selected value of `C`. The question that arises is `How do we optimally adjust C to account for the different amount of training samples?` The figures below are used to illustrate the effect of scaling our `C` to compensate for the change in the number of samples, in the case of using an `l1` penalty, as well as the `l2` penalty. l1-penalty case ----------------- In the `l1` case, theory says that prediction consistency (i.e. that under given hypothesis, the estimator learned predicts as well as a model knowing the true distribution) is not possible because of the bias of the `l1`. It does say, however, that model consistency, in terms of finding the right set of non-zero parameters as well as their signs, can be achieved by scaling `C1`. l2-penalty case ----------------- The theory says that in order to achieve prediction consistency, the penalty parameter should be kept constant as the number of samples grow. Simulations ------------ The two figures below plot the values of `C` on the `x-axis` and the corresponding cross-validation scores on the `y-axis`, for several different fractions of a generated data-set. In the `l1` penalty case, the cross-validation-error correlates best with the test-error, when scaling our `C` with the number of samples, `n`, which can be seen in the first figure. For the `l2` penalty case, the best result comes from the case where `C` is not scaled. .. topic:: Note: Two separate datasets are used for the two different plots. The reason behind this is the `l1` case works better on sparse data, while `l2` is better suited to the non-sparse case. 
""" print(__doc__) # Author: Andreas Mueller <[email protected]> # Jaques Grobler <[email protected]> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.svm import LinearSVC from sklearn.cross_validation import ShuffleSplit from sklearn.grid_search import GridSearchCV from sklearn.utils import check_random_state from sklearn import datasets rnd = check_random_state(1) # set up dataset n_samples = 100 n_features = 300 # l1 data (only 5 informative features) X_1, y_1 = datasets.make_classification(n_samples=n_samples, n_features=n_features, n_informative=5, random_state=1) # l2 data: non sparse, but less features y_2 = np.sign(.5 - rnd.rand(n_samples)) X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis] X_2 += 5 * rnd.randn(n_samples, n_features / 5) clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False, tol=1e-3), np.logspace(-2.3, -1.3, 10), X_1, y_1), (LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=1e-4), np.logspace(-4.5, -2, 10), X_2, y_2)] colors = ['b', 'g', 'r', 'c'] for fignum, (clf, cs, X, y) in enumerate(clf_sets): # set up the plot for each regressor plt.figure(fignum, figsize=(9, 10)) for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]): param_grid = dict(C=cs) # To get nice curve, we need a large number of iterations to # reduce the variance grid = GridSearchCV(clf, refit=False, param_grid=param_grid, cv=ShuffleSplit(n=n_samples, train_size=train_size, n_iter=250, random_state=1)) grid.fit(X, y) scores = [x[1] for x in grid.grid_scores_] scales = [(1, 'No scaling'), ((n_samples * train_size), '1/n_samples'), ] for subplotnum, (scaler, name) in enumerate(scales): plt.subplot(2, 1, subplotnum + 1) plt.xlabel('C') plt.ylabel('CV Score') grid_cs = cs * float(scaler) # scale the C's plt.semilogx(grid_cs, scores, label="fraction %.2f" % train_size) plt.title('scaling=%s, penalty=%s, loss=%s' % (name, clf.penalty, clf.loss)) plt.legend(loc="best") plt.show()
bsd-3-clause
jaredwo/topowx
twx/utils/config.py
1
10923
from ConfigParser import ConfigParser from twx.utils import ymdL, mkdir_p import numpy as np import os import pandas as pd class TwxConfig(): '''Class to load and access TopoWx configuration settings in a INI file. Upon initialization, also creates necessary sub-directories in the TopoWx data root directory if they do not exist. Example TopoWx INI File: [TOPOWX_CONFIG] # Path to TopoWx data root TWX_DATA_ROOT=[a path] # Lon/lat bounding box for station observations STN_BBOX=-126.0,22.0,-64.0,53.0 # Start date for which to process station observations OBS_START_DATE=1895-01-01 # End data for which to process station observations OBS_END_DATE=2016-03-29 # Start date for interpolation INTERP_START_DATE=1948-01-01 # End date for interpolation INTERP_END_DATE=2015-12-31 # Station observation elements to process OBS_ELEMS=tmin,tmax,prcp,tobs_tmin,tobs_tmax,tobs_prcp # Primary station observation elements OBS_MAIN_ELEMS=tmin,tmax,prcp # Station chunk size for which to load and process station observations STN_READ_CHUNK_GHCND=100 STN_READ_CHUNK_SNOTEL=20 STN_READ_CHUNK_RAWS=20 # Station chunk size for loading and writing to netcdf file STN_WRITE_CHUNK_NC=100 # Station chunk size for creating aggregated data (e.g.--monthly from daily) STN_AGG_CHUNK=1000 # A geonames username for accessing DEM elevation services USERNAME_GEONAMES=[a username] ''' def __init__(self, fpath_ini): cfg = ConfigParser() cfg.read(fpath_ini) self.twx_data_root = cfg.get('TOPOWX_CONFIG', 'twx_data_root') self.obs_start_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG', 'obs_start_date')) self.obs_end_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG', 'obs_end_date')) self.interp_start_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG', 'interp_start_date')) self.interp_end_date = pd.Timestamp(cfg.get('TOPOWX_CONFIG', 'interp_end_date')) bbox_str = cfg.get('TOPOWX_CONFIG', 'stn_bbox') self.stn_bbox = tuple([np.float(i) for i in bbox_str.split(',')]) self.obs_elems = tuple(cfg.get('TOPOWX_CONFIG', 'obs_elems').split(',')) self.obs_main_elems = tuple(cfg.get('TOPOWX_CONFIG', 'obs_main_elems').split(',')) self.stn_read_chunk_ghcnd = int(cfg.get('TOPOWX_CONFIG', 'stn_read_chunk_ghcnd')) self.stn_read_chunk_snotel = int(cfg.get('TOPOWX_CONFIG', 'stn_read_chunk_snotel')) self.stn_read_chunk_raws = int(cfg.get('TOPOWX_CONFIG', 'stn_read_chunk_raws')) self.stn_write_chunk_nc = int(cfg.get('TOPOWX_CONFIG', 'stn_write_chunk_nc')) self.stn_agg_chunk = int(cfg.get('TOPOWX_CONFIG', 'stn_agg_chunk')) self.username_geonames = cfg.get('TOPOWX_CONFIG', 'username_geonames') self.fpath_log_daily_infill = cfg.get('TOPOWX_CONFIG', 'fpath_log_daily_infill') self.twx_data_version = cfg.get('TOPOWX_CONFIG', 'twx_data_version') # Make TopoWx data directory for local storage of station observations self.path_stndata = os.path.join(self.twx_data_root, 'station_data') mkdir_p(self.path_stndata) fname_stndata_hdf_ghcnd = 'obs_ghcnd_%d_%d.hdf' % (ymdL(self.obs_start_date), ymdL(self.obs_end_date)) self.fpath_stndata_hdf_ghcnd = os.path.join(self.path_stndata, fname_stndata_hdf_ghcnd) fname_stndata_hdf_snotel = 'obs_snotel_%d_%d.hdf' % (ymdL(self.obs_start_date), ymdL(self.obs_end_date)) self.fpath_stndata_hdf_snotel = os.path.join(self.path_stndata, fname_stndata_hdf_snotel) fname_stndata_hdf_raws = 'obs_raws_%d_%d.hdf' % (ymdL(self.obs_start_date), ymdL(self.obs_end_date)) self.fpath_stndata_hdf_raws = os.path.join(self.path_stndata, fname_stndata_hdf_raws) fname_stndata_nc_all = 'obs_all_%d_%d.nc' % (ymdL(self.obs_start_date), ymdL(self.obs_end_date)) 
self.fpath_stndata_nc_all = os.path.join(self.path_stndata, fname_stndata_nc_all) fname_stndata_nc_tair_tobs_adj = 'tair_tobs_adj_%d_%d.nc' % (ymdL(self.obs_start_date), ymdL(self.obs_end_date)) self.fpath_stndata_nc_tair_tobs_adj = os.path.join(self.path_stndata, fname_stndata_nc_tair_tobs_adj) fname_stndata_nc_tair_homog = 'tair_homog_%d_%d.nc' % (ymdL(self.obs_start_date), ymdL(self.obs_end_date)) self.fpath_stndata_nc_tair_homog = os.path.join(self.path_stndata, fname_stndata_nc_tair_homog) self.fpath_locqa_hdf = os.path.join(self.path_stndata, 'locqa.hdf') self.fpath_locqa_fail_csv = os.path.join(self.path_stndata, 'locqa_fail.csv') # Make TopoWx data directory for PHA-based homogenization self.path_homog_pha = os.path.join(self.path_stndata, 'homog') mkdir_p(self.path_homog_pha) self.fpath_pha_tgz = os.path.join(self.path_homog_pha, 'phav52i.tar.gz') # Make TopoWx data directories for reanalysis data self.path_reanalysis_data = os.path.join(self.twx_data_root, 'reanalysis_data') mkdir_p(self.path_reanalysis_data) self.path_reanalysis_namerica = os.path.join(self.path_reanalysis_data, 'n_america_subset') mkdir_p(self.path_reanalysis_namerica) # Make TopoWx data directory for infilled station observations self.path_stndata_infill = os.path.join(self.path_stndata, 'infill') mkdir_p(self.path_stndata_infill) self.fpath_xval_infill_nc = os.path.join(self.path_stndata_infill, 'xval_infill_tair.nc') self.fpath_stndata_nc_infill_tmin = os.path.join(self.path_stndata_infill, 'infill_tmin.nc') self.fpath_stndata_nc_infill_tmax = os.path.join(self.path_stndata_infill, 'infill_tmax.nc') self.fpath_flagged_bad_stns = os.path.join(self.path_stndata_infill, 'bad_stns.csv') self.fpath_stndata_nc_serial_tmin = os.path.join(self.path_stndata_infill, 'serial_tmin.nc') self.fpath_stndata_nc_serial_tmax = os.path.join(self.path_stndata_infill, 'serial_tmax.nc') # Make data directories for storing interp param optimization files # Temperature normals self.path_interp_optim_norms = os.path.join(self.path_stndata_infill, 'optim_norm') mkdir_p(self.path_interp_optim_norms) # Daily anomalies self.path_interp_optim_anoms = os.path.join(self.path_stndata_infill, 'optim_anom') mkdir_p(self.path_interp_optim_anoms) self.fpath_xval_interp_nc_tmin = os.path.join(self.path_stndata_infill, 'xval_interp_tmin.nc') self.fpath_xval_interp_nc_tmax = os.path.join(self.path_stndata_infill, 'xval_interp_tmax.nc') # Make TopoWx data directory for raster data self.path_rasters = os.path.join(self.twx_data_root, 'rasters') mkdir_p(self.path_rasters) self.path_predictor_rasters = os.path.join(self.path_rasters, 'conus_interp_grids', 'ncdf') mkdir_p(self.path_predictor_rasters) # Make TopoWx data directory for writing output tiles self.path_tile_out = os.path.join(self.twx_data_root, 'tile_output') mkdir_p(self.path_tile_out) # Make TopoWx log directory self.path_logs = os.path.join(self.twx_data_root, 'logs') mkdir_p(self.path_logs) ################################## # Make TopoWx data directory for final outputs ################################## self.path_final_output = os.path.join(self.twx_data_root, 'final_output_data') mkdir_p(self.path_final_output) # Final auxiliary data directories self.path_aux_data = os.path.join(self.path_final_output, 'auxiliary_data') mkdir_p(self.path_aux_data) self.path_aux_stndata = os.path.join(self.path_aux_data, 'station_data') mkdir_p(self.path_aux_stndata) self.fpath_stndata_nc_aux_tmin = os.path.join(self.path_aux_stndata, 'stn_obs_tmin.nc') self.fpath_stndata_nc_aux_tmax = 
os.path.join(self.path_aux_stndata, 'stn_obs_tmax.nc') self.fpath_pha_adj_aux = os.path.join(self.path_aux_stndata, 'homog_adjust.csv') self.path_aux_grids = os.path.join(self.path_aux_data, 'auxiliary_grids') mkdir_p(self.path_aux_grids) # Final TopoWx output mosaics for normals, daily, and monthly data self.path_mosaic_norms = os.path.join(self.path_final_output, 'normals') mkdir_p(self.path_mosaic_norms) self.path_mosaic_daily = os.path.join(self.path_final_output, 'daily') mkdir_p(self.path_mosaic_daily) self.path_mosaic_monthly = os.path.join(self.path_final_output, 'monthly') mkdir_p(self.path_mosaic_monthly)
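TwxConfig above is a thin typed wrapper around a single [TOPOWX_CONFIG] section; note that it relies on the Python 2 ConfigParser module and on np.float, both of which are gone in current Python/NumPy. A minimal sketch of the same pattern on Python 3 (the INI text here is made up and only shows two of the keys):

from configparser import ConfigParser
from io import StringIO

ini_text = """
[TOPOWX_CONFIG]
TWX_DATA_ROOT=/tmp/twx_data
STN_BBOX=-126.0,22.0,-64.0,53.0
"""

cfg = ConfigParser()
cfg.read_file(StringIO(ini_text))   # cfg.read(fpath_ini) for a file on disk
# option names are case-insensitive, so lookups mirror the original code
data_root = cfg.get('TOPOWX_CONFIG', 'twx_data_root')
stn_bbox = tuple(float(v) for v in cfg.get('TOPOWX_CONFIG', 'stn_bbox').split(','))
print(data_root, stn_bbox)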
gpl-3.0
cdiazbas/MPySIR
all2maps.py
1
4874
# Author: [email protected] import matplotlib.pyplot as plt import pyLib.imtools as imtools import numpy as np # ========================= CREANDO PHIMAP import matplotlib.colors as mcolors def make_colormap(seq): seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3] cdict = {'red': [], 'green': [], 'blue': []} for i, item in enumerate(seq): if isinstance(item, float): r1, g1, b1 = seq[i - 1] r2, g2, b2 = seq[i + 1] cdict['red'].append([item, r1, r2]) cdict['green'].append([item, g1, g2]) cdict['blue'].append([item, b1, b2]) return mcolors.LinearSegmentedColormap('CustomMap', cdict) c = mcolors.ColorConverter().to_rgb phimap = make_colormap([c('white'), c('tomato'), 0.33, c('tomato'), c('deepskyblue'), 0.66, c('deepskyblue'),c('white')]) def dimMap(resultadoSir): height = resultadoSir.shape[0]*(resultadoSir[0][-1][0][0]+1) width = (resultadoSir[0][-1][0][1]+1) return [height, width] def readmapa(resultadoSir, mapa, magnitud): cont = 0 for fila in range(0, height): for columna in range(0, width): punto = cont % resultadoSir.shape[1] veces = int(cont/resultadoSir.shape[1]) if magnitud == 8 or magnitud == 9 or magnitud == 10 or magnitud == 11: mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud] else: mapa[columna,fila] = resultadoSir[veces][punto][1][0][magnitud][index] cont += 1 return mapa def corrphi(mapa): mapa[mapa<0] = (mapa[mapa<0]+360) % 360; mapa[mapa>180] = (mapa[mapa>180]-180) def do1map(logTau, magnitud): # ============================================================================================== # global index # global magnitud # ========================= INPUT invSir1 = 'MAPA1.npy' invSir2 = 'MAPA2.npy' # logTau = 0.0 # magnitud = 2 # hsv cmapArray = ['gray','gray','gray','bone','bone','seismic','Spectral_r',phimap,'bone','gray','gray','cubehelix'] magTitle = [r'${\rm log(\tau)=}$',r'${\rm T\ [kK]}$','p',r'${\rm v\ [km/s]}$',r'${\rm B\ [kG]}$',r'${\rm v\ [km/s]}$',r'${\rm \gamma\ [d]}$',r'${\rm \phi\ [d]}$','vmacro','fillingf','difusa',r'${\rm \chi^2}$'] magFile = ['TAU','TEMP','PRESION','VMICRO','CAMPO','VLOS','GAMMA','PHI','VMACRO','FILLING','DIFUSA','CHI2'] # ========================= MAP resultadoSir1 = np.load(invSir1) resultadoSir2 = np.load(invSir2) # height, width = dimMap(resultadoSir1) # print('height:',height,'width:',width) # mapa = np.zeros((height, width)) index = np.where(resultadoSir1[0][0][1][0][0] == logTau)[0][0] print('logTau: '+str(logTau)+' -> index: '+str(index)) # readmapa(resultadoSir1, mapa.T ,magnitud) from pySir import sirtools as st mapa1 = st.readSIRMap(resultadoSir1, magnitud, index) mapa2 = st.readSIRMap(resultadoSir2, magnitud, index) mapa = np.concatenate((mapa1, mapa2)) from scipy import ndimage mapa = ndimage.median_filter(np.flipud(mapa), 3) # Limites en la escala de color if magnitud == 7: corrphi(mapa) print('3sigma_map: {0:2.2f}'.format(3*np.std(mapa))) print('Mean_map: {0:2.2f}'.format(np.mean(mapa))) print('Min_map: {0:2.2f}'.format(np.min(mapa))) print('Max_map: {0:2.2f}'.format(np.max(mapa))) vmini = np.mean(mapa)-3*np.std(mapa) if np.min(mapa) >= 0.0 and magnitud != 1: vmini = 0. vmaxi = np.mean(mapa)+3*np.std(mapa) if magnitud == 1 or magnitud == 4: vmini = np.min(mapa); vmaxi = np.max(mapa) if magnitud == 6: vmaxi = 180. if magnitud == 7: vmaxi = 180.;vmini = 0. if magnitud == 11: vmaxi = np.max(mapa); vmini = 0. 
if magnitud == 5: vmini = np.mean(mapa)-4*np.std(mapa); vmaxi = -vmini from matplotlib.colors import LogNorm plt.imshow(mapa,cmap=cmapArray[magnitud],origin='lower',interpolation='None',vmin=vmini,vmax=vmaxi)#norm=LogNorm() plt.title('Map 17jun14.006 (1-2)') plt.xlabel('Slit Axis [pix]') plt.ylabel('Time Axis [pix]') cb = plt.colorbar(shrink=.46)#, ticks=[0.6, 0.8, 1., 1.2]) #cb = plt.colorbar(shrink=.46, ticks=[0.3, 0.6, 0.9, 1.2, 1.5]) # cb.set_label(r'Intensity HeI ({0:4.1f}) /$I_{{qs}}$({1:4.1f})'.format(xLambda[341],xLambda[posicontinuo]), labelpad=5., y=0.5, fontsize=12.) loglabel = r'${\rm log(\tau)=}$' cb.set_label(r""+magTitle[magnitud]+r", "+loglabel+"{0}".format(logTau), labelpad=8., y=0.5, fontsize=12.) # plt.show() plt.savefig(magFile[magnitud]+'_log{0:02d}.pdf'.format(int(logTau)), bbox_inches='tight') print(magFile[magnitud]+'_log{0:02d}.pdf SAVE'.format(int(logTau))) print('-----------------------'+str(magnitud)) plt.clf() for magnitud in range(12): do1map(0.0, magnitud)
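One detail worth calling out from the file above: corrphi() folds the azimuth map into [0, 180) because the field azimuth is only defined modulo 180 degrees. A quick check of what it does to a few values (reusing corrphi from the file above):

import numpy as np

angles = np.array([-30.0, 10.0, 200.0, 359.0])
corrphi(angles)    # wraps negatives by 360, then reduces values above 180 by 180
print(angles)      # -> [150.  10.  20. 179.]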
mit
timberhill/blablaplot
blablaplot.py
1
6659
#!/usr/bin/python

from numpy import loadtxt, asarray
from numpy.random import normal as gaussian_noise
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import warnings

"""
Here you register new characters in format:
    '<char>' : (<width>, <height>, '<filename>'),
"""
charlist = {
    'a' : (0.7, 1.0, 'a'),
    'b' : (0.7, 1.0, 'b'),
    'c' : (0.7, 1.0, 'c'),
    'd' : (0.7, 1.0, 'd'),
    'e' : (0.7, 1.0, 'e'),
    'f' : (0.7, 1.0, 'f'),
    'g' : (0.7, 1.0, 'g'),
    'h' : (0.7, 1.0, 'h'),
    'i' : (0.4, 1.0, 'i'),
    'j' : (0.4, 1.0, 'j'),
    'k' : (0.7, 1.0, 'k'),
    'l' : (0.7, 1.0, 'l'),
    'm' : (0.7, 1.0, 'm'),
    'n' : (0.7, 1.0, 'n'),
    'o' : (0.7, 1.0, 'o'),
    'p' : (0.7, 1.0, 'p'),
    'q' : (0.7, 1.0, 'q'),
    'r' : (0.7, 1.0, 'r'),
    's' : (0.7, 1.0, 's'),
    't' : (0.7, 1.0, 't'),
    'u' : (0.7, 1.0, 'u'),
    'v' : (0.7, 1.0, 'v'),
    'w' : (0.7, 1.0, 'w'),
    'x' : (0.7, 1.0, 'x'),
    'y' : (0.7, 1.0, 'y'),
    'z' : (0.7, 1.0, 'z'),
    '0' : (0.7, 1.0, '0'),
    '1' : (0.5, 1.0, '1'),
    '2' : (0.7, 1.0, '2'),
    '3' : (0.7, 1.0, '3'),
    '4' : (0.7, 1.0, '4'),
    '5' : (0.7, 1.0, '5'),
    '6' : (0.7, 1.0, '6'),
    '7' : (0.7, 1.0, '7'),
    '8' : (0.7, 1.0, '8'),
    '9' : (0.7, 1.0, '9'),
    ' ' : (0.7, 0.0, 'space'),
    '?' : (0.7, 1.0, 'questionmark'),
    '!' : (0.2, 1.0, 'exclamationmark'),
    ',' : (0.1, 0.1, 'comma'),
    '.' : (0.2, 0.1, 'fullstop'),
    '&' : (0.6, 1.0, 'ampersand'),
    '$' : (0.5, 1.0, 'dollar'),
    '@' : (0.7, 1.0, 'at'),
    '(' : (0.3, 1.0, 'brackets_open'),
    ')' : (0.3, 1.0, 'brackets_close'),
    '#' : (0.7, 1.0, 'hash'),
    '%' : (0.7, 1.0, 'percent'),
}


class Character(object):
    """
    ARGUMENTS
    char    - single character (first one is chosen)
    size    - size of the letter (width, height)
    self.xs, self.ys - arrays with letter points
    """
    def __init__(self, char, filename='', size=(1.0, 1.0), jitter=0.0):
        if len(char) < 1:
            raise Exception('Empty string is passed to Character() constructor.')
        self.char = char[0]
        if len(filename) > 0:
            self.filename = filename
        else:
            # fall back to the character itself as the file name;
            # _getPoints() adds the 'chars/' prefix and '.dat' suffix
            self.filename = self.char
        self._getPoints()
        self.resize(size=size)

    def _getPoints(self):
        xs, ys = loadtxt('chars/' + self.filename + '.dat', unpack=True)
        self.xs = asarray(xs)
        self.ys = asarray(ys)
        self._sort()

    def _sort(self):
        points = zip(self.xs, self.ys)
        sorted_points = sorted(points)
        self.xs = asarray([point[0] for point in sorted_points])
        self.ys = asarray([point[1] for point in sorted_points])

    def resize(self, size=(1.0, 1.0)):
        self.size = size
        if len(self.xs) < 1:
            self._getPoints()
        xmin = min(self.xs)
        xmax = max(self.xs)
        ymin = min(self.ys)
        ymax = max(self.ys)
        for i in range(0, len(self.xs)):
            self.xs[i] = self.size[0] * (self.xs[i] - xmin) / (xmax - xmin)
            self.ys[i] = self.size[1] * (self.ys[i] - ymin) / (ymax - ymin)


class TextyPloty(object):
    """
    ARGUMENTS
    jitter  - to randomize points locations, represents sigma for gaussian noise
    spacing - distance between letters
    offset  - offset from zero point if format (x, y)
    scale   - scale/size of the letters
    func    - function to add text to
    """
    def __init__(self, jitter=0.0, spacing=0.1, offset=(0.0, 0.0),
                 scale=(1.0, 1.0), func=None):
        self.jitter = jitter
        self.spacing = spacing
        self.offset = offset
        self.scale = scale
        self.func = func
        self.charlist = charlist

    """
    ARGUMENTS
    text - string to plot

    RETURNS
    xs, ys - points coordinates
    """
    def get(self, text):
        xs, ys = [], []
        xoffset = self.offset[0]
        for char in text:
            if char == ' ':
                xoffset += self.charlist[char][0] * self.scale[0]
            elif char == '\t':
                # tabs are not registered in charlist; advance by four space widths
                xoffset += self.charlist[' '][0] * self.scale[0] * 4
            elif char in self.charlist:
                charobj = Character(char=char,
                                    filename=self.charlist[char][2],
                                    size=self.charlist[char])
                xs.extend(self.scale[0] * charobj.xs + xoffset)
                ys.extend(self.scale[1] * charobj.ys + self.offset[1])
                xoffset += self.charlist[char][0] * self.scale[0]
            else:
                warnings.warn('Could not find file with "' + char + '" character. Skipping...', Warning)
            xoffset += self.spacing * self.scale[0]

        if self.func != None:
            for i in range(0, len(xs)):
                ys[i] += self.func(xs[i])

        if self.jitter > 0:
            noise = gaussian_noise(0.0, self.jitter * self.scale[1], (len(ys)))
            ys = [x + y for x, y in zip(ys, noise)]

        return asarray(xs), asarray(ys)


class ResidualsPlot(object):
    """
    """
    def __init__(self, data=([], []), datastyle='k.', xs_fit=[], func=None, fitstyle='r-',
                 xlabel='', ylabel='', reslabel='', ratio=[4, 1], figsize=(10, 6),
                 axis=None, res_axis=None, fitlabel='fit', datalabel='points'):
        self.plt_instance = plt
        self.xs = data[0]
        self.ys = data[1]
        self.datastyle = datastyle
        self.xs_fit = xs_fit
        self.func = func
        self.ys_fit = self.func(self.xs_fit)
        self.fitstyle = fitstyle
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.reslabel = reslabel
        self.ratio = ratio
        self.figsize = figsize
        self.axis = axis
        self.res_axis = res_axis
        self.fitlabel = fitlabel
        self.datalabel = datalabel

    def draw(self):
        self.redraw()

    def redraw(self):
        self.plt_instance = plt
        self.plt_instance.figure(figsize=self.figsize)
        self.gridspec_instance = gridspec.GridSpec(2, 1, height_ratios=self.ratio)
        self.gridspec_instance.update(hspace=0.00)
        self.ax0 = self.plt_instance.subplot(self.gridspec_instance[0])
        self.ax1 = self.plt_instance.subplot(self.gridspec_instance[1])
        self.ys_res = self.ys - self.func(self.xs)

        # set axis ranges
        if self.axis == None:
            self.ax0.axis([min(self.xs_fit) * 1.1, max(self.xs_fit) * 1.1,
                           min(self.ys_fit) * 1.1, max(self.ys_fit) * 1.1])
        elif len(self.axis) != 4:
            raise Exception('ResidualsPlot: axis should contain 4 numbers: (x1, x2, y1, y2)')
        else:
            self.ax0.axis(self.axis)

        if self.res_axis == None:
            self.ax1.axis([min(self.xs_fit) * 1.1, max(self.xs_fit) * 1.1,
                           min(self.ys_res) * 1.1, max(self.ys_res) * 1.1])
        elif len(self.res_axis) != 4:
            raise Exception('ResidualsPlot: res_axis should contain 4 numbers: (x1, x2, y1, y2)')
        else:
            self.ax1.axis(self.res_axis)

        # set axis labels
        self.ax0.set_ylabel(self.ylabel)
        self.ax1.set_ylabel(self.reslabel)
        self.ax1.set_xlabel(self.xlabel)

        # first subplot: datapoints and fit
        self.ax0.plot(self.xs_fit, self.ys_fit, self.fitstyle, label=self.fitlabel)
        self.ax0.plot(self.xs, self.ys, self.datastyle, label=self.datalabel)

        # second subplot: residuals
        self.ax1.plot([min(self.xs), max(self.xs)], [0, 0], self.fitstyle)
        self.ax1.plot(self.xs, self.ys_res, self.datastyle)

        self.ax0.legend(loc="upper right")

    def show(self):
        self.plt_instance.show()

    def savefig(self, name='plot.pdf'):
        self.plt_instance.savefig(name)
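Usage is not shown in the module above; a plausible sketch, assuming the chars/*.dat point files shipped with the repository are present in the working directory:

import matplotlib.pyplot as plt

# render a string as scatter points riding on a parabola, with a little jitter
tp = TextyPloty(jitter=0.03, spacing=0.2, offset=(0.0, 0.0),
                scale=(1.0, 2.0), func=lambda x: 0.1 * x ** 2)
xs, ys = tp.get('hello world')
plt.plot(xs, ys, 'k.')
plt.savefig('hello.png')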
mit
tiankanl/2014_fall_ASTR599
notebooks/fig_code/helpers.py
74
2301
""" Small helpers for code that is not shown in the notebooks """ from sklearn import neighbors, datasets, linear_model import pylab as pl import numpy as np from matplotlib.colors import ListedColormap # Create color maps for 3-class classification problem, as with iris cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) def plot_iris_knn(): iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target knn = neighbors.KNeighborsClassifier(n_neighbors=3) knn.fit(X, y) x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1 y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1 xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100)) Z = knn.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) pl.figure() pl.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold) pl.xlabel('sepal length (cm)') pl.ylabel('sepal width (cm)') pl.axis('tight') def plot_polynomial_regression(): rng = np.random.RandomState(0) x = 2*rng.rand(100) - 1 f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9 y = f(x) + .4 * rng.normal(size=100) x_test = np.linspace(-1, 1, 100) pl.figure() pl.scatter(x, y, s=4) X = np.array([x**i for i in range(5)]).T X_test = np.array([x_test**i for i in range(5)]).T regr = linear_model.LinearRegression() regr.fit(X, y) pl.plot(x_test, regr.predict(X_test), label='4th order') X = np.array([x**i for i in range(10)]).T X_test = np.array([x_test**i for i in range(10)]).T regr = linear_model.LinearRegression() regr.fit(X, y) pl.plot(x_test, regr.predict(X_test), label='9th order') pl.legend(loc='best') pl.axis('tight') pl.title('Fitting a 4th and a 9th order polynomial') pl.figure() pl.scatter(x, y, s=4) pl.plot(x_test, f(x_test), label="truth") pl.axis('tight') pl.title('Ground truth (9th order polynomial)')
apache-2.0
mwv/scikit-learn
examples/datasets/plot_random_dataset.py
348
2254
""" ============================================== Plot randomly generated classification dataset ============================================== Plot several randomly generated 2D classification datasets. This example illustrates the :func:`datasets.make_classification` :func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles` functions. For ``make_classification``, three binary and two multi-class classification datasets are generated, with different numbers of informative features and clusters per class. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_gaussian_quantiles plt.figure(figsize=(8, 8)) plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95) plt.subplot(321) plt.title("One informative feature, one cluster per class", fontsize='small') X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1, n_clusters_per_class=1) plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1) plt.subplot(322) plt.title("Two informative features, one cluster per class", fontsize='small') X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2, n_clusters_per_class=1) plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1) plt.subplot(323) plt.title("Two informative features, two clusters per class", fontsize='small') X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2) plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2) plt.subplot(324) plt.title("Multi-class, two informative features, one cluster", fontsize='small') X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2, n_clusters_per_class=1, n_classes=3) plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1) plt.subplot(325) plt.title("Three blobs", fontsize='small') X1, Y1 = make_blobs(n_features=2, centers=3) plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1) plt.subplot(326) plt.title("Gaussian divided into three quantiles", fontsize='small') X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3) plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1) plt.show()
bsd-3-clause
r-mart/scikit-learn
examples/mixture/plot_gmm_selection.py
248
3223
""" ================================= Gaussian Mixture Model Selection ================================= This example shows that model selection can be performed with Gaussian Mixture Models using information-theoretic criteria (BIC). Model selection concerns both the covariance type and the number of components in the model. In that case, AIC also provides the right result (not shown to save time), but BIC is better suited if the problem is to identify the right model. Unlike Bayesian procedures, such inferences are prior-free. In that case, the model with 2 components and full covariance (which corresponds to the true generative model) is selected. """ print(__doc__) import itertools import numpy as np from scipy import linalg import matplotlib.pyplot as plt import matplotlib as mpl from sklearn import mixture # Number of samples per component n_samples = 500 # Generate random sample, two components np.random.seed(0) C = np.array([[0., -0.1], [1.7, .4]]) X = np.r_[np.dot(np.random.randn(n_samples, 2), C), .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])] lowest_bic = np.infty bic = [] n_components_range = range(1, 7) cv_types = ['spherical', 'tied', 'diag', 'full'] for cv_type in cv_types: for n_components in n_components_range: # Fit a mixture of Gaussians with EM gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type) gmm.fit(X) bic.append(gmm.bic(X)) if bic[-1] < lowest_bic: lowest_bic = bic[-1] best_gmm = gmm bic = np.array(bic) color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y']) clf = best_gmm bars = [] # Plot the BIC scores spl = plt.subplot(2, 1, 1) for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)): xpos = np.array(n_components_range) + .2 * (i - 2) bars.append(plt.bar(xpos, bic[i * len(n_components_range): (i + 1) * len(n_components_range)], width=.2, color=color)) plt.xticks(n_components_range) plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()]) plt.title('BIC score per model') xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\ .2 * np.floor(bic.argmin() / len(n_components_range)) plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14) spl.set_xlabel('Number of components') spl.legend([b[0] for b in bars], cv_types) # Plot the winner splot = plt.subplot(2, 1, 2) Y_ = clf.predict(X) for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_, color_iter)): v, w = linalg.eigh(covar) if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color) # Plot an ellipse to show the Gaussian component angle = np.arctan2(w[0][1], w[0][0]) angle = 180 * angle / np.pi # convert to degrees v *= 4 ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color) ell.set_clip_box(splot.bbox) ell.set_alpha(.5) splot.add_artist(ell) plt.xlim(-10, 10) plt.ylim(-3, 6) plt.xticks(()) plt.yticks(()) plt.title('Selected GMM: full model, 2 components') plt.subplots_adjust(hspace=.35, bottom=.02) plt.show()
bsd-3-clause
MusicVisualizationUMass/TeamNameGenerator
src/musicvisualizer/pipeline/models/tests/losc_manual_tests.py
1
1565
#!/usr/bin/env python3
'''A set of manual tests to make sure bits and pieces are working. Not part
of the general testing framework but kept for now in case there is some
useful data here later
'''

from musicvisualizer.pipeline.models.linear_oscillator import LinearOscillatorModel
import matplotlib.pyplot as plt
# plt.style.use('ggplot')

if __name__ == '__main__':
    points = 1024

    def dataIn():
        print("CUSTOM DATA IN")
        t = 0.0
        MAXT = 30
        while t < MAXT:
            t += 0.1

    def dataIn_empty():
        for i in range(1000):
            yield (0.0, 0.0)

    def dataIn_singlePulse():
        yield (points / 2, 20)
        for i in range(1000):
            yield (0.0, 0.0)

    M = LinearOscillatorModel(
        sampleRate = 24,            # Visual sample rate
        dataInFPS = 96,             # Data sample rate (to generate visual)
        number_of_points = points,  # how many points in simulation?
        hook = 11.0,
        dataIn = None,
        damping = 0.9999)

    I = iter(M)
    plt.ion()  # Interactive I/O

    for frame in I:
        ys = [p[0] for p in frame]
        vs = [p[1] for p in frame]
        xs = range(len(frame))
        # print("max(ys) = {}".format(max(ys)))
        # print("max(vs) = {}".format(max(vs)))
        ys = ys + [1.0, -1.0]
        xs = list(xs) + [0, 0]
        plt.scatter(xs, ys)
        plt.show()       # Update visuals
        plt.pause(0.02)  # Pause
        plt.cla()        # Clear
mit
Ziqi-Li/bknqgis
pandas/pandas/core/dtypes/inference.py
8
8381
""" basic inference routines """ import collections import re import numpy as np from numbers import Number from pandas.compat import (PY2, string_types, text_type, string_and_binary_types) from pandas._libs import lib is_bool = lib.is_bool is_integer = lib.is_integer is_float = lib.is_float is_complex = lib.is_complex is_scalar = lib.isscalar is_decimal = lib.is_decimal is_interval = lib.is_interval def is_number(obj): """ Check if the object is a number. Parameters ---------- obj : The object to check. Returns ------- is_number : bool Whether `obj` is a number or not. Examples -------- >>> is_number(1) True >>> is_number("foo") False """ return isinstance(obj, (Number, np.number)) def is_string_like(obj): """ Check if the object is a string. Parameters ---------- obj : The object to check. Examples -------- >>> is_string_like("foo") True >>> is_string_like(1) False Returns ------- is_str_like : bool Whether `obj` is a string or not. """ return isinstance(obj, (text_type, string_types)) def _iterable_not_string(obj): """ Check if the object is an iterable but not a string. Parameters ---------- obj : The object to check. Returns ------- is_iter_not_string : bool Whether `obj` is a non-string iterable. Examples -------- >>> _iterable_not_string([1, 2, 3]) True >>> _iterable_not_string("foo") False >>> _iterable_not_string(1) False """ return (isinstance(obj, collections.Iterable) and not isinstance(obj, string_types)) def is_iterator(obj): """ Check if the object is an iterator. For example, lists are considered iterators but not strings or datetime objects. Parameters ---------- obj : The object to check. Returns ------- is_iter : bool Whether `obj` is an iterator. Examples -------- >>> is_iterator([1, 2, 3]) True >>> is_iterator(datetime(2017, 1, 1)) False >>> is_iterator("foo") False >>> is_iterator(1) False """ if not hasattr(obj, '__iter__'): return False if PY2: return hasattr(obj, 'next') else: # Python 3 generators have # __next__ instead of next return hasattr(obj, '__next__') def is_file_like(obj): """ Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. .. versionadded:: 0.20.0 Parameters ---------- obj : The object to check. Returns ------- is_file_like : bool Whether `obj` has file-like properties. Examples -------- >>> buffer(StringIO("data")) >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False """ if not (hasattr(obj, 'read') or hasattr(obj, 'write')): return False if not hasattr(obj, "__iter__"): return False return True def is_re(obj): """ Check if the object is a regex pattern instance. Parameters ---------- obj : The object to check. Returns ------- is_regex : bool Whether `obj` is a regex pattern. Examples -------- >>> is_re(re.compile(".*")) True >>> is_re("foo") False """ return isinstance(obj, re._pattern_type) def is_re_compilable(obj): """ Check if the object can be compiled into a regex pattern instance. Parameters ---------- obj : The object to check. Returns ------- is_regex_compilable : bool Whether `obj` can be compiled as a regex pattern. Examples -------- >>> is_re_compilable(".*") True >>> is_re_compilable(1) False """ try: re.compile(obj) except TypeError: return False else: return True def is_list_like(obj): """ Check if the object is list-like. 
Objects that are considered list-like are for example Python lists, tuples, sets, NumPy arrays, and Pandas Series. Strings and datetime objects, however, are not considered list-like. Parameters ---------- obj : The object to check. Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_list_like([1, 2, 3]) True >>> is_list_like({1, 2, 3}) True >>> is_list_like(datetime(2017, 1, 1)) False >>> is_list_like("foo") False >>> is_list_like(1) False """ return (hasattr(obj, '__iter__') and not isinstance(obj, string_and_binary_types)) def is_nested_list_like(obj): """ Check if the object is list-like, and that all of its elements are also list-like. .. versionadded:: 0.20.0 Parameters ---------- obj : The object to check. Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like """ return (is_list_like(obj) and hasattr(obj, '__len__') and len(obj) > 0 and all(is_list_like(item) for item in obj)) def is_dict_like(obj): """ Check if the object is dict-like. Parameters ---------- obj : The object to check. Returns ------- is_dict_like : bool Whether `obj` has dict-like properties. Examples -------- >>> is_dict_like({1: 2}) True >>> is_dict_like([1, 2, 3]) False """ return hasattr(obj, '__getitem__') and hasattr(obj, 'keys') def is_named_tuple(obj): """ Check if the object is a named tuple. Parameters ---------- obj : The object to check. Returns ------- is_named_tuple : bool Whether `obj` is a named tuple. Examples -------- >>> Point = namedtuple("Point", ["x", "y"]) >>> p = Point(1, 2) >>> >>> is_named_tuple(p) True >>> is_named_tuple((1, 2)) False """ return isinstance(obj, tuple) and hasattr(obj, '_fields') def is_hashable(obj): """Return True if hash(obj) will succeed, False otherwise. Some types will pass a test against collections.Hashable but fail when they are actually hashed with hash(). Distinguish between these and other types by trying the call to hash() and seeing if they raise TypeError. Examples -------- >>> a = ([],) >>> isinstance(a, collections.Hashable) True >>> is_hashable(a) False """ # Unfortunately, we can't use isinstance(obj, collections.Hashable), which # can be faster than calling hash. That is because numpy scalars on Python # 3 fail this test. # Reconsider this decision once this numpy bug is fixed: # https://github.com/numpy/numpy/issues/5562 try: hash(obj) except TypeError: return False else: return True def is_sequence(obj): """ Check if the object is a sequence of objects. String types are not included as sequences here. Parameters ---------- obj : The object to check. Returns ------- is_sequence : bool Whether `obj` is a sequence of objects. Examples -------- >>> l = [1, 2, 3] >>> >>> is_sequence(l) True >>> is_sequence(iter(l)) False """ try: iter(obj) # Can iterate over it. len(obj) # Has a length associated with it. return not isinstance(obj, string_and_binary_types) except (TypeError, AttributeError): return False
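The helpers above are also exposed publicly as pandas.api.types in released pandas, which makes the duck-typing rules easy to check interactively. Note that the is_file_like docstring above shows a garbled buffer(...) call; a working check looks like this (a small sketch against released pandas):

from io import StringIO
from pandas.api.types import is_file_like, is_list_like, is_hashable

buf = StringIO("data")
print(is_file_like(buf))          # True: iterable and has read/write
print(is_file_like([1, 2, 3]))    # False: iterable, but no read/write
print(is_list_like((1, 2, 3)))    # True
print(is_list_like("foo"))        # False: strings are excluded
print(is_hashable(([],)))         # False: hash() raises for a tuple holding a list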
gpl-2.0
yassersouri/omgh
src/scripts/grid_search_c.py
1
2365
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from dataset import CUB_200_2011
from storage import datastore
from deep_extractor import CNN_Features_CAFFE_REFERENCE
from datetime import datetime as dt
import settings
import utils
import numpy as np

cub = CUB_200_2011(settings.CUB_ROOT)

features_storage_r = datastore(settings.storage('ccrft'))
feature_extractor_r = CNN_Features_CAFFE_REFERENCE(features_storage_r, make_net=False)

features_storage_c = datastore(settings.storage('cccft'))
feature_extractor_c = CNN_Features_CAFFE_REFERENCE(features_storage_c, make_net=False)

features_storage_p_h = datastore(settings.storage('ccpheadft-100000'))
feature_extractor_p_h = CNN_Features_CAFFE_REFERENCE(features_storage_p_h, make_net=False)

features_storage_p_h = datastore(settings.storage('ccpheadft-100000'))
feature_extractor_p_h = CNN_Features_CAFFE_REFERENCE(features_storage_p_h, make_net=False)

features_storage_p_b = datastore(settings.storage('ccpbodyft-10000'))
feature_extractor_p_b = CNN_Features_CAFFE_REFERENCE(features_storage_p_b, make_net=False)

Xtrain_r, ytrain_r, Xtest_r, ytest_r = cub.get_train_test(feature_extractor_r.extract_one)
Xtrain_c, ytrain_c, Xtest_c, ytest_c = cub.get_train_test(feature_extractor_c.extract_one)
Xtrain_p_h, ytrain_p_h, Xtest_p_h, ytest_p_h = cub.get_train_test(feature_extractor_p_h.extract_one)
Xtrain_p_b, ytrain_p_b, Xtest_p_b, ytest_p_b = cub.get_train_test(feature_extractor_p_b.extract_one)

Xtrain = np.concatenate((Xtrain_r, Xtrain_c, Xtrain_p_h, Xtrain_p_b), axis=1)
Xtest = np.concatenate((Xtest_r, Xtest_c, Xtest_p_h, Xtest_p_b), axis=1)

import numpy
from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.grid_search import GridSearchCV

CS = numpy.array([100, 10, 1, 0.1, 0.01, 0.001, 0.0001])
model = svm.LinearSVC()
grid_search = GridSearchCV(estimator=model, param_grid=dict(C=CS), n_jobs=3)
grid_search.fit(Xtrain, ytrain_r)
print 'best c:', grid_search.best_params_

a = dt.now()
model = svm.LinearSVC(C=grid_search.best_params_['C'])
model.fit(Xtrain, ytrain_r)
b = dt.now()
print 'fitted in: %s' % (b - a)

a = dt.now()
predictions = model.predict(Xtest)
b = dt.now()
print 'predicted in: %s' % (b - a)

print 'accuracy', accuracy_score(ytest_r, predictions)
print 'mean accuracy', utils.mean_accuracy(ytest_r, predictions)
mit
louispotok/pandas
pandas/tests/indexes/datetimes/test_partial_slicing.py
1
15706
""" test partial slicing on Series/Frame """ import pytest from datetime import datetime import numpy as np import pandas as pd import operator as op from pandas import (DatetimeIndex, Series, DataFrame, date_range, Index, Timedelta, Timestamp) from pandas.util import testing as tm class TestSlicing(object): def test_dti_slicing(self): dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') dti2 = dti[[1, 3, 5]] v1 = dti2[0] v2 = dti2[1] v3 = dti2[2] assert v1 == Timestamp('2/28/2005') assert v2 == Timestamp('4/30/2005') assert v3 == Timestamp('6/30/2005') # don't carry freq through irregular slicing assert dti2.freq is None def test_slice_keeps_name(self): # GH4226 st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles') dr = pd.date_range(st, et, freq='H', name='timebucket') assert dr[1:].name == dr.name def test_slice_with_negative_step(self): ts = Series(np.arange(20), date_range('2014-01-01', periods=20, freq='MS')) SLC = pd.IndexSlice def assert_slices_equivalent(l_slc, i_slc): tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc]) tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1]) assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1]) assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1]) assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1]) assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1], SLC[13:8:-1]) assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp( '2014-10-01'):-1], SLC[13:8:-1]) assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1], SLC[13:8:-1]) assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1], SLC[13:8:-1]) assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0]) def test_slice_with_zero_step_raises(self): ts = Series(np.arange(20), date_range('2014-01-01', periods=20, freq='MS')) tm.assert_raises_regex(ValueError, 'slice step cannot be zero', lambda: ts[::0]) tm.assert_raises_regex(ValueError, 'slice step cannot be zero', lambda: ts.loc[::0]) tm.assert_raises_regex(ValueError, 'slice step cannot be zero', lambda: ts.loc[::0]) def test_slice_bounds_empty(self): # GH 14354 empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015') right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc') exp = Timestamp('2015-01-02 23:59:59.999999999') assert right == exp left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc') exp = Timestamp('2015-01-02 00:00:00') assert left == exp def test_slice_duplicate_monotonic(self): # https://github.com/pandas-dev/pandas/issues/16515 idx = pd.DatetimeIndex(['2017', '2017']) result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc') expected = Timestamp('2017-01-01') assert result == expected def test_monotone_DTI_indexing_bug(self): # GH 19362 # Testing accessing the first element in a montononic descending # partial string indexing. 
df = pd.DataFrame(list(range(5))) date_list = ['2018-01-02', '2017-02-10', '2016-03-10', '2015-03-15', '2014-03-16'] date_index = pd.to_datetime(date_list) df['date'] = date_index expected = pd.DataFrame({0: list(range(5)), 'date': date_index}) tm.assert_frame_equal(df, expected) df = pd.DataFrame({'A': [1, 2, 3]}, index=pd.date_range('20170101', periods=3)[::-1]) expected = pd.DataFrame({'A': 1}, index=pd.date_range('20170103', periods=1)) tm.assert_frame_equal(df.loc['2017-01-03'], expected) def test_slice_year(self): dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500) s = Series(np.arange(len(dti)), index=dti) result = s['2005'] expected = s[s.index.year == 2005] tm.assert_series_equal(result, expected) df = DataFrame(np.random.rand(len(dti), 5), index=dti) result = df.loc['2005'] expected = df[df.index.year == 2005] tm.assert_frame_equal(result, expected) rng = date_range('1/1/2000', '1/1/2010') result = rng.get_loc('2009') expected = slice(3288, 3653) assert result == expected def test_slice_quarter(self): dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500) s = Series(np.arange(len(dti)), index=dti) assert len(s['2001Q1']) == 90 df = DataFrame(np.random.rand(len(dti), 5), index=dti) assert len(df.loc['1Q01']) == 90 def test_slice_month(self): dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500) s = Series(np.arange(len(dti)), index=dti) assert len(s['2005-11']) == 30 df = DataFrame(np.random.rand(len(dti), 5), index=dti) assert len(df.loc['2005-11']) == 30 tm.assert_series_equal(s['2005-11'], s['11-2005']) def test_partial_slice(self): rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s['2005-05':'2006-02'] expected = s['20050501':'20060228'] tm.assert_series_equal(result, expected) result = s['2005-05':] expected = s['20050501':] tm.assert_series_equal(result, expected) result = s[:'2006-02'] expected = s[:'20060228'] tm.assert_series_equal(result, expected) result = s['2005-1-1'] assert result == s.iloc[0] pytest.raises(Exception, s.__getitem__, '2004-12-31') def test_partial_slice_daily(self): rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s['2005-1-31'] tm.assert_series_equal(result, s.iloc[:24]) pytest.raises(Exception, s.__getitem__, '2004-12-31 00') def test_partial_slice_hourly(self): rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s['2005-1-1'] tm.assert_series_equal(result, s.iloc[:60 * 4]) result = s['2005-1-1 20'] tm.assert_series_equal(result, s.iloc[:60]) assert s['2005-1-1 20:00'] == s.iloc[0] pytest.raises(Exception, s.__getitem__, '2004-12-31 00:15') def test_partial_slice_minutely(self): rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s['2005-1-1 23:59'] tm.assert_series_equal(result, s.iloc[:60]) result = s['2005-1-1'] tm.assert_series_equal(result, s.iloc[:60]) assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0] pytest.raises(Exception, s.__getitem__, '2004-12-31 00:00:00') def test_partial_slice_second_precision(self): rng = DatetimeIndex(start=datetime(2005, 1, 1, 0, 0, 59, microsecond=999990), periods=20, freq='US') s = Series(np.arange(20), rng) tm.assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10]) tm.assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10]) 
tm.assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:]) tm.assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:]) assert s[Timestamp('2005-1-1 00:00:59.999990')] == s.iloc[0] tm.assert_raises_regex(KeyError, '2005-1-1 00:00:00', lambda: s['2005-1-1 00:00:00']) def test_partial_slicing_dataframe(self): # GH14856 # Test various combinations of string slicing resolution vs. # index resolution # - If string resolution is less precise than index resolution, # string is considered a slice # - If string resolution is equal to or more precise than index # resolution, string is considered an exact match formats = ['%Y', '%Y-%m', '%Y-%m-%d', '%Y-%m-%d %H', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S'] resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second'] for rnum, resolution in enumerate(resolutions[2:], 2): # we check only 'day', 'hour', 'minute' and 'second' unit = Timedelta("1 " + resolution) middate = datetime(2012, 1, 1, 0, 0, 0) index = DatetimeIndex([middate - unit, middate, middate + unit]) values = [1, 2, 3] df = DataFrame({'a': values}, index, dtype=np.int64) assert df.index.resolution == resolution # Timestamp with the same resolution as index # Should be exact match for Series (return scalar) # and raise KeyError for Frame for timestamp, expected in zip(index, values): ts_string = timestamp.strftime(formats[rnum]) # make ts_string as precise as index result = df['a'][ts_string] assert isinstance(result, np.int64) assert result == expected pytest.raises(KeyError, df.__getitem__, ts_string) # Timestamp with resolution less precise than index for fmt in formats[:rnum]: for element, theslice in [[0, slice(None, 1)], [1, slice(1, None)]]: ts_string = index[element].strftime(fmt) # Series should return slice result = df['a'][ts_string] expected = df['a'][theslice] tm.assert_series_equal(result, expected) # Frame should return slice as well result = df[ts_string] expected = df[theslice] tm.assert_frame_equal(result, expected) # Timestamp with resolution more precise than index # Compatible with existing key # Should return scalar for Series # and raise KeyError for Frame for fmt in formats[rnum + 1:]: ts_string = index[1].strftime(fmt) result = df['a'][ts_string] assert isinstance(result, np.int64) assert result == 2 pytest.raises(KeyError, df.__getitem__, ts_string) # Not compatible with existing key # Should raise KeyError for fmt, res in list(zip(formats, resolutions))[rnum + 1:]: ts = index[1] + Timedelta("1 " + res) ts_string = ts.strftime(fmt) pytest.raises(KeyError, df['a'].__getitem__, ts_string) pytest.raises(KeyError, df.__getitem__, ts_string) def test_partial_slicing_with_multiindex(self): # GH 4758 # partial string indexing with a multi-index buggy df = DataFrame({'ACCOUNT': ["ACCT1", "ACCT1", "ACCT1", "ACCT2"], 'TICKER': ["ABC", "MNP", "XYZ", "XYZ"], 'val': [1, 2, 3, 4]}, index=date_range("2013-06-19 09:30:00", periods=4, freq='5T')) df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True) expected = DataFrame([ [1] ], index=Index(['ABC'], name='TICKER'), columns=['val']) result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')] tm.assert_frame_equal(result, expected) expected = df_multi.loc[ (pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')] result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')] tm.assert_series_equal(result, expected) # this is a KeyError as we don't do partial string selection on # multi-levels def f(): df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')] pytest.raises(KeyError, f) # GH 4294 # partial slice on a series mi s = 
pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range( '2000-1-1', periods=1000)).stack() s2 = s[:-1].copy() expected = s2['2000-1-4'] result = s2[pd.Timestamp('2000-1-4')] tm.assert_series_equal(result, expected) result = s[pd.Timestamp('2000-1-4')] expected = s['2000-1-4'] tm.assert_series_equal(result, expected) df2 = pd.DataFrame(s) expected = df2.xs('2000-1-4') result = df2.loc[pd.Timestamp('2000-1-4')] tm.assert_frame_equal(result, expected) def test_partial_slice_doesnt_require_monotonicity(self): # For historical reasons. s = pd.Series(np.arange(10), pd.date_range('2014-01-01', periods=10)) nonmonotonic = s[[3, 5, 4]] expected = nonmonotonic.iloc[:0] timestamp = pd.Timestamp('2014-01-10') tm.assert_series_equal(nonmonotonic['2014-01-10':], expected) tm.assert_raises_regex(KeyError, r"Timestamp\('2014-01-10 00:00:00'\)", lambda: nonmonotonic[timestamp:]) tm.assert_series_equal(nonmonotonic.loc['2014-01-10':], expected) tm.assert_raises_regex(KeyError, r"Timestamp\('2014-01-10 00:00:00'\)", lambda: nonmonotonic.loc[timestamp:]) def test_loc_datetime_length_one(self): # GH16071 df = pd.DataFrame(columns=['1'], index=pd.date_range('2016-10-01T00:00:00', '2016-10-01T23:59:59')) result = df.loc[datetime(2016, 10, 1):] tm.assert_frame_equal(result, df) result = df.loc['2016-10-01T00:00:00':] tm.assert_frame_equal(result, df) @pytest.mark.parametrize('datetimelike', [ Timestamp('20130101'), datetime(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')]) @pytest.mark.parametrize('op,expected', [ (op.lt, [True, False, False, False]), (op.le, [True, True, False, False]), (op.eq, [False, True, False, False]), (op.gt, [False, False, False, True])]) def test_selection_by_datetimelike(self, datetimelike, op, expected): # GH issue #17965, test for ability to compare datetime64[ns] columns # to datetimelike df = DataFrame({'A': [pd.Timestamp('20120101'), pd.Timestamp('20130101'), np.nan, pd.Timestamp('20130103')]}) result = op(df.A, datetimelike) expected = Series(expected, name='A') tm.assert_series_equal(result, expected)
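The behaviour these tests pin down is easiest to see on a small example: a partial date string that is coarser than the index resolution selects a slice, and string slice endpoints are inclusive. A standalone sketch against released pandas (not part of the test module):

import numpy as np
import pandas as pd

# daily index covering 2005-01-01 .. 2005-03-31
s = pd.Series(np.arange(90), index=pd.date_range('2005-01-01', periods=90, freq='D'))

print(len(s.loc['2005-01']))                  # 31: month-level string -> whole month
print(len(s.loc['2005-01-05':'2005-02-03']))  # 30: string endpoints are inclusive
df = pd.DataFrame({'val': s})
print(df.loc['2005-02'].shape)                # (28, 1): same thing on a DataFrame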
bsd-3-clause
dorvaljulien/StarFiddle
density_scatter_plot.py
1
4635
""" A "density scatter plot" is a scatter plot with points displaying a color corresponding to the local "point density". """ import matplotlib.pyplot as plt, numpy as np, numpy.random, scipy #mport cubehelix from kdtree import Tree log=np.log10 def DensityScatter(xdat,ydat,ax=None,NNb=15,Nbins=20,logx=False,logy=False,**kwargs): """ ax = DensityScatter( xdat, ydat,ax=None, NNb=15, Nbins=20, logx=False, logy=False, **kwargs) ------------------------------------------------------------ xdat : data array for x ydat : data array for y ax : If needed, previously existing matplotlib axis object Nnb : Number of neighbour points to compute local density Nbins : Number of density(colour) bins logx : Boolean, do you want xdata to be displayed on a logscale ? logy : Boolean, do you want ydata to be displayed on a logscale ? **kwargs : Means any additionnal keyword will be passed to plt.plot Display a scatter plot of xdat, ydat and attribute colors to points according to the local point density. Allows to visualize the distribution of points in high density regions without doing an histogram2d. """ N=len(xdat) xdat = np.array(xdat); ydat = np.array(ydat) X = (xdat - min(xdat))/(max(xdat) - min(xdat)) Y = (ydat - min(ydat))/(max(ydat) - min(ydat)) if logx: X = log(xdat/max(xdat)) if logy: Y = log(ydat/max(ydat)) T = Tree(X, Y) density = np.zeros(N) def ComputeDensity(nb,d): return nb/( np.pi*d**2) for i in range(N): _, dist = T.nnearest(i, NNb) density[i] = ComputeDensity(NNb,dist[-1]) density_bins = np.logspace( 0.5*(log(min(density))+log(max(density))), log(max(density)), Nbins) density_bins = np.array( [0] + list(density_bins) ) SelectionIndices = [] for i in range(Nbins): ind_arr = np.nonzero(( density_bins[i] <= density ) * ( density < density_bins[i+1]))[0] SelectionIndices.append(ind_arr) if ax is None: fig = plt.figure() ax = fig.add_subplot(111) cm = plt.get_cmap("rainbow") for i,(ind,alph) in enumerate(zip(SelectionIndices,np.linspace(1.,0.,Nbins))): color = cm(1.*(i)/Nbins) ax.plot( xdat[ind], ydat[ind], "o", color=color, alpha=alph, markeredgecolor="none",**kwargs) if logy: ax.set_yscale("log") if logx: ax.set_xscale("log") return ax def DensityScatter3D(xdat,ydat,zdat,ax=None,NNb=15,Nbins=20,**kwargs): """ ax = DensityScatter3D( xdat, ydat, zdat, ax=None, NNb=15, Nbins=20, **kwargs) ------------------------------------------------------------ xdat : data array for x ydat : data array for y zdat : data array for z ax : If needed, previously existing matplotlib axis object Nnb : Number of neighbour points to compute local density Nbins : Number of density(colour) bins **kwargs : Means any additionnal keyword will be passed to plt.plot Display a 3d scatter plot of xdat, ydat, zdat and attribute colors to points according to the local point density. It's kind of experimental, I played with transparency and order or display to be able to see what's going on in high density regions. 
""" N=len(xdat) xdat = np.array(xdat); ydat = np.array(ydat) X = (xdat - min(xdat))/(max(xdat) - min(xdat)) Y = (ydat - min(ydat))/(max(ydat) - min(ydat)) Z = (zdat - min(zdat))/(max(zdat) - min(zdat)) T = Tree(X, Y, Z) density = np.zeros(N) def ComputeDensity(nb,d): return nb/( 4./3 *np.pi*d**3) for i in range(N): _, dist = T.nnearest(i, NNb) density[i] = ComputeDensity(NNb,dist[-1]) density_bins = np.logspace( 0.5*(log(min(density))+log(max(density))), log(max(density)), Nbins) density_bins = np.array( [0] + list(density_bins) ) SelectionIndices = [] for i in range(Nbins): ind_arr = np.nonzero(( density_bins[i] <= density ) * ( density < density_bins[i+1]))[0] SelectionIndices.append(ind_arr) if ax is None: fig = plt.figure() ax = fig.add_subplot(111, projection="3d") cm = plt.get_cmap("rainbow") for i,(ind,alph) in enumerate(zip(SelectionIndices,np.linspace(1.,0.,Nbins))): color = cm(1.*(i)/Nbins) ax.plot( xdat[ind], ydat[ind], zdat[ind], "o", color=color, alpha=alph, markeredgecolor="none",**kwargs) return ax
mit
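The density-scatter module above estimates each point's local density from the distance to its NNb-th nearest neighbour (density ≈ NNb / (π·d²) in 2D) and colours points by density bins. A minimal, self-contained sketch of the same idea, using scipy.spatial.cKDTree in place of the custom kdtree.Tree dependency and mapping density straight onto a colormap instead of binning it; k=15, the colormap, and the synthetic data are illustrative choices, not the author's:

import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
x = rng.normal(size=5000)
y = 0.5 * x + rng.normal(scale=0.5, size=5000)

# distance to the k-th nearest neighbour -> local density ~ k / (pi * r^2)
k = 15
pts = np.column_stack([x, y])
tree = cKDTree(pts)
dist, _ = tree.query(pts, k=k + 1)   # k+1 because the point itself comes back first, at distance 0
density = k / (np.pi * dist[:, -1] ** 2)

plt.scatter(x, y, c=np.log10(density), s=5, cmap="rainbow")
plt.colorbar(label="log10(local point density)")
plt.show()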
Huyuwei/tvm
docs/conf.py
2
8618
# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # documentation build configuration file, created by # sphinx-quickstart on Thu Jul 23 19:40:08 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os, subprocess import shlex import recommonmark import sphinx_gallery from recommonmark.parser import CommonMarkParser from recommonmark.transform import AutoStructify # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../python/')) sys.path.insert(0, os.path.join(curr_path, '../topi/python')) sys.path.insert(0, os.path.join(curr_path, '../nnvm/python')) sys.path.insert(0, os.path.join(curr_path, '../vta/python')) # -- General configuration ------------------------------------------------ # General information about the project. project = u'tvm' author = u'%s developers' % project copyright = u'2018, %s' % author github_doc_root = 'https://github.com/tqchen/tvm/tree/master/docs/' # add markdown parser CommonMarkParser.github_doc_root = github_doc_root source_parsers = { '.md': CommonMarkParser } os.environ['TVM_BUILD_DOC'] = '1' os.environ['NNVM_BUILD_DOC'] = '1' # Version information. import tvm version = tvm.__version__ release = tvm.__version__ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', 'sphinx.ext.mathjax', 'sphinx_gallery.gen_gallery', ] breathe_projects = {'tvm' : 'doxygen/xml/'} breathe_default_project = 'tvm' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = ['.rst', '.md'] # The encoding of source files. #source_encoding = 'utf-8-sig' # generate autosummary even if no references autosummary_generate = True # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. 
# Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme is set by the make target html_theme = os.environ.get('TVM_THEME', 'rtd') on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # only import rtd theme and set it if want to build docs locally if not on_rtd and html_theme == 'rtd': import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_theme_options = { 'analytics_id': 'UA-75982049-2', 'logo_only': True, } html_logo = "_static/img/tvm-logo-small.png" html_favicon = "_static/img/tvm-logo-square.png" # Output file base name for HTML help builder. htmlhelp_basename = project + 'doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ (master_doc, '%s.tex' % project, project, author, 'manual'), ] # hook for doxygen def run_doxygen(folder): """Run the doxygen make command in the designated folder.""" try: #retcode = subprocess.call("cd %s; make doc" % folder, shell=True) retcode = subprocess.call("rm -rf _build/html/doxygen", shell=True) retcode = subprocess.call("mkdir -p _build/html", shell=True) retcode = subprocess.call("cp -rf doxygen/html _build/html/doxygen", shell=True) if retcode < 0: sys.stderr.write("doxygen terminated by signal %s" % (-retcode)) except OSError as e: sys.stderr.write("doxygen execution failed: %s" % e) intersphinx_mapping = { 'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None), 'numpy': ('http://docs.scipy.org/doc/numpy/', None), 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None), 'matplotlib': ('http://matplotlib.org/', None), } from sphinx_gallery.sorting import ExplicitOrder examples_dirs = ["../tutorials/", "../vta/tutorials/"] gallery_dirs = ["tutorials", "vta/tutorials"] subsection_order = ExplicitOrder( ['../tutorials/frontend', '../tutorials/language', '../tutorials/optimize', '../tutorials/autotvm', '../tutorials/dev', '../tutorials/topi', '../tutorials/deployment', '../vta/tutorials/frontend', '../vta/tutorials/optimize', '../vta/tutorials/autotvm']) def generate_doxygen_xml(app): """Run the doxygen make commands if we're on the ReadTheDocs server""" run_doxygen('..') def setup(app): # Add hook for building doxygen xml when needed # no c++ API for now app.connect("builder-inited", generate_doxygen_xml) app.add_stylesheet('css/tvm_theme.css') app.add_config_value('recommonmark_config', { 'url_resolver': lambda url: github_doc_root + url, 'auto_doc_ref': True }, True) app.add_transform(AutoStructify) sphinx_gallery_conf = { 'backreferences_dir': 'gen_modules/backreferences', 'doc_module': ('tvm', 'numpy'), 'reference_url': { 'tvm': None, 'matplotlib': 'http://matplotlib.org', 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'}, 'examples_dirs': examples_dirs, 'gallery_dirs': gallery_dirs, 'subsection_order': subsection_order, 'filename_pattern': os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"), 'find_mayavi_figures': False, 'expected_failing_examples': [] }
apache-2.0
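The conf.py above wires Doxygen output into the Sphinx build through the "builder-inited" event (run_doxygen / generate_doxygen_xml / setup). A minimal, hedged sketch of that hook pattern, with a placeholder command that is not taken from the repository:

# hypothetical minimal conf.py fragment: run an external doc step before Sphinx reads sources
import subprocess


def generate_api_docs(app):
    # stand-in for the run_doxygen()/generate_doxygen_xml() pair above;
    # "doxygen Doxyfile" is an illustrative command only
    subprocess.call("doxygen Doxyfile", shell=True)


def setup(app):
    # "builder-inited" fires once the builder exists, before the sources are read
    app.connect("builder-inited", generate_api_docs)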
dpshelio/sunpy
examples/parse_time.py
2
3839
""" ======================================== Parsing times with sunpy.time.parse_time ======================================== This is an example to show some possible usage of ``parse_time``. ``parse_time`` is a function that can be useful to create `~astropy.time.Time` objects from various other time objects and strings. """ ############################################################################## # Import the required modules. from datetime import datetime, date import time import numpy as np import pandas from sunpy.time import parse_time ############################################################################## # Suppose you want to parse some strings, ``parse_time`` can do that. t1 = parse_time('1995-12-31 23:59:60') ############################################################################## # Of course you could do the same with `~astropy.time.Time`. # But SunPy ``parse_time`` can parse even more formats of time strings. # And as you see from the examples, thanks to `~astropy.time.Time`, ``parse_time`` # can handle leap seconds too. t2 = parse_time('1995-Dec-31 23:59:60') ############################################################################## # You can mention the scale of the time as a keyword parameter if you need. # Similar to scale you can pass in any astropy Time compatible keywords to # ``parse_time``. See all arguments # `here: <https://docs.astropy.org/en/stable/time/#creating-a-time-object>`__ t3 = parse_time('2012:124:21:08:12', scale='tai') ############################################################################## # Now that you are done with strings, let's see other type ``parse_time`` handles, # tuples. `~astropy.time.Time` does not handle tuples but ``parse_time`` does. t4 = parse_time((1998, 11, 14)) t5 = parse_time((2001, 1, 1, 12, 12, 12, 8899)) ############################################################################## # This also means that you can parse a ``time.struct_time``. t6 = parse_time(time.localtime()) ############################################################################## # ``parse_time`` also parses ``datetime`` and ``date`` objects. t7 = parse_time(datetime.now()) t8 = parse_time(date.today()) ############################################################################## # ``parse_time`` can return ``astropy.time.Time`` objects for ``pandas.Timestamp``, # ``pandas.Series`` and ``pandas.DatetimeIndex``. t9 = parse_time(pandas.Timestamp(datetime(1966, 2, 3))) t10 = parse_time( pandas.Series([[datetime(2012, 1, 1, 0, 0), datetime(2012, 1, 2, 0, 0)], [datetime(2012, 1, 3, 0, 0), datetime(2012, 1, 4, 0, 0)]])) t11 = parse_time( pandas.DatetimeIndex([ datetime(2012, 1, 1, 0, 0), datetime(2012, 1, 2, 0, 0), datetime(2012, 1, 3, 0, 0), datetime(2012, 1, 4, 0, 0) ])) ############################################################################## # ``parse_time`` can parse ``numpy.datetime64`` objects. t12 = parse_time(np.datetime64('2014-02-07T16:47:51.008288123-0500')) t13 = parse_time( np.array( ['2014-02-07T16:47:51.008288123', '2014-02-07T18:47:51.008288123'], dtype='datetime64')) ############################################################################## # Parse time returns `~astropy.time.Time` object for every parsable input that # you give to it. # ``parse_time`` can handle all formats that `~astropy.time.Time` can handle. # That is, # ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date', 'datetime', # 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str', 'jyear_str'] # at the time of writing. 
This can be used by passing the ``format`` keyword argument # to ``parse_time``. parse_time(1234.0, format='jd') parse_time('B1950.0', format='byear_str')
bsd-2-clause
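Since parse_time returns an `~astropy.time.Time`, the usual Time attributes are available on the result. A short usage note, assuming sunpy and astropy are installed; the timestamp is arbitrary:

from sunpy.time import parse_time

t = parse_time('2014-02-07T16:47:51')
print(type(t))         # astropy.time.Time
print(t.jd, t.unix)    # the same instant expressed in other astropy Time formats
print(t.datetime)      # converted back to a standard-library datetime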
ekadhanda/bin
python/coda-cont.py
1
7175
#! /usr/bin/env python # Written by Vasaant S/O Krishnan Friday, 19 May 2017 # Run without arguments for instructions. import sys usrFile = sys.argv[1:] if len(usrFile) == 0: print "" print "# Script to read in file of the CODA format and plot a multivariate" print "# distribution with contours." print "# An index.txt and chain.txt file must be provided and the script" print "# will automatically identify them for internal use. Options are:" print "" print "# samp = Sample chain.txt data at this frequency (computational consideration)." print "" print " -->$ coda-cont.py CODAindex.txt CODAchain.txt samp=xx" print "" exit() import re import numpy as np import matplotlib.pyplot as plt import seaborn as sns #===================================================================== # Define variables. # ints = '\s+?([+-]?\d+)' # Integers for regex #floats = '\s+?([+-]?\d+(?:\.\d+)?)' # Floats or int floats = '\s+?([+-]?\d+(?:\.\d+)?|\.\d+)([eE][+-]?\d+)?' # Floats or int or scientific codaFiles = [] # CODAindex and CODAchain files indexFileFnd = False # CODAindex file identified? chainFileFnd = False # CODAchain file identified? indexCodes = {} # Dictionary containing CODAindex info. # chainIndx = [] # Indexes/Column 1 of CODAchain.txt file chainData = [] # Data/Column 2 of CODAchain.txt file varOne = '' # x data varTwo = '' # y data #===================================================================== #===================================================================== # Determine which are the CODAindex and CODAchain files and # automatically assign them to their respective variables. # for i in usrFile: codaSearch = re.search('.txt',i) if codaSearch: codaFiles.append(i) if len(codaFiles) == 2: # Assuming 1 index and 1 chain file for j in codaFiles: with open(j,'r') as chkTyp: # Run a quick check on the first line only firstLine = chkTyp.readline() codaIndex = re.search('^(\S+)' + ints + ints + '$', firstLine) codaChain = re.search('^(\d+)' + floats + '$', firstLine) if codaIndex: indexFile = j indexFileFnd = True if codaChain: chainFile = j chainFileFnd = True else: print "Insfficient files of CODA*.txt format." print "Check your input files." #===================================================================== #===================================================================== # Determine user requested variable from CODAIndex file # for i in usrFile: userReqCodaIndx = re.search('var=(\S+),(\S+)',i) if userReqCodaIndx: varOne = str(userReqCodaIndx.group(1)) varTwo = str(userReqCodaIndx.group(2)) #===================================================================== if indexFileFnd and chainFileFnd: #===================================================================== # Harvest index file for the variable list and corresponding # [start,stop] coords: # for line in open(indexFile, 'r'): reqIndex = re.search('^(\S+)' + ints + ints + '$', line) if reqIndex: key = str(reqIndex.group(1)) value = [int(reqIndex.group(2)), int(reqIndex.group(3))] indexCodes[key] = value maxElement = max(indexCodes, key = indexCodes.get) # The key with the largest value chainLen = max(indexCodes[maxElement]) # The largest value (expected amt. of data) if len(indexCodes) < 2: print "Insufficient variables in %s for contour plot."%(indexFile) contVarsOk = False elif len(indexCodes) == 2: varOne = indexCodes.keys()[0] varTwo = indexCodes.keys()[1] contOne = indexCodes[varOne] contTwo = indexCodes[varTwo] contVarsOk = True else: if varOne == '' or varTwo == '': print "Manually select variables for contour plot." 
contVarsOk = False else: contOne = indexCodes[varOne] contTwo = indexCodes[varTwo] contVarsOk = True #===================================================================== #===================================================================== # Harvest chain file # for line in open(chainFile, 'r'): reqChain = re.search('^(\d+)' + floats + '$', line) if reqChain: #chainIndx.append( int(reqChain.group(1))) chainData.append(float(reqChain.group(2))) #chainIndx = np.array(chainIndx) chainData = np.array(chainData) #===================================================================== #===================================================================== # Basic check on the harvest by comparing harvested vs. expected # no. of data. # if len(chainData) != chainLen: print " Warning! " print " %10d lines expected from %s."%(chainLen,indexFile) print " %10d lines harvested from %s."%(len(chainData),chainFile) #===================================================================== #===================================================================== # Contour plot # # if contVarsOk: dataOne = chainData[contOne[0]-1:contOne[1]] # Python starts from 0. CODAindex from 1 dataTwo = chainData[contTwo[0]-1:contTwo[1]] # Ensure same amount of data from both variables if (contOne[0]-contOne[1]) != (contTwo[0]-contTwo[1]): print " %10d lines harvested from %s."%(len(dataOne),varOne) print " %10d lines harvested from %s."%(len(dataTwo),varTwo) else: # This section to get data to the ~100s for computational consideration... if len(dataOne) >= 1000: sampleFactor = 10**int(np.floor(np.log10(len(dataOne)) - 2)) elif len(dataOne) > 500 and len(dataOne) < 1000: sampleFactor = int(len(dataOne)/5.0) else: sampleFactor = 1 # ... unless you want a customised option: for i in usrFile: userReqSamp = re.search('samp=(\d+)',i) if userReqSamp: if int(userReqSamp.group(1)) < len(dataOne): sampleFactor = int(userReqSamp.group(1)) dataOne = dataOne[0::sampleFactor] # Select data at intervals dataTwo = dataTwo[0::sampleFactor] dataComb = {varOne:dataOne, # Apparently jointplot likes dict format varTwo:dataTwo} sns.jointplot(x=varOne,y=varTwo,data=dataComb,kind="kde").set_axis_labels(varOne,varTwo) plt.show() #=====================================================================
mit
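The script ends by handing the two thinned chains to seaborn's jointplot in KDE mode. A standalone sketch of that final plotting step, with synthetic correlated data standing in for the CODA chains and the column names "varOne"/"varTwo" merely mirroring the script's variables:

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
a = rng.normal(size=800)
b = 0.7 * a + rng.normal(scale=0.5, size=800)   # two correlated "chains"

df = pd.DataFrame({"varOne": a, "varTwo": b})
g = sns.jointplot(x="varOne", y="varTwo", data=df, kind="kde")
g.set_axis_labels("varOne", "varTwo")
plt.show()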
alephu5/Soundbyte
environment/lib/python3.3/site-packages/pandas/sparse/tests/test_libsparse.py
1
11260
from pandas import Series import nose from numpy import nan import numpy as np import operator from numpy.testing import assert_almost_equal, assert_equal import pandas.util.testing as tm from pandas.core.sparse import SparseSeries from pandas import DataFrame from pandas._sparse import IntIndex, BlockIndex import pandas._sparse as splib TEST_LENGTH = 20 plain_case = dict(xloc=[0, 7, 15], xlen=[3, 5, 5], yloc=[2, 9, 14], ylen=[2, 3, 5], intersect_loc=[2, 9, 15], intersect_len=[1, 3, 4]) delete_blocks = dict(xloc=[0, 5], xlen=[4, 4], yloc=[1], ylen=[4], intersect_loc=[1], intersect_len=[3]) split_blocks = dict(xloc=[0], xlen=[10], yloc=[0, 5], ylen=[3, 7], intersect_loc=[0, 5], intersect_len=[3, 5]) skip_block = dict(xloc=[10], xlen=[5], yloc=[0, 12], ylen=[5, 3], intersect_loc=[12], intersect_len=[3]) no_intersect = dict(xloc=[0, 10], xlen=[4, 6], yloc=[5, 17], ylen=[4, 2], intersect_loc=[], intersect_len=[]) def check_cases(_check_case): def _check_case_dict(case): _check_case(case['xloc'], case['xlen'], case['yloc'], case['ylen'], case['intersect_loc'], case['intersect_len']) _check_case_dict(plain_case) _check_case_dict(delete_blocks) _check_case_dict(split_blocks) _check_case_dict(skip_block) _check_case_dict(no_intersect) # one or both is empty _check_case([0], [5], [], [], [], []) _check_case([], [], [], [], [], []) def test_index_make_union(): def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) bresult = xindex.make_union(yindex) assert(isinstance(bresult, BlockIndex)) assert_equal(bresult.blocs, eloc) assert_equal(bresult.blengths, elen) ixindex = xindex.to_int_index() iyindex = yindex.to_int_index() iresult = ixindex.make_union(iyindex) assert(isinstance(iresult, IntIndex)) assert_equal(iresult.indices, bresult.to_int_index().indices) """ x: ---- y: ---- r: -------- """ xloc = [0] xlen = [5] yloc = [5] ylen = [4] eloc = [0] elen = [9] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ----- ----- y: ----- -- """ xloc = [0, 10] xlen = [5, 5] yloc = [2, 17] ylen = [5, 2] eloc = [0, 10, 17] elen = [7, 5, 2] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ------ y: ------- r: ---------- """ xloc = [1] xlen = [5] yloc = [3] ylen = [5] eloc = [1] elen = [7] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ------ ----- y: ------- r: ------------- """ xloc = [2, 10] xlen = [4, 4] yloc = [4] ylen = [8] eloc = [2] elen = [12] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: --- ----- y: ------- r: ------------- """ xloc = [0, 5] xlen = [3, 5] yloc = [0] ylen = [7] eloc = [0] elen = [10] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ------ ----- y: ------- --- r: ------------- """ xloc = [2, 10] xlen = [4, 4] yloc = [4, 13] ylen = [8, 4] eloc = [2] elen = [15] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ---------------------- y: ---- ---- --- r: ---------------------- """ xloc = [2] xlen = [15] yloc = [4, 9, 14] ylen = [3, 2, 2] eloc = [2] elen = [15] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ---- --- y: --- --- """ xloc = [0, 10] xlen = [3, 3] yloc = [5, 15] ylen = [2, 2] eloc = [0, 5, 10, 15] elen = [3, 2, 3, 2] _check_case(xloc, xlen, yloc, ylen, eloc, elen) # TODO: different-length index objects def test_lookup(): def _check(index): assert(index.lookup(0) == -1) assert(index.lookup(5) == 0) assert(index.lookup(7) == 2) assert(index.lookup(8) == -1) assert(index.lookup(9) == -1) assert(index.lookup(10) == -1) assert(index.lookup(11) == -1) 
assert(index.lookup(12) == 3) assert(index.lookup(17) == 8) assert(index.lookup(18) == -1) bindex = BlockIndex(20, [5, 12], [3, 6]) iindex = bindex.to_int_index() _check(bindex) _check(iindex) # corner cases def test_intersect(): def _check_correct(a, b, expected): result = a.intersect(b) assert(result.equals(expected)) def _check_length_exc(a, longer): nose.tools.assert_raises(Exception, a.intersect, longer) def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) expected = BlockIndex(TEST_LENGTH, eloc, elen) longer_index = BlockIndex(TEST_LENGTH + 1, yloc, ylen) _check_correct(xindex, yindex, expected) _check_correct(xindex.to_int_index(), yindex.to_int_index(), expected.to_int_index()) _check_length_exc(xindex, longer_index) _check_length_exc(xindex.to_int_index(), longer_index.to_int_index()) check_cases(_check_case) class TestBlockIndex(tm.TestCase): def test_equals(self): index = BlockIndex(10, [0, 4], [2, 5]) self.assert_(index.equals(index)) self.assert_(not index.equals(BlockIndex(10, [0, 4], [2, 6]))) def test_check_integrity(self): locs = [] lengths = [] # 0-length OK index = BlockIndex(0, locs, lengths) # also OK even though empty index = BlockIndex(1, locs, lengths) # block extend beyond end self.assertRaises(Exception, BlockIndex, 10, [5], [10]) # block overlap self.assertRaises(Exception, BlockIndex, 10, [2, 5], [5, 3]) def test_to_int_index(self): locs = [0, 10] lengths = [4, 6] exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15] block = BlockIndex(20, locs, lengths) dense = block.to_int_index() assert_equal(dense.indices, exp_inds) def test_to_block_index(self): index = BlockIndex(10, [0, 5], [4, 5]) self.assert_(index.to_block_index() is index) class TestIntIndex(tm.TestCase): def test_equals(self): index = IntIndex(10, [0, 1, 2, 3, 4]) self.assert_(index.equals(index)) self.assert_(not index.equals(IntIndex(10, [0, 1, 2, 3]))) def test_to_block_index(self): def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) # see if survive the round trip xbindex = xindex.to_int_index().to_block_index() ybindex = yindex.to_int_index().to_block_index() tm.assert_isinstance(xbindex, BlockIndex) self.assert_(xbindex.equals(xindex)) self.assert_(ybindex.equals(yindex)) check_cases(_check_case) def test_to_int_index(self): index = IntIndex(10, [2, 3, 4, 5, 6]) self.assert_(index.to_int_index() is index) class TestSparseOperators(tm.TestCase): def _nan_op_tests(self, sparse_op, python_op): def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) xdindex = xindex.to_int_index() ydindex = yindex.to_int_index() x = np.arange(xindex.npoints) * 10. + 1 y = np.arange(yindex.npoints) * 100. + 1 result_block_vals, rb_index = sparse_op(x, xindex, y, yindex) result_int_vals, ri_index = sparse_op(x, xdindex, y, ydindex) self.assert_(rb_index.to_int_index().equals(ri_index)) assert_equal(result_block_vals, result_int_vals) # check versus Series... 
xseries = Series(x, xdindex.indices) yseries = Series(y, ydindex.indices) series_result = python_op(xseries, yseries).valid() assert_equal(result_block_vals, series_result.values) assert_equal(result_int_vals, series_result.values) check_cases(_check_case) def _op_tests(self, sparse_op, python_op): def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) xdindex = xindex.to_int_index() ydindex = yindex.to_int_index() x = np.arange(xindex.npoints) * 10. + 1 y = np.arange(yindex.npoints) * 100. + 1 xfill = 0 yfill = 2 result_block_vals, rb_index = sparse_op( x, xindex, xfill, y, yindex, yfill) result_int_vals, ri_index = sparse_op(x, xdindex, xfill, y, ydindex, yfill) self.assert_(rb_index.to_int_index().equals(ri_index)) assert_equal(result_block_vals, result_int_vals) # check versus Series... xseries = Series(x, xdindex.indices) xseries = xseries.reindex(np.arange(TEST_LENGTH)).fillna(xfill) yseries = Series(y, ydindex.indices) yseries = yseries.reindex(np.arange(TEST_LENGTH)).fillna(yfill) series_result = python_op(xseries, yseries) series_result = series_result.reindex(ri_index.indices) assert_equal(result_block_vals, series_result.values) assert_equal(result_int_vals, series_result.values) check_cases(_check_case) # too cute? oh but how I abhor code duplication check_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv'] def make_nanoptestf(op): def f(self): sparse_op = getattr(splib, 'sparse_nan%s' % op) python_op = getattr(operator, op) self._nan_op_tests(sparse_op, python_op) f.__name__ = 'test_nan%s' % op return f def make_optestf(op): def f(self): sparse_op = getattr(splib, 'sparse_%s' % op) python_op = getattr(operator, op) self._op_tests(sparse_op, python_op) f.__name__ = 'test_%s' % op return f for op in check_ops: f = make_nanoptestf(op) g = make_optestf(op) setattr(TestSparseOperators, f.__name__, f) setattr(TestSparseOperators, g.__name__, g) del f del g if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
gpl-3.0
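The fixtures at the top of the test file (plain_case, delete_blocks, split_blocks, ...) describe a block index as a pair of lists (block locations, block lengths) together with the expected intersection. A small pure-Python sketch of that intersection semantics, checked against the plain_case fixture; this is only an illustration of the behaviour the tests encode, not pandas' Cython implementation:

def intersect_blocks(xloc, xlen, yloc, ylen):
    """Intersect two block indexes given as (locations, lengths) lists."""
    locs, lens = [], []
    for xs, xl in zip(xloc, xlen):
        xe = xs + xl
        for ys, yl in zip(yloc, ylen):
            start, end = max(xs, ys), min(xe, ys + yl)
            if start < end:               # non-empty overlap
                locs.append(start)
                lens.append(end - start)
    return locs, lens

# matches the 'plain_case' fixture above
assert intersect_blocks([0, 7, 15], [3, 5, 5],
                        [2, 9, 14], [2, 3, 5]) == ([2, 9, 15], [1, 3, 4])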
tionn/holo-at-on
code/get.py
1
1360
# -*- coding: utf-8 -*- import os import io import urllib2 import string from BeautifulSoup import BeautifulSoup import pandas as pd import sys city_url = 'http://twblg.dict.edu.tw/holodict_new/index/xiangzhen_level1.jsp?county=1' def extract_items(base_url): html = urllib2.urlopen(base_url).read() soup = BeautifulSoup(html) #print(soup.prettify()) data = [] table = soup.findAll('tr', attrs={'class':['all_space1', 'all_space2']}) for row in table: cols = row.findAll('td') cols = [ele.text.strip() for ele in cols] data.append([ele for ele in cols if ele]) # Get rid of empty values return data def get_area_url(): base_url = 'http://twblg.dict.edu.tw/holodict_new/index/xiangzhen_level1.jsp?county=%s' url = [] for i in string.ascii_uppercase: url.append(base_url % i) return url if __name__=='__main__': # 縣市名稱 data = extract_items(city_url) data.pop() # ignore data from '其他' print 'Cities and countries are done.' # 鄉鎮區名稱 area_url = get_area_url() for i in area_url: area_data = extract_items(i) data.extend(area_data) print 'Townships are done.' #df = pd.DataFrame(data, columns=['name', 'holo']) df = pd.DataFrame(data) df.to_csv('moe_mapping.csv', encoding='utf-8', index=False, header=0) print 'csv file done.'
cc0-1.0
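The scraper above is Python 2 (urllib2, BeautifulSoup 3, print statements). A hedged Python 3 sketch of the same table-extraction step using requests and bs4; the URL and the all_space1/all_space2 class names are copied from the script and may no longer match the live site:

import requests
from bs4 import BeautifulSoup


def extract_items(base_url):
    """Return the non-empty cell texts of each result row, as in get.py above."""
    html = requests.get(base_url).text
    soup = BeautifulSoup(html, "html.parser")
    data = []
    for row in soup.find_all("tr", class_=["all_space1", "all_space2"]):
        cols = [td.get_text(strip=True) for td in row.find_all("td")]
        data.append([c for c in cols if c])   # drop empty cells
    return data


city_url = ("http://twblg.dict.edu.tw/holodict_new/index/"
            "xiangzhen_level1.jsp?county=1")
# rows = extract_items(city_url)   # requires network access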
jmetzen/scikit-learn
examples/tree/plot_tree_regression_multioutput.py
22
1848
""" =================================================================== Multi-output Decision Tree Regression =================================================================== An example to illustrate multi-output regression with decision tree. The :ref:`decision trees <tree>` is used to predict simultaneously the noisy x and y observations of a circle given a single underlying feature. As a result, it learns local linear regressions approximating the circle. We can see that if the maximum depth of the tree (controlled by the `max_depth` parameter) is set too high, the decision trees learn too fine details of the training data and learn from the noise, i.e. they overfit. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeRegressor # Create a random dataset rng = np.random.RandomState(1) X = np.sort(200 * rng.rand(100, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y[::5, :] += (0.5 - rng.rand(20, 2)) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=2) regr_2 = DecisionTreeRegressor(max_depth=5) regr_3 = DecisionTreeRegressor(max_depth=8) regr_1.fit(X, y) regr_2.fit(X, y) regr_3.fit(X, y) # Predict X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) y_3 = regr_3.predict(X_test) # Plot the results plt.figure() s = 50 plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data") plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2") plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5") plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8") plt.xlim([-6, 6]) plt.ylim([-6, 6]) plt.xlabel("data") plt.ylabel("target") plt.title("Multi-output Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
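The example relies on DecisionTreeRegressor accepting a two-column target. A minimal sketch of just the multi-output fit/predict shape, with simpler synthetic data rather than the circle from the example:

import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.column_stack([np.sin(X).ravel(), np.cos(X).ravel()])   # two targets per sample

tree = DecisionTreeRegressor(max_depth=5).fit(X, y)
print(tree.predict(X[:3]).shape)   # (3, 2): one prediction per sample and per output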
wzbozon/scikit-learn
benchmarks/bench_glm.py
297
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
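The benchmark times each estimator with datetime arithmetic plus sklearn.utils.bench.total_seconds, a helper that newer scikit-learn releases may no longer include. A hedged sketch of the same timing loop with the standard library's time.perf_counter and a single fixed problem size:

import time
import numpy as np
from sklearn import linear_model

n_samples, n_features = 500, 500
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)

for name, est in [("Ridge", linear_model.Ridge(alpha=1.0)),
                  ("OLS", linear_model.LinearRegression()),
                  ("LassoLars", linear_model.LassoLars())]:
    start = time.perf_counter()
    est.fit(X, Y)
    print("%-10s %.4f s" % (name, time.perf_counter() - start))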
hainm/scikit-learn
sklearn/utils/extmath.py
142
21102
""" Extended math utilities. """ # Authors: Gael Varoquaux # Alexandre Gramfort # Alexandre T. Passos # Olivier Grisel # Lars Buitinck # Stefan van der Walt # Kyle Kastner # License: BSD 3 clause from __future__ import division from functools import partial import warnings import numpy as np from scipy import linalg from scipy.sparse import issparse from . import check_random_state from .fixes import np_version from ._logistic_sigmoid import _log_logistic_sigmoid from ..externals.six.moves import xrange from .sparsefuncs_fast import csr_row_norms from .validation import check_array, NonBLASDotWarning def norm(x): """Compute the Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). More precise than sqrt(squared_norm(x)). """ x = np.asarray(x) nrm2, = linalg.get_blas_funcs(['nrm2'], [x]) return nrm2(x) # Newer NumPy has a ravel that needs less copying. if np_version < (1, 7, 1): _ravel = np.ravel else: _ravel = partial(np.ravel, order='K') def squared_norm(x): """Squared Euclidean or Frobenius norm of x. Returns the Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array). Faster than norm(x) ** 2. """ x = _ravel(x) return np.dot(x, x) def row_norms(X, squared=False): """Row-wise (squared) Euclidean norm of X. Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse matrices and does not create an X.shape-sized temporary. Performs no input validation. """ if issparse(X): norms = csr_row_norms(X) else: norms = np.einsum('ij,ij->i', X, X) if not squared: np.sqrt(norms, norms) return norms def fast_logdet(A): """Compute log(det(A)) for A symmetric Equivalent to : np.log(nl.det(A)) but more robust. It returns -Inf if det(A) is non positive or is not defined. """ sign, ld = np.linalg.slogdet(A) if not sign > 0: return -np.inf return ld def _impose_f_order(X): """Helper Function""" # important to access flags instead of calling np.isfortran, # this catches corner cases. if X.flags.c_contiguous: return check_array(X.T, copy=False, order='F'), True else: return check_array(X, copy=False, order='F'), False def _fast_dot(A, B): if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c' raise ValueError if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64) for x in [A, B]): warnings.warn('Data must be of same type. Supported types ' 'are 32 and 64 bit float. ' 'Falling back to np.dot.', NonBLASDotWarning) raise ValueError if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2: raise ValueError # scipy 0.9 compliant API dot = linalg.get_blas_funcs(['gemm'], (A, B))[0] A, trans_a = _impose_f_order(A) B, trans_b = _impose_f_order(B) return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b) def _have_blas_gemm(): try: linalg.get_blas_funcs(['gemm']) return True except (AttributeError, ValueError): warnings.warn('Could not import BLAS, falling back to np.dot') return False # Only use fast_dot for older NumPy; newer ones have tackled the speed issue. if np_version < (1, 7, 2) and _have_blas_gemm(): def fast_dot(A, B): """Compute fast dot products directly calling BLAS. This function calls BLAS directly while warranting Fortran contiguity. This helps avoiding extra copies `np.dot` would have created. For details see section `Linear Algebra on large Arrays`: http://wiki.scipy.org/PerformanceTips Parameters ---------- A, B: instance of np.ndarray Input arrays. Arrays are supposed to be of the same dtype and to have exactly 2 dimensions. 
Currently only floats are supported. In case these requirements aren't met np.dot(A, B) is returned instead. To activate the related warning issued in this case execute the following lines of code: >> import warnings >> from sklearn.utils.validation import NonBLASDotWarning >> warnings.simplefilter('always', NonBLASDotWarning) """ try: return _fast_dot(A, B) except ValueError: # Maltyped or malformed data. return np.dot(A, B) else: fast_dot = np.dot def density(w, **kwargs): """Compute density of a sparse vector Return a value between 0 and 1 """ if hasattr(w, "toarray"): d = float(w.nnz) / (w.shape[0] * w.shape[1]) else: d = 0 if w is None else float((w != 0).sum()) / w.size return d def safe_sparse_dot(a, b, dense_output=False): """Dot product that handle the sparse matrix case correctly Uses BLAS GEMM as replacement for numpy.dot where possible to avoid unnecessary copies. """ if issparse(a) or issparse(b): ret = a * b if dense_output and hasattr(ret, "toarray"): ret = ret.toarray() return ret else: return fast_dot(a, b) def randomized_range_finder(A, size, n_iter, random_state=None): """Computes an orthonormal matrix whose range approximates the range of A. Parameters ---------- A: 2D array The input data matrix size: integer Size of the return array n_iter: integer Number of power iterations used to stabilize the result random_state: RandomState or an int seed (0 by default) A random number generator instance Returns ------- Q: 2D array A (size x size) projection matrix, the range of which approximates well the range of the input matrix A. Notes ----- Follows Algorithm 4.3 of Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 """ random_state = check_random_state(random_state) # generating random gaussian vectors r with shape: (A.shape[1], size) R = random_state.normal(size=(A.shape[1], size)) # sampling the range of A using by linear projection of r Y = safe_sparse_dot(A, R) del R # perform power iterations with Y to further 'imprint' the top # singular vectors of A in Y for i in xrange(n_iter): Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y)) # extracting an orthonormal basis of the A range samples Q, R = linalg.qr(Y, mode='economic') return Q def randomized_svd(M, n_components, n_oversamples=10, n_iter=0, transpose='auto', flip_sign=True, random_state=0): """Computes a truncated randomized SVD Parameters ---------- M: ndarray or sparse matrix Matrix to decompose n_components: int Number of singular values and vectors to extract. n_oversamples: int (default is 10) Additional number of random vectors to sample the range of M so as to ensure proper conditioning. The total number of random vectors used to find the range of M is n_components + n_oversamples. n_iter: int (default is 0) Number of power iterations (can be used to deal with very noisy problems). transpose: True, False or 'auto' (default) Whether the algorithm should be applied to M.T instead of M. The result should approximately be the same. The 'auto' mode will trigger the transposition if M.shape[1] > M.shape[0] since this implementation of randomized SVD tend to be a little faster in that case). flip_sign: boolean, (True by default) The output of a singular value decomposition is only unique up to a permutation of the signs of the singular vectors. If `flip_sign` is set to `True`, the sign ambiguity is resolved by making the largest loadings for each component in the left singular vectors positive. 
random_state: RandomState or an int seed (0 by default) A random number generator instance to make behavior Notes ----- This algorithm finds a (usually very good) approximate truncated singular value decomposition using randomization to speed up the computations. It is particularly fast on large matrices on which you wish to extract only a small number of components. References ---------- * Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061 * A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert """ random_state = check_random_state(random_state) n_random = n_components + n_oversamples n_samples, n_features = M.shape if transpose == 'auto' and n_samples > n_features: transpose = True if transpose: # this implementation is a bit faster with smaller shape[1] M = M.T Q = randomized_range_finder(M, n_random, n_iter, random_state) # project M to the (k + p) dimensional space using the basis vectors B = safe_sparse_dot(Q.T, M) # compute the SVD on the thin matrix: (k + p) wide Uhat, s, V = linalg.svd(B, full_matrices=False) del B U = np.dot(Q, Uhat) if flip_sign: U, V = svd_flip(U, V) if transpose: # transpose back the results according to the input convention return V[:n_components, :].T, s[:n_components], U[:, :n_components].T else: return U[:, :n_components], s[:n_components], V[:n_components, :] def logsumexp(arr, axis=0): """Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 """ arr = np.rollaxis(arr, axis) # Use the max to normalize, as with the log this is what accumulates # the less errors vmax = arr.max(axis=0) out = np.log(np.sum(np.exp(arr - vmax), axis=0)) out += vmax return out def weighted_mode(a, w, axis=0): """Returns an array of the weighted modal (most common) value in a If there is more than one such value, only the first is returned. The bin-count for the modal bins is also returned. This is an extension of the algorithm in scipy.stats.mode. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). w : array_like n-dimensional array of weights for each value axis : int, optional Axis along which to operate. Default is 0, i.e. the first axis. Returns ------- vals : ndarray Array of modal values. score : ndarray Array of weighted counts for each mode. Examples -------- >>> from sklearn.utils.extmath import weighted_mode >>> x = [4, 1, 4, 2, 4, 2] >>> weights = [1, 1, 1, 1, 1, 1] >>> weighted_mode(x, weights) (array([ 4.]), array([ 3.])) The value 4 appears three times: with uniform weights, the result is simply the mode of the distribution. >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's >>> weighted_mode(x, weights) (array([ 2.]), array([ 3.5])) The value 2 has the highest score: it appears twice with weights of 1.5 and 2: the sum of these is 3. 
See Also -------- scipy.stats.mode """ if axis is None: a = np.ravel(a) w = np.ravel(w) axis = 0 else: a = np.asarray(a) w = np.asarray(w) axis = axis if a.shape != w.shape: w = np.zeros(a.shape, dtype=w.dtype) + w scores = np.unique(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape) oldcounts = np.zeros(testshape) for score in scores: template = np.zeros(a.shape) ind = (a == score) template[ind] = w[ind] counts = np.expand_dims(np.sum(template, axis), axis) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return mostfrequent, oldcounts def pinvh(a, cond=None, rcond=None, lower=True): """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix. Calculate a generalized inverse of a symmetric matrix using its eigenvalue decomposition and including all 'large' eigenvalues. Parameters ---------- a : array, shape (N, N) Real symmetric or complex hermetian matrix to be pseudo-inverted cond : float or None, default None Cutoff for 'small' eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. rcond : float or None, default None (deprecated) Cutoff for 'small' eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : boolean Whether the pertinent array data is taken from the lower or upper triangle of a. (Default: lower) Returns ------- B : array, shape (N, N) Raises ------ LinAlgError If eigenvalue does not converge Examples -------- >>> import numpy as np >>> a = np.random.randn(9, 6) >>> a = np.dot(a, a.T) >>> B = pinvh(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a = np.asarray_chkfinite(a) s, u = linalg.eigh(a, lower=lower) if rcond is not None: cond = rcond if cond in [None, -1]: t = u.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps # unlike svd case, eigh can lead to negative eigenvalues above_cutoff = (abs(s) > cond * np.max(abs(s))) psigma_diag = np.zeros_like(s) psigma_diag[above_cutoff] = 1.0 / s[above_cutoff] return np.dot(u * psigma_diag, np.conjugate(u).T) def cartesian(arrays, out=None): """Generate a cartesian product of input arrays. Parameters ---------- arrays : list of array-like 1-D arrays to form the cartesian product of. out : ndarray Array to place the cartesian product in. Returns ------- out : ndarray 2-D array of shape (M, len(arrays)) containing cartesian products formed of input arrays. Examples -------- >>> cartesian(([1, 2, 3], [4, 5], [6, 7])) array([[1, 4, 6], [1, 4, 7], [1, 5, 6], [1, 5, 7], [2, 4, 6], [2, 4, 7], [2, 5, 6], [2, 5, 7], [3, 4, 6], [3, 4, 7], [3, 5, 6], [3, 5, 7]]) """ arrays = [np.asarray(x) for x in arrays] shape = (len(x) for x in arrays) dtype = arrays[0].dtype ix = np.indices(shape) ix = ix.reshape(len(arrays), -1).T if out is None: out = np.empty_like(ix, dtype=dtype) for n, arr in enumerate(arrays): out[:, n] = arrays[n][ix[:, n]] return out def svd_flip(u, v, u_based_decision=True): """Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. 
Parameters ---------- u, v : ndarray u and v are the output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. u_based_decision : boolean, (default=True) If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted, v_adjusted : arrays with the same dimensions as the input. """ if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) signs = np.sign(u[max_abs_cols, xrange(u.shape[1])]) u *= signs v *= signs[:, np.newaxis] else: # rows of v, columns of u max_abs_rows = np.argmax(np.abs(v), axis=1) signs = np.sign(v[xrange(v.shape[0]), max_abs_rows]) u *= signs v *= signs[:, np.newaxis] return u, v def log_logistic(X, out=None): """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``. This implementation is numerically stable because it splits positive and negative values:: -log(1 + exp(-x_i)) if x_i > 0 x_i - log(1 + exp(x_i)) if x_i <= 0 For the ordinary logistic function, use ``sklearn.utils.fixes.expit``. Parameters ---------- X: array-like, shape (M, N) Argument to the logistic function out: array-like, shape: (M, N), optional: Preallocated output array. Returns ------- out: array, shape (M, N) Log of the logistic function evaluated at every point in x Notes ----- See the blog post describing this implementation: http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/ """ is_1d = X.ndim == 1 X = check_array(X, dtype=np.float) n_samples, n_features = X.shape if out is None: out = np.empty_like(X) _log_logistic_sigmoid(n_samples, n_features, X, out) if is_1d: return np.squeeze(out) return out def safe_min(X): """Returns the minimum value of a dense or a CSR/CSC matrix. Adapated from http://stackoverflow.com/q/13426580 """ if issparse(X): if len(X.data) == 0: return 0 m = X.data.min() return m if X.getnnz() == X.size else min(m, 0) else: return X.min() def make_nonnegative(X, min_value=0): """Ensure `X.min()` >= `min_value`.""" min_ = safe_min(X) if min_ < min_value: if issparse(X): raise ValueError("Cannot make the data matrix" " nonnegative because it is sparse." " Adding a value to every entry would" " make it no longer sparse.") X = X + (min_value - min_) return X def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count): """Calculate an average mean update and a Youngs and Cramer variance update. From the paper "Algorithms for computing the sample variance: analysis and recommendations", by Chan, Golub, and LeVeque. Parameters ---------- X : array-like, shape (n_samples, n_features) Data to use for variance update old_mean : array-like, shape: (n_features,) old_variance : array-like, shape: (n_features,) old_sample_count : int Returns ------- updated_mean : array, shape (n_features,) updated_variance : array, shape (n_features,) updated_sample_count : int References ---------- T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance: recommendations, The American Statistician, Vol. 37, No. 3, pp. 
242-247 """ new_sum = X.sum(axis=0) new_variance = X.var(axis=0) * X.shape[0] old_sum = old_mean * old_sample_count n_samples = X.shape[0] updated_sample_count = old_sample_count + n_samples partial_variance = old_sample_count / (n_samples * updated_sample_count) * ( n_samples / old_sample_count * old_sum - new_sum) ** 2 unnormalized_variance = old_variance * old_sample_count + new_variance + \ partial_variance return ((old_sum + new_sum) / updated_sample_count, unnormalized_variance / updated_sample_count, updated_sample_count) def _deterministic_vector_sign_flip(u): """Modify the sign of vectors for reproducibility Flips the sign of elements of all the vectors (rows of u) such that the absolute maximum element of each vector is positive. Parameters ---------- u : ndarray Array with vectors as its rows. Returns ------- u_flipped : ndarray with same shape as u Array with the sign flipped vectors as its rows. """ max_abs_rows = np.argmax(np.abs(u), axis=1) signs = np.sign(u[range(u.shape[0]), max_abs_rows]) u *= signs[:, np.newaxis] return u
bsd-3-clause
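randomized_svd above follows Halko et al.: project the matrix onto a few random vectors, optionally run power iterations, orthonormalise the result, then take an exact SVD of the small projected matrix. A plain-NumPy sketch of that pipeline for intuition; the parameter choices and test matrix are illustrative, and this is not the library's exact implementation:

import numpy as np


def randomized_svd_sketch(M, n_components, n_oversamples=10, n_iter=4, seed=0):
    rng = np.random.RandomState(seed)
    n_random = n_components + n_oversamples

    # range finder: random projection followed by power iterations
    Q = M @ rng.normal(size=(M.shape[1], n_random))
    for _ in range(n_iter):
        Q = M @ (M.T @ Q)
    Q, _ = np.linalg.qr(Q)

    # exact SVD of the small projected matrix B = Q^T M
    B = Q.T @ M
    Uhat, s, Vt = np.linalg.svd(B, full_matrices=False)
    U = Q @ Uhat
    return U[:, :n_components], s[:n_components], Vt[:n_components]


rng = np.random.RandomState(0)
M = rng.randn(300, 40) @ rng.randn(40, 100)   # rank-40 test matrix
U, s, Vt = randomized_svd_sketch(M, n_components=5)
print(np.round(s, 2))                                          # approximate leading singular values
print(np.round(np.linalg.svd(M, compute_uv=False)[:5], 2))     # exact ones, for comparison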
RMKD/networkx
examples/drawing/unix_email.py
62
2683
#!/usr/bin/env python """ Create a directed graph, allowing multiple edges and self loops, from a unix mailbox. The nodes are email addresses with links that point from the sender to the recievers. The edge data is a Python email.Message object which contains all of the email message data. This example shows the power of XDiGraph to hold edge data of arbitrary Python objects (in this case a list of email messages). By default, load the sample unix email mailbox called "unix_email.mbox". You can load your own mailbox by naming it on the command line, eg python unixemail.py /var/spool/mail/username """ __author__ = """Aric Hagberg ([email protected])""" # Copyright (C) 2005 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. import email from email.utils import getaddresses,parseaddr import mailbox import sys # unix mailbox recipe # see http://www.python.org/doc/current/lib/module-mailbox.html def msgfactory(fp): try: return email.message_from_file(fp) except email.Errors.MessageParseError: # Don't return None since that will stop the mailbox iterator return '' if __name__ == '__main__': import networkx as nx try: import matplotlib.pyplot as plt except: pass if len(sys.argv)==1: filePath = "unix_email.mbox" else: filePath = sys.argv[1] mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox G=nx.MultiDiGraph() # create empty graph # parse each messages and build graph for msg in mbox: # msg is python email.Message.Message object (source_name,source_addr) = parseaddr(msg['From']) # sender # get all recipients # see http://www.python.org/doc/current/lib/module-email.Utils.html tos = msg.get_all('to', []) ccs = msg.get_all('cc', []) resent_tos = msg.get_all('resent-to', []) resent_ccs = msg.get_all('resent-cc', []) all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs) # now add the edges for this mail message for (target_name,target_addr) in all_recipients: G.add_edge(source_addr,target_addr,message=msg) # print edges with message subject for (u,v,d) in G.edges_iter(data=True): print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"])) try: # draw pos=nx.spring_layout(G,iterations=10) nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16) plt.savefig("unix_email.png") plt.show() except: # matplotlib not available pass
bsd-3-clause
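The script stores one edge per message, so repeated sender/recipient pairs become parallel edges in the MultiDiGraph, each carrying its own message data. A tiny sketch of that structure with placeholder addresses; note that edges_iter() in the script is the networkx 1.x spelling, while current releases use edges(data=True):

import networkx as nx

G = nx.MultiDiGraph()
# one edge per message; the same pair of addresses can appear several times
G.add_edge("[email protected]", "[email protected]", subject="hello")
G.add_edge("[email protected]", "[email protected]", subject="re: hello")
G.add_edge("[email protected]", "[email protected]", subject="fyi")

for u, v, d in G.edges(data=True):          # G.edges_iter(data=True) in networkx 1.x
    print("From: %s To: %s Subject: %s" % (u, v, d["subject"]))

print(G.number_of_edges("[email protected]", "[email protected]"))   # 2 parallel edges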
kdebrab/pandas
pandas/tests/frame/test_convert_to.py
3
12494
# -*- coding: utf-8 -*- from datetime import datetime import pytest import pytz import collections from collections import OrderedDict, defaultdict import numpy as np from pandas import compat from pandas.compat import long from pandas import (DataFrame, Series, MultiIndex, Timestamp, date_range) import pandas.util.testing as tm from pandas.tests.frame.common import TestData class TestDataFrameConvertTo(TestData): def test_to_dict_timestamp(self): # GH11247 # split/records producing np.datetime64 rather than Timestamps # on datetime64[ns] dtypes only tsmp = Timestamp('20130101') test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]}) test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]}) expected_records = [{'A': tsmp, 'B': tsmp}, {'A': tsmp, 'B': tsmp}] expected_records_mixed = [{'A': tsmp, 'B': 1}, {'A': tsmp, 'B': 2}] assert (test_data.to_dict(orient='records') == expected_records) assert (test_data_mixed.to_dict(orient='records') == expected_records_mixed) expected_series = { 'A': Series([tsmp, tsmp], name='A'), 'B': Series([tsmp, tsmp], name='B'), } expected_series_mixed = { 'A': Series([tsmp, tsmp], name='A'), 'B': Series([1, 2], name='B'), } tm.assert_dict_equal(test_data.to_dict(orient='series'), expected_series) tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'), expected_series_mixed) expected_split = { 'index': [0, 1], 'data': [[tsmp, tsmp], [tsmp, tsmp]], 'columns': ['A', 'B'] } expected_split_mixed = { 'index': [0, 1], 'data': [[tsmp, 1], [tsmp, 2]], 'columns': ['A', 'B'] } tm.assert_dict_equal(test_data.to_dict(orient='split'), expected_split) tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'), expected_split_mixed) def test_to_dict_invalid_orient(self): df = DataFrame({'A': [0, 1]}) pytest.raises(ValueError, df.to_dict, orient='xinvalid') def test_to_records_dt64(self): df = DataFrame([["one", "two", "three"], ["four", "five", "six"]], index=date_range("2012-01-01", "2012-01-02")) # convert_datetime64 defaults to None expected = df.index.values[0] result = df.to_records()['index'][0] assert expected == result # check for FutureWarning if convert_datetime64=False is passed with tm.assert_produces_warning(FutureWarning): expected = df.index.values[0] result = df.to_records(convert_datetime64=False)['index'][0] assert expected == result # check for FutureWarning if convert_datetime64=True is passed with tm.assert_produces_warning(FutureWarning): expected = df.index[0] result = df.to_records(convert_datetime64=True)['index'][0] assert expected == result def test_to_records_with_multindex(self): # GH3189 index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] data = np.zeros((8, 4)) df = DataFrame(data, index=index) r = df.to_records(index=True)['level_0'] assert 'bar' in r assert 'one' not in r def test_to_records_with_Mapping_type(self): import email from email.parser import Parser import collections collections.Mapping.register(email.message.Message) headers = Parser().parsestr('From: <[email protected]>\n' 'To: <[email protected]>\n' 'Subject: Test message\n' '\n' 'Body would go here\n') frame = DataFrame.from_records([headers]) all(x in frame for x in ['Type', 'Subject', 'From']) def test_to_records_floats(self): df = DataFrame(np.random.rand(10, 10)) df.to_records() def test_to_records_index_name(self): df = DataFrame(np.random.randn(3, 3)) df.index.name = 'X' rs = df.to_records() assert 'X' in rs.dtype.fields df = DataFrame(np.random.randn(3, 3)) rs = df.to_records() 
assert 'index' in rs.dtype.fields df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')]) df.index.names = ['A', None] rs = df.to_records() assert 'level_0' in rs.dtype.fields def test_to_records_with_unicode_index(self): # GH13172 # unicode_literals conflict with to_records result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\ .to_records() expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')]) tm.assert_almost_equal(result, expected) def test_to_records_with_unicode_column_names(self): # xref issue: https://github.com/numpy/numpy/issues/2407 # Issue #11879. to_records used to raise an exception when used # with column names containing non-ascii characters in Python 2 result = DataFrame(data={u"accented_name_é": [1.0]}).to_records() # Note that numpy allows for unicode field names but dtypes need # to be specified using dictionary instead of list of tuples. expected = np.rec.array( [(0, 1.0)], dtype={"names": ["index", u"accented_name_é"], "formats": ['=i8', '=f8']} ) tm.assert_almost_equal(result, expected) def test_to_records_with_categorical(self): # GH8626 # dict creation df = DataFrame({'A': list('abc')}, dtype='category') expected = Series(list('abc'), dtype='category', name='A') tm.assert_series_equal(df['A'], expected) # list-like creation df = DataFrame(list('abc'), dtype='category') expected = Series(list('abc'), dtype='category', name=0) tm.assert_series_equal(df[0], expected) # to record array # this coerces result = df.to_records() expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')], dtype=[('index', '=i8'), ('0', 'O')]) tm.assert_almost_equal(result, expected) @pytest.mark.parametrize('mapping', [ dict, collections.defaultdict(list), collections.OrderedDict]) def test_to_dict(self, mapping): test_data = { 'A': {'1': 1, '2': 2}, 'B': {'1': '1', '2': '2', '3': '3'}, } # GH16122 recons_data = DataFrame(test_data).to_dict(into=mapping) for k, v in compat.iteritems(test_data): for k2, v2 in compat.iteritems(v): assert (v2 == recons_data[k][k2]) recons_data = DataFrame(test_data).to_dict("l", mapping) for k, v in compat.iteritems(test_data): for k2, v2 in compat.iteritems(v): assert (v2 == recons_data[k][int(k2) - 1]) recons_data = DataFrame(test_data).to_dict("s", mapping) for k, v in compat.iteritems(test_data): for k2, v2 in compat.iteritems(v): assert (v2 == recons_data[k][k2]) recons_data = DataFrame(test_data).to_dict("sp", mapping) expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'], 'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]} tm.assert_dict_equal(recons_data, expected_split) recons_data = DataFrame(test_data).to_dict("r", mapping) expected_records = [{'A': 1.0, 'B': '1'}, {'A': 2.0, 'B': '2'}, {'A': np.nan, 'B': '3'}] assert isinstance(recons_data, list) assert (len(recons_data) == 3) for l, r in zip(recons_data, expected_records): tm.assert_dict_equal(l, r) # GH10844 recons_data = DataFrame(test_data).to_dict("i") for k, v in compat.iteritems(test_data): for k2, v2 in compat.iteritems(v): assert (v2 == recons_data[k2][k]) df = DataFrame(test_data) df['duped'] = df[df.columns[0]] recons_data = df.to_dict("i") comp_data = test_data.copy() comp_data['duped'] = comp_data[df.columns[0]] for k, v in compat.iteritems(comp_data): for k2, v2 in compat.iteritems(v): assert (v2 == recons_data[k2][k]) @pytest.mark.parametrize('mapping', [ list, collections.defaultdict, []]) def test_to_dict_errors(self, mapping): # GH16122 df = DataFrame(np.random.randn(3, 3)) with pytest.raises(TypeError): df.to_dict(into=mapping) def 
test_to_dict_not_unique_warning(self): # GH16927: When converting to a dict, if a column has a non-unique name # it will be dropped, throwing a warning. df = DataFrame([[1, 2, 3]], columns=['a', 'a', 'b']) with tm.assert_produces_warning(UserWarning): df.to_dict() @pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern']) def test_to_records_datetimeindex_with_tz(self, tz): # GH13937 dr = date_range('2016-01-01', periods=10, freq='S', tz=tz) df = DataFrame({'datetime': dr}, index=dr) expected = df.to_records() result = df.tz_convert("UTC").to_records() # both converted to UTC, so they are equal tm.assert_numpy_array_equal(result, expected) def test_to_dict_box_scalars(self): # 14216 # make sure that we are boxing properly d = {'a': [1], 'b': ['b']} result = DataFrame(d).to_dict() assert isinstance(list(result['a'])[0], (int, long)) assert isinstance(list(result['b'])[0], (int, long)) result = DataFrame(d).to_dict(orient='records') assert isinstance(result[0]['a'], (int, long)) def test_frame_to_dict_tz(self): # GH18372 When converting to dict with orient='records' columns of # datetime that are tz-aware were not converted to required arrays data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),), (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)] df = DataFrame(list(data), columns=["d", ]) result = df.to_dict(orient='records') expected = [ {'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)}, {'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)}, ] tm.assert_dict_equal(result[0], expected[0]) tm.assert_dict_equal(result[1], expected[1]) @pytest.mark.parametrize('into, expected', [ (dict, {0: {'int_col': 1, 'float_col': 1.0}, 1: {'int_col': 2, 'float_col': 2.0}, 2: {'int_col': 3, 'float_col': 3.0}}), (OrderedDict, OrderedDict([(0, {'int_col': 1, 'float_col': 1.0}), (1, {'int_col': 2, 'float_col': 2.0}), (2, {'int_col': 3, 'float_col': 3.0})])), (defaultdict(list), defaultdict(list, {0: {'int_col': 1, 'float_col': 1.0}, 1: {'int_col': 2, 'float_col': 2.0}, 2: {'int_col': 3, 'float_col': 3.0}})) ]) def test_to_dict_index_dtypes(self, into, expected): # GH 18580 # When using to_dict(orient='index') on a dataframe with int # and float columns only the int columns were cast to float df = DataFrame({'int_col': [1, 2, 3], 'float_col': [1.0, 2.0, 3.0]}) result = df.to_dict(orient='index', into=into) cols = ['int_col', 'float_col'] result = DataFrame.from_dict(result, orient='index')[cols] expected = DataFrame.from_dict(expected, orient='index')[cols] tm.assert_frame_equal(result, expected)
bsd-3-clause
rrohan/scikit-learn
examples/model_selection/plot_validation_curve.py
229
1823
""" ========================== Plotting Validation Curves ========================== In this plot you can see the training scores and validation scores of an SVM for different values of the kernel parameter gamma. For very low values of gamma, you can see that both the training score and the validation score are low. This is called underfitting. Medium values of gamma will result in high values for both scores, i.e. the classifier is performing fairly well. If gamma is too high, the classifier will overfit, which means that the training score is good but the validation score is poor. """ print(__doc__) import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_digits from sklearn.svm import SVC from sklearn.learning_curve import validation_curve digits = load_digits() X, y = digits.data, digits.target param_range = np.logspace(-6, -1, 5) train_scores, test_scores = validation_curve( SVC(), X, y, param_name="gamma", param_range=param_range, cv=10, scoring="accuracy", n_jobs=1) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.title("Validation Curve with SVM") plt.xlabel("$\gamma$") plt.ylabel("Score") plt.ylim(0.0, 1.1) plt.semilogx(param_range, train_scores_mean, label="Training score", color="r") plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2, color="r") plt.semilogx(param_range, test_scores_mean, label="Cross-validation score", color="g") plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2, color="g") plt.legend(loc="best") plt.show()
bsd-3-clause
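The example above imports `validation_curve` from `sklearn.learning_curve`, which is where it lived in older scikit-learn releases; in current releases it is exposed from `sklearn.model_selection`. A minimal sketch of the same computation with the newer import path (assuming a recent scikit-learn is installed):

# Sketch only: same validation-curve computation, modern import path.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import validation_curve
from sklearn.svm import SVC

X, y = load_digits(return_X_y=True)
param_range = np.logspace(-6, -1, 5)

# Both score arrays have shape (n_param_values, n_cv_folds).
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=5, scoring="accuracy", n_jobs=1)
print(train_scores.mean(axis=1), test_scores.mean(axis=1))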
slipguru/palladio
palladio/config_templates/default_config.py
1
2485
# Configuration file example for PALLADIO
# version: 2.0

import numpy as np

from sklearn.feature_selection import RFE
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV

from palladio import datasets

import os

#####################
#   DATASET PATHS ###
#####################

# * All the path are w.r.t. config file path

# The list of all files required for the experiments
data_path = 'data/gedm.csv'
target_path = 'data/labels.csv'

# pandas.read_csv options
data_loading_options = {
    'delimiter': ',',
    'header': 0,
    'index_col': 0
}
target_loading_options = data_loading_options

dataset = datasets.load_csv(os.path.join(os.path.dirname(__file__), data_path),
                            os.path.join(os.path.dirname(__file__), target_path),
                            data_loading_options=data_loading_options,
                            target_loading_options=target_loading_options,
                            samples_on='col')

data, labels = dataset.data, dataset.target
feature_names = dataset.feature_names

#######################
#   SESSION OPTIONS ###
#######################

session_folder = 'palladio_test_session'

# The learning task, if None palladio tries to guess it
# [see sklearn.utils.multiclass.type_of_target]
learning_task = None

# The number of repetitions of 'regular' experiments
n_splits_regular = 50

# The number of repetitions of 'permutation' experiments
n_splits_permutation = 50

#######################
#   LEARNER OPTIONS ###
#######################

model = RFE(LinearSVC(loss='hinge'), step=0.3)

# Set the estimator to be a GridSearchCV
param_grid = {
    'n_features_to_select': [10, 20, 50],
    'estimator__C': np.logspace(-4, 0, 5),
}

estimator = GridSearchCV(model, param_grid=param_grid, cv=3,
                         scoring='accuracy', n_jobs=1)

# Set options for ModelAssessment
ma_options = {
    'test_size': 0.25,
    'scoring': 'accuracy',
    'n_jobs': -1,
    'n_splits': n_splits_regular
}

# For the Pipeline object, indicate the name of the step from which to
# retrieve the list of selected features
# For a single estimator which has a `coef_` attributes (e.g., elastic net or
# lasso) set to True
vs_analysis = True

# ~~ Signature Parameters
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
frequency_threshold = 0.75

# ~~ Plotting Options
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
score_surfaces_options = {
    'logspace': ['estimator__C'],
    'plot_errors': True
}
gpl-3.0
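Aside from the PALLADIO-specific loading and session options, the estimator built in this config is plain scikit-learn: a GridSearchCV wrapped around RFE(LinearSVC). A minimal sketch (not part of PALLADIO, using synthetic data instead of the CSV files, and plain LinearSVC defaults rather than loss='hinge') showing that this estimator can be fit on its own:

# Sketch under stated assumptions: synthetic data stands in for gedm.csv/labels.csv.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=100, n_features=60, random_state=0)

model = RFE(LinearSVC(), step=0.3)
param_grid = {'n_features_to_select': [10, 20, 50],   # RFE parameter
              'estimator__C': np.logspace(-4, 0, 5)}  # nested LinearSVC parameter
search = GridSearchCV(model, param_grid=param_grid, cv=3, scoring='accuracy')
search.fit(X, y)
print(search.best_params_)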
DonBeo/scikit-learn
sklearn/svm/tests/test_sparse.py
1
10550
from nose.tools import assert_raises, assert_true, assert_false import numpy as np from scipy import sparse from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_equal) from sklearn import datasets, svm, linear_model, base from sklearn.datasets import make_classification, load_digits from sklearn.svm.tests import test_svm from sklearn.utils import ConvergenceWarning from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.testing import assert_warns # test sample 1 X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) X_sp = sparse.lil_matrix(X) Y = [1, 1, 1, 2, 2, 2] T = np.array([[-1, -1], [2, 2], [3, 2]]) true_result = [1, 2, 2] # test sample 2 X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ], [0, 0, 2], [3, 3, 3]]) X2_sp = sparse.dok_matrix(X2) Y2 = [1, 2, 2, 2, 3] T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]]) true_result2 = [1, 2, 3] iris = datasets.load_iris() # permute rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # sparsify iris.data = sparse.csr_matrix(iris.data) def test_svc(): # Check that sparse SVC gives the same result as SVC clf = svm.SVC(kernel='linear', probability=True, random_state=0) clf.fit(X, Y) sp_clf = svm.SVC(kernel='linear', probability=True, random_state=0) sp_clf.fit(X_sp, Y) assert_array_equal(sp_clf.predict(T), true_result) assert_true(sparse.issparse(sp_clf.support_vectors_)) assert_array_almost_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_true(sparse.issparse(sp_clf.dual_coef_)) assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) assert_true(sparse.issparse(sp_clf.coef_)) assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray()) assert_array_almost_equal(clf.support_, sp_clf.support_) assert_array_almost_equal(clf.predict(T), sp_clf.predict(T)) # refit with a different dataset clf.fit(X2, Y2) sp_clf.fit(X2_sp, Y2) assert_array_almost_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray()) assert_array_almost_equal(clf.support_, sp_clf.support_) assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2)) assert_array_almost_equal(clf.predict_proba(T2), sp_clf.predict_proba(T2), 4) def test_unsorted_indices(): # test that the result with sorted and unsorted indices in csr is the same # we use a subset of digits as iris, blobs or make_classification didn't # show the problem digits = load_digits() X, y = digits.data[:50], digits.target[:50] X_test = sparse.csr_matrix(digits.data[50:100]) X_sparse = sparse.csr_matrix(X) coef_dense = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X, y).coef_ sparse_svc = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X_sparse, y) coef_sorted = sparse_svc.coef_ # make sure dense and sparse SVM give the same result assert_array_almost_equal(coef_dense, coef_sorted.toarray()) X_sparse_unsorted = X_sparse[np.arange(X.shape[0])] X_test_unsorted = X_test[np.arange(X_test.shape[0])] # make sure we scramble the indices assert_false(X_sparse_unsorted.has_sorted_indices) assert_false(X_test_unsorted.has_sorted_indices) unsorted_svc = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X_sparse_unsorted, y) coef_unsorted = unsorted_svc.coef_ # make sure unsorted indices give same result assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray()) 
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted), sparse_svc.predict_proba(X_test)) def test_svc_with_custom_kernel(): kfunc = lambda x, y: safe_sparse_dot(x, y.T) clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y) clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y) assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp)) def test_svc_iris(): # Test the sparse SVC with the iris dataset for k in ('linear', 'poly', 'rbf'): sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target) clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target) assert_array_almost_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) assert_array_almost_equal( clf.predict(iris.data.toarray()), sp_clf.predict(iris.data)) if k == 'linear': assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray()) def test_error(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(nu=0.0) assert_raises(ValueError, clf.fit, X_sp, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X_sp, Y2) clf = svm.SVC() clf.fit(X_sp, Y) assert_array_equal(clf.predict(T), true_result) def test_linearsvc(): # Similar to test_SVC clf = svm.LinearSVC(random_state=0).fit(X, Y) sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y) assert_true(sp_clf.fit_intercept) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp)) clf.fit(X2, Y2) sp_clf.fit(X2_sp, Y2) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) def test_linearsvc_iris(): # Test the sparse LinearSVC with the iris dataset sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target) assert_equal(clf.fit_intercept, sp_clf.fit_intercept) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1) assert_array_almost_equal( clf.predict(iris.data.toarray()), sp_clf.predict(iris.data)) # check decision_function pred = np.argmax(sp_clf.decision_function(iris.data), 1) assert_array_almost_equal(pred, clf.predict(iris.data.toarray())) # sparsify the coefficients on both models and check that they still # produce the same results clf.sparsify() assert_array_equal(pred, clf.predict(iris.data)) sp_clf.sparsify() assert_array_equal(pred, sp_clf.predict(iris.data)) def test_weight(): # Test class weights X_, y_ = make_classification(n_samples=200, n_features=100, weights=[0.833, 0.167], random_state=0) X_ = sparse.csr_matrix(X_) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC()): clf.set_params(class_weight={0: 5}) clf.fit(X_[:180], y_[:180]) y_pred = clf.predict(X_[180:]) assert_true(np.sum(y_pred == y_[180:]) >= 11) def test_sample_weights(): # Test weights on individual samples clf = svm.SVC() clf.fit(X_sp, Y) assert_array_equal(clf.predict(X[2]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X_sp, Y, sample_weight=sample_weight) assert_array_equal(clf.predict(X[2]), [2.]) def test_sparse_liblinear_intercept_handling(): # Test that sparse liblinear honours intercept_scaling param test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC) 
def test_sparse_realdata(): # Test on a subset from the 20newsgroups dataset. # This catchs some bugs if input is not correctly converted into # sparse format or weights are not correctly initialized. data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069]) indices = np.array([6, 5, 35, 31]) indptr = np.array( [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4]) X = sparse.csr_matrix((data, indices, indptr)) y = np.array( [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2., 0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2., 0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1., 3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2., 0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2., 3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1., 1., 3.]) clf = svm.SVC(kernel='linear').fit(X.toarray(), y) sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y) assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) def test_sparse_svc_clone_with_callable_kernel(): # Test that the "dense_fit" is called even though we use sparse input # meaning that everything works fine. a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True, random_state=0) b = base.clone(a) b.fit(X_sp, Y) pred = b.predict(X_sp) b.predict_proba(X_sp) dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0) pred_dense = dense_svm.fit(X, Y).predict(X) assert_array_equal(pred_dense, pred) # b.decision_function(X_sp) # XXX : should be supported def test_timeout(): sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, sp.fit, X_sp, Y) def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2)
bsd-3-clause
Kongsea/tensorflow
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
10
12872
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Estimator input.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tempfile import numpy as np from tensorflow.python.training import training_util from tensorflow.contrib.layers.python.layers import optimizers from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn import models from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.estimators import _sklearn from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.metrics.python.ops import metric_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.training import input as input_lib from tensorflow.python.training import queue_runner_impl _BOSTON_INPUT_DIM = 13 _IRIS_INPUT_DIM = 4 def boston_input_fn(num_epochs=None): boston = base.load_boston() features = input_lib.limit_epochs( array_ops.reshape( constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]), num_epochs=num_epochs) labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1]) return features, labels def boston_input_fn_with_queue(num_epochs=None): features, labels = boston_input_fn(num_epochs=num_epochs) # Create a minimal queue runner. 
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32) queue_runner = queue_runner_impl.QueueRunner(fake_queue, [constant_op.constant(0)]) queue_runner_impl.add_queue_runner(queue_runner) return features, labels def iris_input_fn(): iris = base.load_iris() features = array_ops.reshape( constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM]) labels = array_ops.reshape(constant_op.constant(iris.target), [-1]) return features, labels def iris_input_fn_labels_dict(): iris = base.load_iris() features = array_ops.reshape( constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM]) labels = { 'labels': array_ops.reshape(constant_op.constant(iris.target), [-1]) } return features, labels def boston_eval_fn(): boston = base.load_boston() n_examples = len(boston.target) features = array_ops.reshape( constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM]) labels = array_ops.reshape( constant_op.constant(boston.target), [n_examples, 1]) return array_ops.concat([features, features], 0), array_ops.concat( [labels, labels], 0) def extract(data, key): if isinstance(data, dict): assert key in data return data[key] else: return data def linear_model_params_fn(features, labels, mode, params): features = extract(features, 'input') labels = extract(labels, 'labels') assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=params['learning_rate']) return prediction, loss, train_op def linear_model_fn(features, labels, mode): features = extract(features, 'input') labels = extract(labels, 'labels') assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) if isinstance(features, dict): (_, features), = features.items() prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return prediction, loss, train_op def linear_model_fn_with_model_fn_ops(features, labels, mode): """Same as linear_model_fn, but returns `ModelFnOps`.""" assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return model_fn.ModelFnOps( mode=mode, predictions=prediction, loss=loss, train_op=train_op) def logistic_model_no_mode_fn(features, labels): features = extract(features, 'input') labels = extract(labels, 'labels') labels = array_ops.one_hot(labels, 3, 1, 0) prediction, loss = (models.logistic_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return { 'class': math_ops.argmax(prediction, 1), 'prob': prediction }, loss, train_op VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n' EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n' class EstimatorInputTest(test.TestCase): def testContinueTrainingDictionaryInput(self): boston = base.load_boston() output_dir = tempfile.mkdtemp() est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir) boston_input = {'input': boston.data} float64_target = {'labels': boston.target.astype(np.float64)} est.fit(x=boston_input, y=float64_target, steps=50) scores = est.evaluate( x=boston_input, 
y=float64_target, metrics={'MSE': metric_ops.streaming_mean_squared_error}) del est # Create another estimator object with the same output dir. est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir) # Check we can evaluate and predict. scores2 = est2.evaluate( x=boston_input, y=float64_target, metrics={'MSE': metric_ops.streaming_mean_squared_error}) self.assertAllClose(scores2['MSE'], scores['MSE']) predictions = np.array(list(est2.predict(x=boston_input))) other_score = _sklearn.mean_squared_error(predictions, float64_target['labels']) self.assertAllClose(other_score, scores['MSE']) def testBostonAll(self): boston = base.load_boston() est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn)) float64_labels = boston.target.astype(np.float64) est.fit(x=boston.data, y=float64_labels, steps=100) scores = est.score( x=boston.data, y=float64_labels, metrics={'MSE': metric_ops.streaming_mean_squared_error}) predictions = np.array(list(est.predict(x=boston.data))) other_score = _sklearn.mean_squared_error(predictions, boston.target) self.assertAllClose(scores['MSE'], other_score) self.assertTrue('global_step' in scores) self.assertEqual(100, scores['global_step']) def testBostonAllDictionaryInput(self): boston = base.load_boston() est = estimator.Estimator(model_fn=linear_model_fn) boston_input = {'input': boston.data} float64_target = {'labels': boston.target.astype(np.float64)} est.fit(x=boston_input, y=float64_target, steps=100) scores = est.evaluate( x=boston_input, y=float64_target, metrics={'MSE': metric_ops.streaming_mean_squared_error}) predictions = np.array(list(est.predict(x=boston_input))) other_score = _sklearn.mean_squared_error(predictions, boston.target) self.assertAllClose(other_score, scores['MSE']) self.assertTrue('global_step' in scores) self.assertEqual(scores['global_step'], 100) def testIrisAll(self): iris = base.load_iris() est = estimator.SKCompat( estimator.Estimator(model_fn=logistic_model_no_mode_fn)) est.fit(iris.data, iris.target, steps=100) scores = est.score( x=iris.data, y=iris.target, metrics={('accuracy', 'class'): metric_ops.streaming_accuracy}) predictions = est.predict(x=iris.data) predictions_class = est.predict(x=iris.data, outputs=['class'])['class'] self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0]) self.assertAllClose(predictions['class'], predictions_class) self.assertAllClose( predictions['class'], np.argmax( predictions['prob'], axis=1)) other_score = _sklearn.accuracy_score(iris.target, predictions['class']) self.assertAllClose(scores['accuracy'], other_score) self.assertTrue('global_step' in scores) self.assertEqual(100, scores['global_step']) def testIrisAllDictionaryInput(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) iris_data = {'input': iris.data} iris_target = {'labels': iris.target} est.fit(iris_data, iris_target, steps=100) scores = est.evaluate( x=iris_data, y=iris_target, metrics={('accuracy', 'class'): metric_ops.streaming_accuracy}) predictions = list(est.predict(x=iris_data)) predictions_class = list(est.predict(x=iris_data, outputs=['class'])) self.assertEqual(len(predictions), iris.target.shape[0]) classes_batch = np.array([p['class'] for p in predictions]) self.assertAllClose(classes_batch, np.array([p['class'] for p in predictions_class])) self.assertAllClose( classes_batch, np.argmax( np.array([p['prob'] for p in predictions]), axis=1)) other_score = _sklearn.accuracy_score(iris.target, classes_batch) self.assertAllClose(other_score, 
scores['accuracy']) self.assertTrue('global_step' in scores) self.assertEqual(scores['global_step'], 100) def testIrisInputFn(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) est.fit(input_fn=iris_input_fn, steps=100) _ = est.evaluate(input_fn=iris_input_fn, steps=1) predictions = list(est.predict(x=iris.data)) self.assertEqual(len(predictions), iris.target.shape[0]) def testIrisInputFnLabelsDict(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) est.fit(input_fn=iris_input_fn_labels_dict, steps=100) _ = est.evaluate( input_fn=iris_input_fn_labels_dict, steps=1, metrics={ 'accuracy': metric_spec.MetricSpec( metric_fn=metric_ops.streaming_accuracy, prediction_key='class', label_key='labels') }) predictions = list(est.predict(x=iris.data)) self.assertEqual(len(predictions), iris.target.shape[0]) def testTrainInputFn(self): est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=1) _ = est.evaluate(input_fn=boston_eval_fn, steps=1) def testPredictInputFn(self): est = estimator.Estimator(model_fn=linear_model_fn) boston = base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) input_fn = functools.partial(boston_input_fn, num_epochs=1) output = list(est.predict(input_fn=input_fn)) self.assertEqual(len(output), boston.target.shape[0]) def testPredictInputFnWithQueue(self): est = estimator.Estimator(model_fn=linear_model_fn) boston = base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2) output = list(est.predict(input_fn=input_fn)) self.assertEqual(len(output), boston.target.shape[0] * 2) def testPredictConstInputFn(self): est = estimator.Estimator(model_fn=linear_model_fn) boston = base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) def input_fn(): features = array_ops.reshape( constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]) labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1]) return features, labels output = list(est.predict(input_fn=input_fn)) self.assertEqual(len(output), boston.target.shape[0]) if __name__ == '__main__': test.main()
apache-2.0
arabenjamin/scikit-learn
doc/datasets/mldata_fixture.py
367
1183
"""Fixture module to skip the datasets loading when offline Mock urllib2 access to mldata.org and create a temporary data folder. """ from os import makedirs from os.path import join import numpy as np import tempfile import shutil from sklearn import datasets from sklearn.utils.testing import install_mldata_mock from sklearn.utils.testing import uninstall_mldata_mock def globs(globs): # Create a temporary folder for the data fetcher global custom_data_home custom_data_home = tempfile.mkdtemp() makedirs(join(custom_data_home, 'mldata')) globs['custom_data_home'] = custom_data_home return globs def setup_module(): # setup mock urllib2 module to avoid downloading from mldata.org install_mldata_mock({ 'mnist-original': { 'data': np.empty((70000, 784)), 'label': np.repeat(np.arange(10, dtype='d'), 7000), }, 'iris': { 'data': np.empty((150, 4)), }, 'datasets-uci-iris': { 'double0': np.empty((150, 4)), 'class': np.empty((150,)), }, }) def teardown_module(): uninstall_mldata_mock() shutil.rmtree(custom_data_home)
bsd-3-clause
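A hedged aside, not part of the fixture: mldata.org is no longer online and recent scikit-learn releases removed the mldata fetchers this fixture mocks; OpenML is the usual replacement. A minimal sketch of loading the same MNIST data via OpenML (downloads on first call):

from sklearn.datasets import fetch_openml

mnist = fetch_openml('mnist_784', version=1)
print(mnist.data.shape)   # (70000, 784), matching the mocked array above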
mo-g/iris
lib/iris/quickplot.py
3
8992
# (C) British Crown Copyright 2010 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ High-level plotting extensions to :mod:`iris.plot`. These routines work much like their :mod:`iris.plot` counterparts, but they automatically add a plot title, axis titles, and a colour bar when appropriate. See also: :ref:`matplotlib <matplotlib:users-guide-index>`. """ from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import cf_units import matplotlib.pyplot as plt import iris.config import iris.coords import iris.plot as iplt def _use_symbol(units): # For non-time units use the shortest unit representation. # E.g. prefer 'K' over 'kelvin', but not '0.0174532925199433 rad' # over 'degrees' return (not units.is_time() and not units.is_time_reference() and len(units.symbol) < len(str(units))) def _title(cube_or_coord, with_units): if cube_or_coord is None: title = '' else: title = cube_or_coord.name().replace('_', ' ').capitalize() units = cube_or_coord.units if with_units and not (units.is_unknown() or units.is_no_unit() or units == cf_units.Unit('1')): if _use_symbol(units): units = units.symbol title += ' / {}'.format(units) return title def _label(cube, mode, result=None, ndims=2, coords=None): """Puts labels on the current plot using the given cube.""" plt.title(_title(cube, with_units=False)) if result is not None: draw_edges = mode == iris.coords.POINT_MODE bar = plt.colorbar(result, orientation='horizontal', drawedges=draw_edges) has_known_units = not (cube.units.is_unknown() or cube.units.is_no_unit()) if has_known_units and cube.units != cf_units.Unit('1'): # Use shortest unit representation for anything other than time if _use_symbol(cube.units): bar.set_label(cube.units.symbol) else: bar.set_label(cube.units) # Remove the tick which is put on the colorbar by default. bar.ax.tick_params(length=0) if coords is None: plot_defn = iplt._get_plot_defn(cube, mode, ndims) else: plot_defn = iplt._get_plot_defn_custom_coords_picked( cube, coords, mode, ndims=ndims) if ndims == 2: if not iplt._can_draw_map(plot_defn.coords): plt.ylabel(_title(plot_defn.coords[0], with_units=True)) plt.xlabel(_title(plot_defn.coords[1], with_units=True)) elif ndims == 1: plt.xlabel(_title(plot_defn.coords[0], with_units=True)) plt.ylabel(_title(cube, with_units=True)) else: msg = 'Unexpected number of dimensions (%s) given to _label.' 
% ndims raise ValueError(msg) def _label_with_bounds(cube, result=None, ndims=2, coords=None): _label(cube, iris.coords.BOUND_MODE, result, ndims, coords) def _label_with_points(cube, result=None, ndims=2, coords=None): _label(cube, iris.coords.POINT_MODE, result, ndims, coords) def _get_titles(u_object, v_object): if u_object is None: u_object = iplt._u_object_from_v_object(v_object) xunits = u_object is not None and not u_object.units.is_time_reference() yunits = not v_object.units.is_time_reference() xlabel = _title(u_object, with_units=xunits) ylabel = _title(v_object, with_units=yunits) title = '' if u_object is None: title = _title(v_object, with_units=False) elif isinstance(u_object, iris.cube.Cube) and \ not isinstance(v_object, iris.cube.Cube): title = _title(u_object, with_units=False) elif isinstance(v_object, iris.cube.Cube) and \ not isinstance(u_object, iris.cube.Cube): title = _title(v_object, with_units=False) return xlabel, ylabel, title def _label_1d_plot(*args): if len(args) > 1 and isinstance(args[1], (iris.cube.Cube, iris.coords.Coord)): xlabel, ylabel, title = _get_titles(*args[:2]) else: xlabel, ylabel, title = _get_titles(None, args[0]) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) def contour(cube, *args, **kwargs): """ Draws contour lines on a labelled plot based on the given Cube. With the basic call signature, contour "level" values are chosen automatically:: contour(cube) Supply a number to use *N* automatically chosen levels:: contour(cube, N) Supply a sequence *V* to use explicitly defined levels:: contour(cube, V) See :func:`iris.plot.contour` for details of valid keyword arguments. """ coords = kwargs.get('coords') result = iplt.contour(cube, *args, **kwargs) _label_with_points(cube, coords=coords) return result def contourf(cube, *args, **kwargs): """ Draws filled contours on a labelled plot based on the given Cube. With the basic call signature, contour "level" values are chosen automatically:: contour(cube) Supply a number to use *N* automatically chosen levels:: contour(cube, N) Supply a sequence *V* to use explicitly defined levels:: contour(cube, V) See :func:`iris.plot.contourf` for details of valid keyword arguments. """ coords = kwargs.get('coords') result = iplt.contourf(cube, *args, **kwargs) _label_with_points(cube, result, coords=coords) return result def outline(cube, coords=None, color='k', linewidth=None): """ Draws cell outlines on a labelled plot based on the given Cube. Kwargs: * coords: list of :class:`~iris.coords.Coord` objects or coordinate names Use the given coordinates as the axes for the plot. The order of the given coordinates indicates which axis to use for each, where the first element is the horizontal axis of the plot and the second element is the vertical axis of the plot. * color: None or mpl color The color of the cell outlines. If None, the matplotlibrc setting patch.edgecolor is used by default. * linewidth: None or number The width of the lines showing the cell outlines. If None, the default width in patch.linewidth in matplotlibrc is used. """ result = iplt.outline(cube, color=color, linewidth=linewidth, coords=coords) _label_with_bounds(cube, coords=coords) return result def pcolor(cube, *args, **kwargs): """ Draws a labelled pseudocolor plot based on the given Cube. See :func:`iris.plot.pcolor` for details of valid keyword arguments. 
""" coords = kwargs.get('coords') result = iplt.pcolor(cube, *args, **kwargs) _label_with_bounds(cube, result, coords=coords) return result def pcolormesh(cube, *args, **kwargs): """ Draws a labelled pseudocolour plot based on the given Cube. See :func:`iris.plot.pcolormesh` for details of valid keyword arguments. """ coords = kwargs.get('coords') result = iplt.pcolormesh(cube, *args, **kwargs) _label_with_bounds(cube, result, coords=coords) return result def points(cube, *args, **kwargs): """ Draws sample point positions on a labelled plot based on the given Cube. See :func:`iris.plot.points` for details of valid keyword arguments. """ coords = kwargs.get('coords') result = iplt.points(cube, *args, **kwargs) _label_with_points(cube, coords=coords) return result def plot(*args, **kwargs): """ Draws a labelled line plot based on the given cube(s) or coordinate(s). See :func:`iris.plot.plot` for details of valid arguments and keyword arguments. """ result = iplt.plot(*args, **kwargs) _label_1d_plot(*args) return result def scatter(x, y, *args, **kwargs): """ Draws a labelled scatter plot based on the given cubes or coordinates. See :func:`iris.plot.scatter` for details of valid arguments and keyword arguments. """ result = iplt.scatter(x, y, *args, **kwargs) _label_1d_plot(x, y) return result # Provide a convenience show method from pyplot. show = plt.show
gpl-3.0
liyinwei/pandas
quickstart/12_getting_data_in_out.py
1
1191
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
@Author: liyinwei
@E-mail: [email protected]
@Time: 2016/11/21 11:35
@Description: Reading and writing data files, covering:
  a) csv
     Writing to a csv file: http://pandas.pydata.org/pandas-docs/stable/io.html#io-store-in-csv
     Reading from a csv file: http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table
  b) HDF5: http://pandas.pydata.org/pandas-docs/stable/io.html#io-hdf5
  c) Excel: http://pandas.pydata.org/pandas-docs/stable/io.html#io-excel
"""

"""
ID: 12_01
Desc: Writing to a csv file
"""
# df.to_csv('foo.csv')

"""
ID: 12_02
Desc: Reading from a csv file
"""
# pd.read_csv('foo.csv')

"""
ID: 12_03
Desc: Writing to a HDF5 Store
"""
# df.to_hdf('foo.h5', 'df')

"""
ID: 12_04
Desc: Reading from a HDF5 Store
"""
# pd.read_hdf('foo.h5', 'df')

"""
ID: 12_05
Desc: Writing to an excel file
"""
# df.to_excel('foo.xlsx', sheet_name='Sheet1')

"""
ID: 12_06
Desc: Reading from an excel file
"""
# pd.read_excel('foo.xlsx', 'Sheet1', index_col=None, na_values=['NA'])

if __name__ == '__main__':
    pass
gpl-3.0
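The snippets above are all commented out and assume a pre-existing `df`/`pd`. A minimal runnable round trip for the csv case (the file name 'foo.csv' is the same illustrative name used in the original comments):

import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3], 'B': ['x', 'y', 'z']})
df.to_csv('foo.csv', index=False)     # write
print(pd.read_csv('foo.csv'))         # read back the same frame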
ryandougherty/mwa-capstone
MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/legend_auto.py
7
2267
""" This file was written to test matplotlib's autolegend placement algorithm, but shows lots of different ways to create legends so is useful as a general examples Thanks to John Gill and Phil ?? for help at the matplotlib sprint at pycon 2005 where the auto-legend support was written. """ from pylab import * import sys rcParams['legend.loc'] = 'best' N = 100 x = arange(N) def fig_1(): figure(1) t = arange(0, 40.0 * pi, 0.1) l, = plot(t, 100*sin(t), 'r', label='sine') legend() def fig_2(): figure(2) plot(x, 'o', label='x=y') legend() def fig_3(): figure(3) plot(x, -x, 'o', label='x= -y') legend() def fig_4(): figure(4) plot(x, ones(len(x)), 'o', label='y=1') plot(x, -ones(len(x)), 'o', label='y=-1') legend() def fig_5(): figure(5) n, bins, patches = hist(randn(1000), 40, normed=1) l, = plot(bins, normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3) legend([l, patches[0]], ['fit', 'hist']) def fig_6(): figure(6) plot(x, 50-x, 'o', label='y=1') plot(x, x-50, 'o', label='y=-1') legend() def fig_7(): figure(7) xx = x - (N/2.0) plot(xx, (xx*xx)-1225, 'bo', label='$y=x^2$') plot(xx, 25*xx, 'go', label='$y=25x$') plot(xx, -25*xx, 'mo', label='$y=-25x$') legend() def fig_8(): figure(8) b1 = bar(x, x, color='m') b2 = bar(x, x[::-1], color='g') legend([b1[0], b2[0]], ['up', 'down']) def fig_9(): figure(9) b1 = bar(x, -x) b2 = bar(x, -x[::-1], color='r') legend([b1[0], b2[0]], ['down', 'up']) def fig_10(): figure(10) b1 = bar(x, x, bottom=-100, color='m') b2 = bar(x, x[::-1], bottom=-100, color='g') b3 = bar(x, -x, bottom=100) b4 = bar(x, -x[::-1], bottom=100, color='r') legend([b1[0], b2[0], b3[0], b4[0]], ['bottom right', 'bottom left', 'top left', 'top right']) if __name__ == '__main__': nfigs = 10 figures = [] for f in sys.argv[1:]: try: figures.append(int(f)) except ValueError: pass if len(figures) == 0: figures = range(1, nfigs+1) for fig in figures: fn_name = "fig_%d" % fig fn = globals()[fn_name] fn() show()
gpl-2.0
paulrbrenner/GOS
examples/migration/visualization/plotlyviz.py
2
1188
import plotly
import pandas as pd


def map(dataframe, title="Map", colorbarName=None):
    # ---The next line and the line at bottom are for the Jupyter Notebook---
    # plotly.offline.init_notebook_mode(connected=True)

    data = [dict(
        type='choropleth',
        locations=dataframe['country'],
        z=dataframe['value'],
        # text = dataframe['name'],
        colorscale=[[0, "rgb(215,25,28)"], [0.25, "rgb(253,174,97)"],
                    [0.5, "rgb(255,255,191)"], [0.75, "rgb(166,217,106)"],
                    [1, "rgb(26,150,65)"]],
        autocolorscale=False,
        reversescale=False,
        marker=dict(
            line=dict(
                color='rgb(180,180,180)',
                width=0.5
            )
        ),
        colorbar=dict(
            autotick=False,
            # tickprefix = 'V',
            title=colorbarName),
    )]

    layout = dict(
        title=title,
        titlefont=dict(size=60),
        geo=dict(
            showframe=False,
            showcoastlines=False,
            showcountries=True,
            countrycolor="#f0f0f0",
            projection=dict(type='Mercator')
        )
    )

    fig = dict(data=data, layout=layout)
    # plotly.offline.iplot(fig, validate=False, filename='plotly-map')
    plotly.offline.plot(fig, validate=False, filename='plotly-map')
apache-2.0
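A hypothetical usage sketch for the `map()` helper above: from the function body it only needs a DataFrame with 'country' and 'value' columns. The import name, the ISO-3 country codes, and the title below are illustrative assumptions, not part of the original module, and a plotly version contemporary to the module is assumed:

import pandas as pd
import plotlyviz   # assumes the module above is importable under its file name

df = pd.DataFrame({'country': ['USA', 'DEU', 'JPN'],
                   'value': [1.0, 2.0, 3.0]})
# Writes an offline HTML choropleth named 'plotly-map'.
plotlyviz.map(df, title='Example map', colorbarName='value')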
awni/tensorflow
tensorflow/examples/skflow/multiple_gpu.py
1
1664
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from sklearn import datasets, metrics, cross_validation

import tensorflow as tf
from tensorflow.contrib import skflow

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)


def my_model(X, y):
    """
    This is DNN with 10, 20, 10 hidden layers, and dropout of 0.5 probability.

    Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0
    and CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
    """
    with tf.device('/gpu:1'):
        layers = skflow.ops.dnn(X, [10, 20, 10], keep_prob=0.5)
    with tf.device('/gpu:2'):
        return skflow.models.logistic_regression(layers, y)


classifier = skflow.TensorFlowEstimator(model_fn=my_model, n_classes=3)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
apache-2.0
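This example targets old releases: `sklearn.cross_validation` and `tensorflow.contrib.skflow` have since been removed. The data split itself maps directly onto the current scikit-learn API; a minimal sketch of just that step:

from sklearn import datasets
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)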
skudriashev/incubator-airflow
airflow/hooks/base_hook.py
8
2950
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import random

from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin

CONN_ENV_PREFIX = 'AIRFLOW_CONN_'


class BaseHook(LoggingMixin):
    """
    Abstract base class for hooks, hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, PigHook return
    object that can handle the connection and interaction to specific
    instances of these systems, and expose consistent methods to interact
    with them.
    """

    def __init__(self, source):
        pass

    @classmethod
    def _get_connections_from_db(cls, conn_id):
        session = settings.Session()
        db = (
            session.query(Connection)
            .filter(Connection.conn_id == conn_id)
            .all()
        )
        session.expunge_all()
        session.close()
        if not db:
            raise AirflowException(
                "The conn_id `{0}` isn't defined".format(conn_id))
        return db

    @classmethod
    def _get_connection_from_env(cls, conn_id):
        environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
        conn = None
        if environment_uri:
            conn = Connection(conn_id=conn_id, uri=environment_uri)
        return conn

    @classmethod
    def get_connections(cls, conn_id):
        conn = cls._get_connection_from_env(conn_id)
        if conn:
            conns = [conn]
        else:
            conns = cls._get_connections_from_db(conn_id)
        return conns

    @classmethod
    def get_connection(cls, conn_id):
        conn = random.choice(cls.get_connections(conn_id))
        if conn.host:
            log = LoggingMixin().log
            log.info("Using connection to: %s", conn.host)
        return conn

    @classmethod
    def get_hook(cls, conn_id):
        connection = cls.get_connection(conn_id)
        return connection.get_hook()

    def get_conn(self):
        raise NotImplementedError()

    def get_records(self, sql):
        raise NotImplementedError()

    def get_pandas_df(self, sql):
        raise NotImplementedError()

    def run(self, sql):
        raise NotImplementedError()
apache-2.0
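An illustrative sketch of how a concrete hook would plug into the BaseHook connection lookup shown above. `MyServiceHook` and the conn_id are made-up names, not Airflow APIs; a real hook would build a client object in get_conn() instead of returning the raw connection fields:

from airflow.hooks.base_hook import BaseHook


class MyServiceHook(BaseHook):
    def __init__(self, conn_id='my_service_default'):
        super(MyServiceHook, self).__init__(source=None)
        self.conn_id = conn_id

    def get_conn(self):
        # Resolves AIRFLOW_CONN_MY_SERVICE_DEFAULT or the metadata DB entry.
        conn = self.get_connection(self.conn_id)
        return {'host': conn.host, 'login': conn.login, 'port': conn.port}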
tinkerinestudio/Tinkerine-Suite
TinkerineSuite/python/Lib/numpy/core/function_base.py
82
5474
__all__ = ['logspace', 'linspace'] import numeric as _nx from numeric import array def linspace(start, stop, num=50, endpoint=True, retstep=False): """ Return evenly spaced numbers over a specified interval. Returns `num` evenly spaced samples, calculated over the interval [`start`, `stop` ]. The endpoint of the interval can optionally be excluded. Parameters ---------- start : scalar The starting value of the sequence. stop : scalar The end value of the sequence, unless `endpoint` is set to False. In that case, the sequence consists of all but the last of ``num + 1`` evenly spaced samples, so that `stop` is excluded. Note that the step size changes when `endpoint` is False. num : int, optional Number of samples to generate. Default is 50. endpoint : bool, optional If True, `stop` is the last sample. Otherwise, it is not included. Default is True. retstep : bool, optional If True, return (`samples`, `step`), where `step` is the spacing between samples. Returns ------- samples : ndarray There are `num` equally spaced samples in the closed interval ``[start, stop]`` or the half-open interval ``[start, stop)`` (depending on whether `endpoint` is True or False). step : float (only if `retstep` is True) Size of spacing between samples. See Also -------- arange : Similiar to `linspace`, but uses a step size (instead of the number of samples). logspace : Samples uniformly distributed in log space. Examples -------- >>> np.linspace(2.0, 3.0, num=5) array([ 2. , 2.25, 2.5 , 2.75, 3. ]) >>> np.linspace(2.0, 3.0, num=5, endpoint=False) array([ 2. , 2.2, 2.4, 2.6, 2.8]) >>> np.linspace(2.0, 3.0, num=5, retstep=True) (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) Graphical illustration: >>> import matplotlib.pyplot as plt >>> N = 8 >>> y = np.zeros(N) >>> x1 = np.linspace(0, 10, N, endpoint=True) >>> x2 = np.linspace(0, 10, N, endpoint=False) >>> plt.plot(x1, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(x2, y + 0.5, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.ylim([-0.5, 1]) (-0.5, 1) >>> plt.show() """ num = int(num) if num <= 0: return array([], float) if endpoint: if num == 1: return array([float(start)]) step = (stop-start)/float((num-1)) y = _nx.arange(0, num) * step + start y[-1] = stop else: step = (stop-start)/float(num) y = _nx.arange(0, num) * step + start if retstep: return y, step else: return y def logspace(start,stop,num=50,endpoint=True,base=10.0): """ Return numbers spaced evenly on a log scale. In linear space, the sequence starts at ``base ** start`` (`base` to the power of `start`) and ends with ``base ** stop`` (see `endpoint` below). Parameters ---------- start : float ``base ** start`` is the starting value of the sequence. stop : float ``base ** stop`` is the final value of the sequence, unless `endpoint` is False. In that case, ``num + 1`` values are spaced over the interval in log-space, of which all but the last (a sequence of length ``num``) are returned. num : integer, optional Number of samples to generate. Default is 50. endpoint : boolean, optional If true, `stop` is the last sample. Otherwise, it is not included. Default is True. base : float, optional The base of the log space. The step size between the elements in ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. Default is 10.0. Returns ------- samples : ndarray `num` samples, equally spaced on a log scale. See Also -------- arange : Similiar to linspace, with the step size specified instead of the number of samples. 
Note that, when used with a float endpoint, the endpoint may or may not be included. linspace : Similar to logspace, but with the samples uniformly distributed in linear space, instead of log space. Notes ----- Logspace is equivalent to the code >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) ... # doctest: +SKIP >>> power(base, y) ... # doctest: +SKIP Examples -------- >>> np.logspace(2.0, 3.0, num=4) array([ 100. , 215.443469 , 464.15888336, 1000. ]) >>> np.logspace(2.0, 3.0, num=4, endpoint=False) array([ 100. , 177.827941 , 316.22776602, 562.34132519]) >>> np.logspace(2.0, 3.0, num=4, base=2.0) array([ 4. , 5.0396842 , 6.34960421, 8. ]) Graphical illustration: >>> import matplotlib.pyplot as plt >>> N = 10 >>> x1 = np.logspace(0.1, 1, N, endpoint=True) >>> x2 = np.logspace(0.1, 1, N, endpoint=False) >>> y = np.zeros(N) >>> plt.plot(x1, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(x2, y + 0.5, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.ylim([-0.5, 1]) (-0.5, 1) >>> plt.show() """ y = linspace(start,stop,num=num,endpoint=endpoint) return _nx.power(base,y)
agpl-3.0
kwentz10/Photosynthesis_Optimization_Modeling
Traits_Physical_Factorial_CummulativeGS.py
1
9910
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Thu May 25 10:10:28 2017 @author: Katherine """ #!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue May 23 16:04:21 2017 @author: Katherine """ # -*- coding: utf-8 -*- """ Photosynthesis and Stomatal Conductance Model Created 9/27/2016 Katherine Wentz This is a program that runs photosynthesis and stomatal conductance models given changes in leaf- level traits. The end product is graphs of NUE vs. WUE. Update: I am going to run the model for plants with traits that are distinctive of the meadow moisture gradient in the alpine tundra. Fix: correct for atmospheric pressure differences in co2, o2, and vapor pressure Fix: vcmax temp dependence (pg 63 in plant physiological ecology book) Fix: NEW VARIBALE TRAIT-->make the fraction of leaf N in rubisco go down with increasing SLA, chlorophyll content, and decreasing light (wet meadow)--more N is allocated to thylakoids. The only way for chl/m2 to increase even when g N/m2 goes down or is constant is for the leaf to allocate more of leaf N to chl...also, note that there is more organic N designated to photo in leaf when SLA goes up because less N is used in structure. see "Photosynthesis or persistence: N allocation in leaves of evergreen and deciduous... by Takashima et al. 2004. Also see Photosynthetic nitrogen-use efficiency of species...by Poorter and Evans 1998 Note to self: NUE and WUE relationship flipflops with change in air temperature; NUE makes sense because C:N decreases from dry to wet meadows; WUE increasing in snowbed does not necessarilly make sense--look in the literature for this herbs have a higher NUE """ #---------------Import Modules---------------# import itertools as it import numpy as np from matplotlib import pyplot as plt #Import combinations of variable parameters from uncertain_params import monte_carlo #Import photosynthesis model from Photosynthesis_Model import photo_bound_meso_eqstom as photo #Import functions to switch between Pa and umol/mol at sea level from photo_functions import pa_con_atmfrac #import timeseries of vwc and temp from time_dep_params import surtemp_dm, surtemp_wm, vwc_dm, vwc_wm, na_dm_min_inter,na_wm_min_inter,na_dm_max_inter,na_wm_max_inter #---------------Determine if I Want to Keep Any of the Variable Parameters Constant---------------# const_params=[] for xxx in it.combinations(['ht'],0): #keep ht and t constant for constant vpd const_params+=[xxx] #do this when I do not put any of the variable parameters as constant. instead I #vary each parameter one at a time while keeping the other parameters constant. 
if const_params==[()]: const_params=[[-999999]] #---------------Begin Looping Through Photosynthesis Model---------------# #each loop is for a constant value, or combinatin of constant values, of variable parameter as determined above for ii in range(len(const_params)): #---------------Run through time series---------------# days=np.linspace(1,365,365) #dry meadow tot_nue_dm_avg=[] tot_wue_dm_avg=[] tot_nue_dm_min=[] tot_wue_dm_min=[] tot_nue_dm_max=[] tot_wue_dm_max=[] tot_A_dm_avg=[] tot_gs_dm_avg=[] #moist meadow tot_nue_mm_avg=[] tot_wue_mm_avg=[] tot_nue_mm_min=[] tot_wue_mm_min=[] tot_nue_mm_max=[] tot_wue_mm_max=[] tot_A_mm_avg=[] tot_gs_mm_avg=[] #wet meadow tot_nue_wm_avg=[] tot_wue_wm_avg=[] tot_nue_wm_min=[] tot_wue_wm_min=[] tot_nue_wm_max=[] tot_wue_wm_max=[] tot_A_wm_avg=[] tot_gs_wm_avg=[] #---------------Photosynthesis + Stomatal Conductance Model---------------# ##---Constant Parameter Arrays for Model---## #----Params Used in Model Currently----# tk_25=298.16; #absolute temperature at 25 C ekc=80500.0 #Activation energy for K of CO2 (J mol-1) eko=14500.0 #Activation energy for K of O2 (J mol-1) etau=-29000.0 #Activation energy for tau (???) (J mol-1) ev=55000.0 #Activation energy for carboxylation (J mol-1) ej=55000.0 #Activation energy for electron transport (J mol-1) toptv=303.0 #Optimum temperature for maximum carboxylation (K) toptj=303.0 #Optimum temperature for maximum electron transport (K) ra=np.zeros(shape=1)+20.7 #specific rubisco activity (umol CO2/g Rub s) flnr=np.zeros(shape=1)+0.1 #fraction of leaf nitrogen in rubisco (g N Rub/g N leaf) frnr=np.zeros(shape=1)+6.25 #weight fraction of nitrogen in rubisco molecule (g Rub/g N Rub) rh=np.zeros(shape=1)+0.5 #relative humidity (kPa/kPa) ca=np.zeros(shape=1)+405 #ambient carbon dioxide (umol CO2/mol air) ko25=np.zeros(shape=1)+30000 #Michaelis-Menten kinetic coefficient for oxygen at 25 C(Pa) kc25=np.zeros(shape=1)+30 #Michaelis-Menten kinetic coefficient for carbon dioxide at 25 C (Pa) o=np.zeros(shape=1)+210000 #concentration of ambient oxygen (umol/mol) g0=np.zeros(shape=1)+0.002 #Ball-Berry stomatal conductance intercept parameter (mol H2O/m2s) a=np.zeros(shape=1)+1.6 #Conversion Coefficient between stomatal conductance to water and carbon dioxide (unitless) ij=np.zeros(shape=1)+1.0 #leaf angle index--downregulates jmax m=np.zeros(shape=1)+9.0 #ball-berry parameter (unitless) b=1.37 #Conversion Coefficient between boundary layer conductance to water and carbon dioxide u=5.0 #windspeed (m/s) qeff=0.32 #leaf quantum yield, electrons PAR=2000 #photosynthetic active radiation (umol/m2s) jm=2.68 #slope coefficient vwc_min=0.08 #minimum soil water content for photosynthesis to occur (permanent wilting point) (cm3/cm3) vwc_max=0.68 #maximum soil water content where increases in soil water do not affect photosynthesis (field capacity?) (cm3/cm3) q=0.2 #parameter for soil water affect on photosynthesis (unitless) #------constant variable params for sensitivty analysis-----# chl_c=np.zeros(shape=1)+(np.mean([396,465,476])) #Chlorophyll Content of the Leaf (umol chl/m2) ht_c=np.zeros(shape=1)+(np.mean([9.2,19.5,20.0])) #Temperature of the Leaf (K) dia_c=np.zeros(shape=1)+(np.mean([1.4,2.3,2.6])/100.) 
#Mean diameter or size of leaf (m) na_c=np.zeros(shape=1)+(np.mean([2.5,5.6,6.3])) #leaf nitrogen (g N/ m2) t_c=np.zeros(shape=1)+15.0 #temp (C) #-----which timeseries should I use--based on factorial meadow type---# na_min=[na_dm_min_inter, na_dm_min_inter, na_wm_min_inter, na_wm_min_inter] na_max=[na_dm_max_inter, na_dm_max_inter, na_wm_max_inter, na_wm_max_inter] vwc_type=[vwc_dm,vwc_dm,vwc_wm,vwc_wm] temp_type=[surtemp_dm,surtemp_dm,surtemp_wm,surtemp_wm] A_tot_all=[] chl_mean=[[395.7132],[475.8913],[395.7132],[475.8913]] chl_sd=[[24.410199999999975],[29.185099999999977],[24.410199999999975],[29.185099999999977]] dia_mean=[[1.6/100.],[3.0/100.],[1.6/100.],[3.0/100.]] dia_sd=[[0.9/100.0],[1.2/100.0],[0.9/100.0],[1.2/100.0]] ht_mean=[[9.183549],[19.98519],[9.183549],[19.98519]] ht_sd=[[1.5],[3.1],[1.5],[3.1]] depth=[0.2,0.2,0.2,0.4,0.4,0.4] #---------------Import Variable Parameter Arrays from Leaf Parameter File---------------# for iii in range(len(chl_mean)): A_tot=0 for time in range(129): params=monte_carlo(chl_mean[iii], chl_sd[iii], dia_mean[iii], dia_sd[iii], [na_min[iii][time]], [na_max[iii][time]], ht_mean[iii], ht_sd[iii]) A_day=[] for xx in range(len(params)): for yy in range(len(params[xx])): for key,val in params[xx][yy].items(): exec(key + '=val') #set variable parameters constant if I specify this above if 'na' in const_params[ii]: na=na_c if 'dia' in const_params[ii]: dia=dia_c if 'chl' in const_params[ii]: chl=chl_c if 'ht' in const_params[ii]: ht=ht_c #------calculate vapor pressure-----# pa_v=611*np.exp((17.27*temp_type[iii][time])/(temp_type[iii][time]+237.3)) #saturation vapor pressure of air (Pa) ea_str=pa_con_atmfrac(pa_v,3528) #saturation vapor pressure of air (Pa-->umol h20/mol air) ea=rh*ea_str #vapor pressure (umol h2O/mol air) #correct for leaf temperatures using leaf height t_diff=18-0.4*ht tl=temp_type[iii][time]+t_diff z=depth[iii] #---------------Photosynthesis Function---------------# #alter this line of code for when implementing different photosynthesis functions wue, nue, A, E, cs, ci, gsw, gs, gbw, gb, gm, cc,dd =photo(tk_25,ekc,eko,etau,ev,ej,toptv,toptj,na, qeff, PAR,tl,ea,chl,ij,kc25,ko25,o,ca,rh,m,a,frnr,flnr,ra,jm,g0,b,dia,u,q,vwc_min,vwc_max,vwc_type[iii][time],z) #test to make sure wue and nue are positive at not 'nan' if wue[0]==-999 and nue[0]==-999: continue if np.isnan(A[0]): A[0]=0.0 A_day+=[(A[0]*3600*6)/1000000.*44.] A_tot+=np.mean(A_day) A_tot_all+=[A_tot] print A_tot_all print A_tot_all
mit
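The loop above relies on two helpers defined earlier in the same file (monte_carlo and photo), which are not reproduced here. As a minimal, self-contained sketch of the per-timestep conversions that are visible in the loop (the Tetens-style saturation vapor pressure and the height-based leaf temperature correction), with purely illustrative input values and without the pa_con_atmfrac unit conversion:

import numpy as np

def saturation_vapor_pressure(t_air_c):
    # Tetens-style saturation vapor pressure of air (Pa), as computed in the loop above.
    return 611.0 * np.exp((17.27 * t_air_c) / (t_air_c + 237.3))

def leaf_temperature(t_air_c, ht):
    # Leaf temperature corrected by plant height: tl = t_air + (18 - 0.4 * ht).
    return t_air_c + (18.0 - 0.4 * ht)

t_air = 12.0   # surface temperature for one day (deg C), illustrative
ht = 9.2       # plant height, illustrative
rh = 0.5       # relative humidity used in the script

pa_v = saturation_vapor_pressure(t_air)   # Pa
ea = rh * pa_v                            # vapor pressure before the script's unit conversion
tl = leaf_temperature(t_air, ht)
print(pa_v, ea, tl)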
tu-rbo/omip
shape_reconstruction/src/plot_statistics.py
1
1935
#!/usr/bin/python
##############
# View the statistics for a single experiment (bag file)
#
# Statistics are plotted separately, i.e. one window per object per variant,
# showing precision and recall.

import numpy as np
import matplotlib.pylab as plt
import matplotlib
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True

import sys
import os
import os.path


def plot_statistics(folder):
    for f in os.listdir(folder):
        if f[-3:] != "txt":
            continue

        filename = os.path.join(folder, f)
        data = np.genfromtxt(filename, dtype=float, delimiter=' ', names=True)

        precision = data['tp'] / (data['tp'] + data['fp'])
        recall = data['tp'] / (data['tp'] + data['fn'])
        seg_acc = data['tp'] / (data['tp'] + data['fp'] + data['fn'])
        time = data['time'] - data['time'][0]

        # clean up
        precision[np.where(np.isnan(precision))] = 0.
        recall[np.where(np.isnan(recall))] = 0.
        seg_acc[np.where(np.isnan(seg_acc))] = 0.

        plt.figure(figsize=(7.5, 5))
        for y in np.arange(0.0, 1.1, 0.2):
            plt.plot(time, [y] * len(time), "--", lw=0.5, color="black", alpha=0.3)
        plt.plot(time, seg_acc, "k", label="Segmentation Accuracy", lw=3.0)
        plt.plot(time, precision, "b", label="Precision", lw=2.0, ls="--")
        plt.plot(time, recall, "r", label="Recall", lw=2.0, ls="--")

        plt.xlim(0, data['time'][-1] - data['time'][0])
        plt.ylim(-0.1, 1.1)
        plt.xlabel("time")
        plt.title(os.path.basename(filename[:-3]))
        plt.legend(loc=4)

        img_path = os.path.join(folder, f[:-3] + "pdf")
        print("Saving image at %s" % img_path)
        plt.savefig(img_path, bbox_inches="tight")


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: plot_statistics.py <experiment folder>")
        sys.exit()

    print("Starting plot statistics")
    plot_statistics(sys.argv[1])
    plt.show()
mit
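As a quick illustration of the metrics the script derives from each statistics file (precision, recall, and a segmentation accuracy that behaves like intersection-over-union), here is a small NumPy sketch with hypothetical tp/fp/fn counts; frames with no detections produce 0/0 = nan and are mapped to 0, exactly as in the clean-up step above:

import numpy as np

# Hypothetical per-frame counts, in the same layout the script reads from its txt files.
tp = np.array([0., 40., 55., 60.])
fp = np.array([0., 10.,  5.,  5.])
fn = np.array([0., 20., 15., 10.])

precision = tp / (tp + fp)        # tp / (tp + fp)
recall = tp / (tp + fn)           # tp / (tp + fn)
seg_acc = tp / (tp + fp + fn)     # intersection-over-union style accuracy

# Frames with no detections give nan; map them to 0 as the script does.
for arr in (precision, recall, seg_acc):
    arr[np.isnan(arr)] = 0.

print(precision, recall, seg_acc)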
nrhine1/scikit-learn
examples/decomposition/plot_kernel_pca.py
353
2011
""" ========== Kernel PCA ========== This example shows that Kernel PCA is able to find a projection of the data that makes data linearly separable. """ print(__doc__) # Authors: Mathieu Blondel # Andreas Mueller # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles np.random.seed(0) X, y = make_circles(n_samples=400, factor=.3, noise=.05) kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10) X_kpca = kpca.fit_transform(X) X_back = kpca.inverse_transform(X_kpca) pca = PCA() X_pca = pca.fit_transform(X) # Plot results plt.figure() plt.subplot(2, 2, 1, aspect='equal') plt.title("Original space") reds = y == 0 blues = y == 1 plt.plot(X[reds, 0], X[reds, 1], "ro") plt.plot(X[blues, 0], X[blues, 1], "bo") plt.xlabel("$x_1$") plt.ylabel("$x_2$") X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50)) X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T # projection on the first principal component (in the phi space) Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape) plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower') plt.subplot(2, 2, 2, aspect='equal') plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro") plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo") plt.title("Projection by PCA") plt.xlabel("1st principal component") plt.ylabel("2nd component") plt.subplot(2, 2, 3, aspect='equal') plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro") plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo") plt.title("Projection by KPCA") plt.xlabel("1st principal component in space induced by $\phi$") plt.ylabel("2nd component") plt.subplot(2, 2, 4, aspect='equal') plt.plot(X_back[reds, 0], X_back[reds, 1], "ro") plt.plot(X_back[blues, 0], X_back[blues, 1], "bo") plt.title("Original space after inverse transform") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35) plt.show()
bsd-3-clause
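A compact way to check the same point without plotting is to look at the first kernel PCA component directly. This sketch reuses the example's settings (RBF kernel, gamma=10, fit_inverse_transform=True) and prints the class separation along that component plus the pre-image reconstruction error:

import numpy as np
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA

X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0)

# RBF kernel PCA with an (approximate) pre-image, as in the example above.
kpca = KernelPCA(kernel="rbf", gamma=10, fit_inverse_transform=True)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)

# In the RBF-induced space the two circles separate along the first component.
print("first-component class means:",
      X_kpca[y == 0, 0].mean(), X_kpca[y == 1, 0].mean())
print("mean reconstruction error:", np.mean(np.sum((X - X_back) ** 2, axis=1)))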
cjayb/mne-python
mne/decoding/receptive_field.py
2
20189
# -*- coding: utf-8 -*- # Authors: Chris Holdgraf <[email protected]> # Eric Larson <[email protected]> # License: BSD (3-clause) import numbers import numpy as np from scipy import linalg from .base import get_coef, BaseEstimator, _check_estimator from .time_delaying_ridge import TimeDelayingRidge from ..fixes import is_regressor from ..utils import _validate_type, verbose class ReceptiveField(BaseEstimator): """Fit a receptive field model. This allows you to fit an encoding model (stimulus to brain) or a decoding model (brain to stimulus) using time-lagged input features (for example, a spectro- or spatio-temporal receptive field, or STRF). Parameters ---------- tmin : float The starting lag, in seconds (or samples if ``sfreq`` == 1). tmax : float The ending lag, in seconds (or samples if ``sfreq`` == 1). Must be >= tmin. sfreq : float The sampling frequency used to convert times into samples. feature_names : array, shape (n_features,) | None Names for input features to the model. If None, feature names will be auto-generated from the shape of input data after running `fit`. estimator : instance of sklearn.base.BaseEstimator | float | None The model used in fitting inputs and outputs. This can be any scikit-learn-style model that contains a fit and predict method. If a float is passed, it will be interpreted as the ``alpha`` parameter to be passed to a Ridge regression model. If `None`, then a Ridge regression model with an alpha of 0 will be used. fit_intercept : bool | None If True (default), the sample mean is removed before fitting. If ``estimator`` is a :class:`sklearn.base.BaseEstimator`, this must be None or match ``estimator.fit_intercept``. scoring : ['r2', 'corrcoef'] Defines how predictions will be scored. Currently must be one of 'r2' (coefficient of determination) or 'corrcoef' (the correlation coefficient). patterns : bool If True, inverse coefficients will be computed upon fitting using the covariance matrix of the inputs, and the cross-covariance of the inputs/outputs, according to [5]_. Defaults to False. n_jobs : int | str Number of jobs to run in parallel. Can be 'cuda' if CuPy is installed properly and ``estimator is None``. .. versionadded:: 0.18 edge_correction : bool If True (default), correct the autocorrelation coefficients for non-zero delays for the fact that fewer samples are available. Disabling this speeds up performance at the cost of accuracy depending on the relationship between epoch length and model duration. Only used if ``estimator`` is float or None. .. versionadded:: 0.18 verbose : bool, str, int, or None If not None, override default verbose level (see :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>` for more). Attributes ---------- coef_ : array, shape ([n_outputs, ]n_features, n_delays) The coefficients from the model fit, reshaped for easy visualization. During :meth:`mne.decoding.ReceptiveField.fit`, if ``y`` has one dimension (time), the ``n_outputs`` dimension here is omitted. patterns_ : array, shape ([n_outputs, ]n_features, n_delays) If fit, the inverted coefficients from the model. delays_ : array, shape (n_delays,), dtype int The delays used to fit the model, in indices. To return the delays in seconds, use ``self.delays_ / self.sfreq`` valid_samples_ : slice The rows to keep during model fitting after removing rows with missing values due to time delaying. This can be used to get an output equivalent to using :func:`numpy.convolve` or :func:`numpy.correlate` with ``mode='valid'``. 
See Also -------- mne.decoding.TimeDelayingRidge Notes ----- For a causal system, the encoding model will have significant non-zero values only at positive lags. In other words, lags point backward in time relative to the input, so positive lags correspond to previous input time samples, while negative lags correspond to future input time samples. References ---------- .. [1] Theunissen, F. E. et al. Estimating spatio-temporal receptive fields of auditory and visual neurons from their responses to natural stimuli. Network 12, 289-316 (2001). .. [2] Willmore, B. & Smyth, D. Methods for first-order kernel estimation: simple-cell receptive fields from responses to natural scenes. Network 14, 553-77 (2003). .. [3] Crosse, M. J., Di Liberto, G. M., Bednar, A. & Lalor, E. C. (2016). The Multivariate Temporal Response Function (mTRF) Toolbox: A MATLAB Toolbox for Relating Neural Signals to Continuous Stimuli. Frontiers in Human Neuroscience 10, 604. doi:10.3389/fnhum.2016.00604 .. [4] Holdgraf, C. R. et al. Rapid tuning shifts in human auditory cortex enhance speech intelligibility. Nature Communications, 7, 13654 (2016). doi:10.1038/ncomms13654 .. [5] Haufe, S., Meinecke, F., Goergen, K., Daehne, S., Haynes, J.-D., Blankertz, B., & Biessmann, F. (2014). On the interpretation of weight vectors of linear models in multivariate neuroimaging. NeuroImage, 87, 96-110. doi:10.1016/j.neuroimage.2013.10.067 """ @verbose def __init__(self, tmin, tmax, sfreq, feature_names=None, estimator=None, fit_intercept=None, scoring='r2', patterns=False, n_jobs=1, edge_correction=True, verbose=None): self.feature_names = feature_names self.sfreq = float(sfreq) self.tmin = tmin self.tmax = tmax self.estimator = 0. if estimator is None else estimator self.fit_intercept = fit_intercept self.scoring = scoring self.patterns = patterns self.n_jobs = n_jobs self.edge_correction = edge_correction self.verbose = verbose def __repr__(self): # noqa: D105 s = "tmin, tmax : (%.3f, %.3f), " % (self.tmin, self.tmax) estimator = self.estimator if not isinstance(estimator, str): estimator = type(self.estimator) s += "estimator : %s, " % (estimator,) if hasattr(self, 'coef_'): if self.feature_names is not None: feats = self.feature_names if len(feats) == 1: s += "feature: %s, " % feats[0] else: s += "features : [%s, ..., %s], " % (feats[0], feats[-1]) s += "fit: True" else: s += "fit: False" if hasattr(self, 'scores_'): s += "scored (%s)" % self.scoring return "<ReceptiveField | %s>" % s def _delay_and_reshape(self, X, y=None): """Delay and reshape the variables.""" if not isinstance(self.estimator_, TimeDelayingRidge): # X is now shape (n_times, n_epochs, n_feats, n_delays) X = _delay_time_series(X, self.tmin, self.tmax, self.sfreq, fill_mean=self.fit_intercept) X = _reshape_for_est(X) # Concat times + epochs if y is not None: y = y.reshape(-1, y.shape[-1], order='F') return X, y @verbose def fit(self, X, y): """Fit a receptive field model. Parameters ---------- X : array, shape (n_times[, n_epochs], n_features) The input features for the model. y : array, shape (n_times[, n_epochs][, n_outputs]) The output features for the model. Returns ------- self : instance The instance so you can chain operations. 
""" if self.scoring not in _SCORERS.keys(): raise ValueError('scoring must be one of %s, got' '%s ' % (sorted(_SCORERS.keys()), self.scoring)) from sklearn.base import clone X, y, _, self._y_dim = self._check_dimensions(X, y) if self.tmin > self.tmax: raise ValueError('tmin (%s) must be at most tmax (%s)' % (self.tmin, self.tmax)) # Initialize delays self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq) # Define the slice that we should use in the middle self.valid_samples_ = _delays_to_slice(self.delays_) if isinstance(self.estimator, numbers.Real): if self.fit_intercept is None: self.fit_intercept = True estimator = TimeDelayingRidge( self.tmin, self.tmax, self.sfreq, alpha=self.estimator, fit_intercept=self.fit_intercept, n_jobs=self.n_jobs, edge_correction=self.edge_correction) elif is_regressor(self.estimator): estimator = clone(self.estimator) if self.fit_intercept is not None and \ estimator.fit_intercept != self.fit_intercept: raise ValueError( 'Estimator fit_intercept (%s) != initialization ' 'fit_intercept (%s), initialize ReceptiveField with the ' 'same fit_intercept value or use fit_intercept=None' % (estimator.fit_intercept, self.fit_intercept)) self.fit_intercept = estimator.fit_intercept else: raise ValueError('`estimator` must be a float or an instance' ' of `BaseEstimator`,' ' got type %s.' % type(self.estimator)) self.estimator_ = estimator del estimator _check_estimator(self.estimator_) # Create input features n_times, n_epochs, n_feats = X.shape n_outputs = y.shape[-1] n_delays = len(self.delays_) # Update feature names if we have none if ((self.feature_names is not None) and (len(self.feature_names) != n_feats)): raise ValueError('n_features in X does not match feature names ' '(%s != %s)' % (n_feats, len(self.feature_names))) # Create input features X, y = self._delay_and_reshape(X, y) self.estimator_.fit(X, y) coef = get_coef(self.estimator_, 'coef_') # (n_targets, n_features) shape = [n_feats, n_delays] if self._y_dim > 1: shape.insert(0, -1) self.coef_ = coef.reshape(shape) # Inverse-transform model weights if self.patterns: if isinstance(self.estimator_, TimeDelayingRidge): cov_ = self.estimator_.cov_ / float(n_times * n_epochs - 1) y = y.reshape(-1, y.shape[-1], order='F') else: X = X - X.mean(0, keepdims=True) cov_ = np.cov(X.T) del X # Inverse output covariance if y.ndim == 2 and y.shape[1] != 1: y = y - y.mean(0, keepdims=True) inv_Y = linalg.pinv(np.cov(y.T)) else: inv_Y = 1. / float(n_times * n_epochs - 1) del y # Inverse coef according to Haufe's method # patterns has shape (n_feats * n_delays, n_outputs) coef = np.reshape(self.coef_, (n_feats * n_delays, n_outputs)) patterns = cov_.dot(coef.dot(inv_Y)) self.patterns_ = patterns.reshape(shape) return self def predict(self, X): """Generate predictions with a receptive field. Parameters ---------- X : array, shape (n_times[, n_epochs], n_channels) The input features for the model. Returns ------- y_pred : array, shape (n_times[, n_epochs][, n_outputs]) The output predictions. "Note that valid samples (those unaffected by edge artifacts during the time delaying step) can be obtained using ``y_pred[rf.valid_samples_]``. 
""" if not hasattr(self, 'delays_'): raise ValueError('Estimator has not been fit yet.') X, _, X_dim = self._check_dimensions(X, None, predict=True)[:3] del _ # convert to sklearn and back pred_shape = X.shape[:-1] if self._y_dim > 1: pred_shape = pred_shape + (self.coef_.shape[0],) X, _ = self._delay_and_reshape(X) y_pred = self.estimator_.predict(X) y_pred = y_pred.reshape(pred_shape, order='F') shape = list(y_pred.shape) if X_dim <= 2: shape.pop(1) # epochs extra = 0 else: extra = 1 shape = shape[:self._y_dim + extra] y_pred.shape = shape return y_pred def score(self, X, y): """Score predictions generated with a receptive field. This calls ``self.predict``, then masks the output of this and ``y` with ``self.mask_prediction_``. Finally, it passes this to a :mod:`sklearn.metrics` scorer. Parameters ---------- X : array, shape (n_times[, n_epochs], n_channels) The input features for the model. y : array, shape (n_times[, n_epochs][, n_outputs]) Used for scikit-learn compatibility. Returns ------- scores : list of float, shape (n_outputs,) The scores estimated by the model for each output (e.g. mean R2 of ``predict(X)``). """ # Create our scoring object scorer_ = _SCORERS[self.scoring] # Generate predictions, then reshape so we can mask time X, y = self._check_dimensions(X, y, predict=True)[:2] n_times, n_epochs, n_outputs = y.shape y_pred = self.predict(X) y_pred = y_pred[self.valid_samples_] y = y[self.valid_samples_] # Re-vectorize and call scorer y = y.reshape([-1, n_outputs], order='F') y_pred = y_pred.reshape([-1, n_outputs], order='F') assert y.shape == y_pred.shape scores = scorer_(y, y_pred, multioutput='raw_values') return scores def _check_dimensions(self, X, y, predict=False): X_dim = X.ndim y_dim = y.ndim if y is not None else 0 if X_dim == 2: # Ensure we have a 3D input by adding singleton epochs dimension X = X[:, np.newaxis, :] if y is not None: if y_dim == 1: y = y[:, np.newaxis, np.newaxis] # epochs, outputs elif y_dim == 2: y = y[:, np.newaxis, :] # epochs else: raise ValueError('y must be shape (n_times[, n_epochs]' '[,n_outputs], got %s' % (y.shape,)) elif X.ndim == 3: if y is not None: if y.ndim == 2: y = y[:, :, np.newaxis] # Add an outputs dim elif y.ndim != 3: raise ValueError('If X has 3 dimensions, ' 'y must have 2 or 3 dimensions') else: raise ValueError('X must be shape (n_times[, n_epochs],' ' n_features), got %s' % (X.shape,)) if y is not None: if X.shape[0] != y.shape[0]: raise ValueError('X and y do not have the same n_times\n' '%s != %s' % (X.shape[0], y.shape[0])) if X.shape[1] != y.shape[1]: raise ValueError('X and y do not have the same n_epochs\n' '%s != %s' % (X.shape[1], y.shape[1])) if predict and y.shape[-1] != len(self.estimator_.coef_): raise ValueError('Number of outputs does not match' ' estimator coefficients dimensions') return X, y, X_dim, y_dim def _delay_time_series(X, tmin, tmax, sfreq, fill_mean=False): """Return a time-lagged input time series. Parameters ---------- X : array, shape (n_times[, n_epochs], n_features) The time series to delay. Must be 2D or 3D. tmin : int | float The starting lag. tmax : int | float The ending lag. Must be >= tmin. sfreq : int | float The sampling frequency of the series. Defaults to 1.0. fill_mean : bool If True, the fill value will be the mean along the time dimension of the feature, and each cropped and delayed segment of data will be shifted to have the same mean value (ensuring that mean subtraction works properly). If False, the fill value will be zero. 
Returns ------- delayed : array, shape(n_times[, n_epochs][, n_features], n_delays) The delayed data. It has the same shape as X, with an extra dimension appended to the end. Examples -------- >>> tmin, tmax = -0.1, 0.2 >>> sfreq = 10. >>> x = np.arange(1, 6) >>> x_del = _delay_time_series(x, tmin, tmax, sfreq) >>> print(x_del) # doctest:+SKIP [[2. 1. 0. 0.] [3. 2. 1. 0.] [4. 3. 2. 1.] [5. 4. 3. 2.] [0. 5. 4. 3.]] """ _check_delayer_params(tmin, tmax, sfreq) delays = _times_to_delays(tmin, tmax, sfreq) # Iterate through indices and append delayed = np.zeros(X.shape + (len(delays),)) if fill_mean: mean_value = X.mean(axis=0) if X.ndim == 3: mean_value = np.mean(mean_value, axis=0) delayed[:] = mean_value[:, np.newaxis] for ii, ix_delay in enumerate(delays): # Create zeros to populate w/ delays if ix_delay < 0: out = delayed[:ix_delay, ..., ii] use_X = X[-ix_delay:] elif ix_delay > 0: out = delayed[ix_delay:, ..., ii] use_X = X[:-ix_delay] else: # == 0 out = delayed[..., ii] use_X = X out[:] = use_X if fill_mean: out[:] += (mean_value - use_X.mean(axis=0)) return delayed def _times_to_delays(tmin, tmax, sfreq): """Convert a tmin/tmax in seconds to delays.""" # Convert seconds to samples delays = np.arange(int(np.round(tmin * sfreq)), int(np.round(tmax * sfreq) + 1)) return delays def _delays_to_slice(delays): """Find the slice to be taken in order to remove missing values.""" # Negative values == cut off rows at the end min_delay = None if delays[-1] <= 0 else delays[-1] # Positive values == cut off rows at the end max_delay = None if delays[0] >= 0 else delays[0] return slice(min_delay, max_delay) def _check_delayer_params(tmin, tmax, sfreq): """Check delayer input parameters. For future custom delay support.""" _validate_type(sfreq, 'numeric', '`sfreq`') for tlim in (tmin, tmax): _validate_type(tlim, 'numeric', 'tmin/tmax') if not tmin <= tmax: raise ValueError('tmin must be <= tmax') def _reshape_for_est(X_del): """Convert X_del to a sklearn-compatible shape.""" n_times, n_epochs, n_feats, n_delays = X_del.shape X_del = X_del.reshape(n_times, n_epochs, -1) # concatenate feats X_del = X_del.reshape(n_times * n_epochs, -1, order='F') return X_del # Create a correlation scikit-learn-style scorer def _corr_score(y_true, y, multioutput=None): from scipy.stats import pearsonr assert multioutput == 'raw_values' for this_y in (y_true, y): if this_y.ndim != 2: raise ValueError('inputs must be shape (samples, outputs), got %s' % (this_y.shape,)) return np.array([pearsonr(y_true[:, ii], y[:, ii])[0] for ii in range(y.shape[-1])]) def _r2_score(y_true, y, multioutput=None): from sklearn.metrics import r2_score return r2_score(y_true, y, multioutput=multioutput) _SCORERS = {'r2': _r2_score, 'corrcoef': _corr_score}
bsd-3-clause
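A minimal usage sketch for the class above, assuming mne is installed and following the shapes documented in its docstrings (X as (n_times, n_epochs, n_features), y as (n_times, n_epochs), a float estimator interpreted as a ridge alpha); the simulated data and weights here are hypothetical:

import numpy as np
from mne.decoding import ReceptiveField

rng = np.random.RandomState(0)
sfreq = 100.
n_times, n_epochs, n_feats = 1000, 3, 2

# Simulated stimulus features and a lagged linear response (hypothetical data).
X = rng.randn(n_times, n_epochs, n_feats)
weights = rng.randn(5, n_feats)            # 5 lags x 2 features
y = np.zeros((n_times, n_epochs))
for lag in range(5):
    y[lag:] += X[:n_times - lag] @ weights[lag]

# Ridge-regularized receptive field over lags 0 to 40 ms (float estimator = alpha).
rf = ReceptiveField(tmin=0., tmax=0.04, sfreq=sfreq, estimator=1.0, scoring='r2')
rf.fit(X, y)
print(rf.coef_.shape)   # (n_features, n_delays), since y has a single output
print(rf.score(X, y))   # R^2 on the valid (non-edge) samples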
IntelLabs/hpat
sdc/datatypes/hpat_pandas_stringmethods_functions.py
1
44761
# ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** """ | :class:`pandas.core.strings.StringMethods` functions and operators implementations in HPAT .. only:: developer This is autogenerated sources for all Unicode string functions supported by Numba. Currently tested 45 functions only. List of functions obtained automatically from `numba.types.misc.UnicodeType` class Example of the generated method (for method upper()): `hpat_pandas_stringmethods_upper_parallel_impl` is paralell version (required additional import mentioned in the body) @sdc_overload_method(StringMethodsType, 'upper') def hpat_pandas_stringmethods_upper(self): ty_checker = TypeChecker('Method stringmethods.upper().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_upper_parallel_impl(self): from numba.parfor import (init_prange, min_checker, internal_prange) init_prange() result = [] item_count = len(self._data) min_checker(item_count) for i in internal_prange(item_count): item = self._data[i] item_method = item.upper() result.append(item_method) return pandas.Series(result) return hpat_pandas_stringmethods_upper_parallel_impl def hpat_pandas_stringmethods_upper_impl(self): result = [] item_count = len(self._data) for i in range(item_count): item = self._data[i] item_method = item.upper() result.append(item_method) return pandas.Series(result) return hpat_pandas_stringmethods_upper_impl Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_split_filter """ import numpy import pandas import numba from numba.types import (Boolean, Integer, NoneType, Omitted, StringLiteral, UnicodeType) from sdc.utilities.sdc_typing_utils import TypeChecker from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType from sdc.utilities.utils import sdc_overload_method from sdc.hiframes.api import get_nan_mask from sdc.str_arr_ext import str_arr_set_na_by_mask, create_str_arr_from_list _hpat_pandas_stringmethods_autogen_global_dict = { 'pandas': pandas, 'numpy': numpy, 'numba': numba, 'StringMethodsType': StringMethodsType, 'TypeChecker': TypeChecker } 
_hpat_pandas_stringmethods_functions_params = { 'cat': ', others=None, sep=None, na_rep=None, join="left"', 'center': ', width, fillchar=" "', 'contains': ', pat, case=True, flags=0, na=numpy.nan, regex=True', 'count': ', pat, flags=0', 'decode': ', encoding, errors="strict"', 'encode': ', encoding, errors="strict"', 'endswith': ', pat, na=numpy.nan', 'extractall': ', pat, flags=0', 'extract': ', pat, flags=0, expand=True', 'findall': ', pat, flags=0', 'find': ', sub, start=0, end=None', 'get': ', i', 'get_dummies': ', sep="|"', 'index': ', sub, start=0, end=None', 'join': ', sep', 'ljust': ', width, fillchar=" "', 'lstrip': ', to_strip=None', 'match': ', pat, case=True, flags=0, na=numpy.nan', 'normalize': ', form', 'pad': ', width, side="left", fillchar=" "', 'partition': ', sep=" ", expand=True', 'repeat': ', repeats', 'replace': ', pat, repl, n=-1, case=None, flags=0, regex=True', 'rfind': ', sub, start=0, end=None', 'rindex': ', sub, start=0, end=None', 'rjust': ', width, fillchar=" "', 'rpartition': ', sep=" ", expand=True', 'rsplit': ', pat=None, n=-1, expand=False', 'rstrip': ', to_strip=None', 'slice_replace': ', start=None, stop=None, repl=None', 'slice': ', start=None, stop=None, step=None', 'split': ', pat=None, n=-1, expand=False', 'startswith': ', pat, na=numpy.nan', 'strip': ', to_strip=None', 'translate': ', table', 'wrap': ', width', 'zfill': ', width', } _hpat_pandas_stringmethods_functions_template = """ # @sdc_overload_method(StringMethodsType, '{methodname}') def hpat_pandas_stringmethods_{methodname}(self{methodparams}): \"\"\" Pandas Series method :meth:`pandas.core.strings.StringMethods.{methodname}()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. only:: developer Test: python -m sdc.runtests sdc.tests.test_strings.TestStrings.test_str2str python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str2str python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_get python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_replace_noregex python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_split python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_contains_regex Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg other: {methodparams} input arguments decription in https://pandas.pydata.org/pandas-docs/version/0.25/reference/series.html#string-handling Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object \"\"\" ty_checker = TypeChecker('Method {methodname}().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_{methodname}_impl(self{methodparams}): item_count = len(self._data) result = [''] * item_count # result = numba.typed.List.empty_list(numba.types.unicode_type) for it in range(item_count): item = self._data._data[it] if len(item) > 0: result[it] = item.{methodname}({methodparams_call}) else: result[it] = item return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_{methodname}_impl """ @sdc_overload_method(StringMethodsType, 'center') def hpat_pandas_stringmethods_center(self, width, fillchar=' '): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.center Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. 
literalinclude:: ../../../examples/series/str/series_str_center.py :language: python :lines: 27- :caption: Filling left and right side of strings in the Series with an additional character :name: ex_series_str_center .. command-output:: python ./series/str/series_str_center.py :cwd: ../../../examples .. todo:: Add support of 32-bit Unicode for `str.center()` Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.center()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. only:: developer Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_center Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg width: :obj:`int` Minimum width of resulting string fillchar: :obj:`str` Additional character for filling, default is whitespace Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method center().') ty_checker.check(self, StringMethodsType) if not isinstance(width, Integer): ty_checker.raise_exc(width, 'int', 'width') accepted_types = (Omitted, StringLiteral, UnicodeType) if not isinstance(fillchar, accepted_types) and fillchar != ' ': ty_checker.raise_exc(fillchar, 'str', 'fillchar') def hpat_pandas_stringmethods_center_impl(self, width, fillchar=' '): item_count = len(self._data) result = [''] * item_count for idx, item in enumerate(self._data._data): result[idx] = item.center(width, fillchar) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_center_impl @sdc_overload_method(StringMethodsType, 'endswith') def hpat_pandas_stringmethods_endswith(self, pat, na=None): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.endswith Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. literalinclude:: ../../../examples/series/str/series_str_endswith.py :language: python :lines: 27- :caption: Test if the end of each string element matches a string :name: ex_series_str_endswith .. command-output:: python ./series/str/series_str_endswith.py :cwd: ../../../examples .. todo:: - Add support of matching the end of each string by a pattern - Add support of parameter ``na`` .. seealso:: `str.endswith <https://docs.python.org/3/library/stdtypes.html#str.endswith>`_ Python standard library string method. :ref:`Series.str.startswith <pandas.Series.str.startswith>` Same as endswith, but tests the start of string. :ref:`Series.str.contains <pandas.Series.str.contains>` Tests if string element contains a pattern. Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.endswith()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. 
only:: developer Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_endswith Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg pat: :obj:`str` Character sequence na: :obj:`bool` Object shown if element tested is not a string *unsupported* Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method endswith().') ty_checker.check(self, StringMethodsType) if not isinstance(pat, (StringLiteral, UnicodeType)): ty_checker.raise_exc(pat, 'str', 'pat') if not isinstance(na, (Boolean, NoneType, Omitted)) and na is not None: ty_checker.raise_exc(na, 'bool', 'na') def hpat_pandas_stringmethods_endswith_impl(self, pat, na=None): if na is not None: msg = 'Method endswith(). The object na\n expected: None' raise ValueError(msg) item_endswith = len(self._data) result = numpy.empty(item_endswith, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.endswith(pat) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_endswith_impl @sdc_overload_method(StringMethodsType, 'find') def hpat_pandas_stringmethods_find(self, sub, start=0, end=None): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.find Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. literalinclude:: ../../../examples/series/str/series_str_find.py :language: python :lines: 27- :caption: Return lowest indexes in each strings in the Series :name: ex_series_str_find .. command-output:: python ./series/str/series_str_find.py :cwd: ../../../examples .. todo:: Add support of parameters ``start`` and ``end`` .. seealso:: :ref:`Series.str.rfind <pandas.Series.str.rfind>` Return highest indexes in each strings. Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.find()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. only:: developer Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_find Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg sub: :obj:`str` Substring being searched start: :obj:`int` Left edge index *unsupported* end: :obj:`int` Right edge index *unsupported* Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method find().') ty_checker.check(self, StringMethodsType) if not isinstance(sub, (StringLiteral, UnicodeType)): ty_checker.raise_exc(sub, 'str', 'sub') accepted_types = (Integer, NoneType, Omitted) if not isinstance(start, accepted_types) and start != 0: ty_checker.raise_exc(start, 'None, int', 'start') if not isinstance(end, accepted_types) and end is not None: ty_checker.raise_exc(end, 'None, int', 'end') def hpat_pandas_stringmethods_find_impl(self, sub, start=0, end=None): if start != 0: raise ValueError('Method find(). The object start\n expected: 0') if end is not None: raise ValueError('Method find(). 
The object end\n expected: None') item_count = len(self._data) result = numpy.empty(item_count, numba.types.int64) for idx, item in enumerate(self._data._data): result[idx] = item.find(sub) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_find_impl @sdc_overload_method(StringMethodsType, 'isupper') def hpat_pandas_stringmethods_isupper(self): ty_checker = TypeChecker('Method isupper().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_isupper_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.isupper() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_isupper_impl @sdc_overload_method(StringMethodsType, 'len') def hpat_pandas_stringmethods_len(self): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.len Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. literalinclude:: ../../../examples/series/str/series_str_len.py :language: python :lines: 27- :caption: Compute the length of each element in the Series :name: ex_series_str_len .. command-output:: python ./series/str/series_str_len.py :cwd: ../../../examples .. seealso:: `str.len` Python built-in function returning the length of an object. :ref:`Series.size <pandas.Series.size>` Returns the length of the Series. Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.len()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. only:: developer Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str_len1 Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method len().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_len_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.int64) for idx, item in enumerate(self._data._data): result[idx] = len(item) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_len_impl @sdc_overload_method(StringMethodsType, 'ljust') def hpat_pandas_stringmethods_ljust(self, width, fillchar=' '): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.ljust Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. literalinclude:: ../../../examples/series/str/series_str_ljust.py :language: python :lines: 27- :caption: Filling right side of strings in the Series with an additional character :name: ex_series_str_ljust .. command-output:: python ./series/str/series_str_ljust.py :cwd: ../../../examples .. todo:: Add support of 32-bit Unicode for `str.ljust()` Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.ljust()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. 
only:: developer Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_ljust Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg width: :obj:`int` Minimum width of resulting string fillchar: :obj:`str` Additional character for filling, default is whitespace Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method ljust().') ty_checker.check(self, StringMethodsType) if not isinstance(width, Integer): ty_checker.raise_exc(width, 'int', 'width') accepted_types = (Omitted, StringLiteral, UnicodeType) if not isinstance(fillchar, accepted_types) and fillchar != ' ': ty_checker.raise_exc(fillchar, 'str', 'fillchar') def hpat_pandas_stringmethods_ljust_impl(self, width, fillchar=' '): item_count = len(self._data) result = [''] * item_count for idx, item in enumerate(self._data._data): result[idx] = item.ljust(width, fillchar) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_ljust_impl @sdc_overload_method(StringMethodsType, 'rjust') def hpat_pandas_stringmethods_rjust(self, width, fillchar=' '): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.rjust Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. literalinclude:: ../../../examples/series/str/series_str_rjust.py :language: python :lines: 27- :caption: Filling left side of strings in the Series with an additional character :name: ex_series_str_rjust .. command-output:: python ./series/str/series_str_rjust.py :cwd: ../../../examples .. todo:: Add support of 32-bit Unicode for `str.rjust()` Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.rjust()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. only:: developer Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_rjust Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg width: :obj:`int` Minimum width of resulting string fillchar: :obj:`str` Additional character for filling, default is whitespace Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method rjust().') ty_checker.check(self, StringMethodsType) if not isinstance(width, Integer): ty_checker.raise_exc(width, 'int', 'width') accepted_types = (Omitted, StringLiteral, UnicodeType) if not isinstance(fillchar, accepted_types) and fillchar != ' ': ty_checker.raise_exc(fillchar, 'str', 'fillchar') def hpat_pandas_stringmethods_rjust_impl(self, width, fillchar=' '): item_count = len(self._data) result = [''] * item_count for idx, item in enumerate(self._data._data): result[idx] = item.rjust(width, fillchar) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_rjust_impl @sdc_overload_method(StringMethodsType, 'startswith') def hpat_pandas_stringmethods_startswith(self, pat, na=None): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.startswith Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. 
literalinclude:: ../../../examples/series/str/series_str_startswith.py :language: python :lines: 27- :caption: Test if the start of each string element matches a string :name: ex_series_str_startswith .. command-output:: python ./series/str/series_str_startswith.py :cwd: ../../../examples .. todo:: - Add support of matching the start of each string by a pattern - Add support of parameter ``na`` .. seealso:: `str.startswith <https://docs.python.org/3/library/stdtypes.html#str.startswith>`_ Python standard library string method. :ref:`Series.str.endswith <pandas.Series.str.endswith>` Same as startswith, but tests the end of string. :ref:`Series.str.contains <pandas.Series.str.contains>` Tests if string element contains a pattern. Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.startswith()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. only:: developer Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_startswith Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg pat: :obj:`str` Character sequence na: :obj:`bool` Object shown if element tested is not a string *unsupported* Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method startswith().') ty_checker.check(self, StringMethodsType) if not isinstance(pat, (StringLiteral, UnicodeType)): ty_checker.raise_exc(pat, 'str', 'pat') if not isinstance(na, (Boolean, NoneType, Omitted)) and na is not None: ty_checker.raise_exc(na, 'bool', 'na') def hpat_pandas_stringmethods_startswith_impl(self, pat, na=None): if na is not None: msg = 'Method startswith(). The object na\n expected: None' raise ValueError(msg) item_startswith = len(self._data) result = numpy.empty(item_startswith, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.startswith(pat) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_startswith_impl @sdc_overload_method(StringMethodsType, 'zfill') def hpat_pandas_stringmethods_zfill(self, width): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.zfill Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. literalinclude:: ../../../examples/series/str/series_str_zfill.py :language: python :lines: 27- :caption: Pad strings in the Series by prepending '0' characters :name: ex_series_str_zfill .. command-output:: python ./series/str/series_str_zfill.py :cwd: ../../../examples .. todo:: Add support of 32-bit Unicode for `str.zfill()` .. seealso:: :ref:`Series.str.rjust <pandas.Series.str.rjust>` Fills the left side of strings with an arbitrary character. :ref:`Series.str.ljust <pandas.Series.str.ljust>` Fills the right side of strings with an arbitrary character. :ref:`Series.str.pad <pandas.Series.str.pad>` Fills the specified sides of strings with an arbitrary character. :ref:`Series.str.center <pandas.Series.str.center>` Fills boths sides of strings with an arbitrary character. Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.zfill()` implementation. Note: Unicode type of list elements are supported only. 
Numpy.NaN is not supported as elements. .. only:: developer Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_zfill Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg width: :obj:`int` Minimum width of resulting string Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ ty_checker = TypeChecker('Method zfill().') ty_checker.check(self, StringMethodsType) if not isinstance(width, Integer): ty_checker.raise_exc(width, 'int', 'width') def hpat_pandas_stringmethods_zfill_impl(self, width): item_count = len(self._data) result = [''] * item_count for idx, item in enumerate(self._data._data): result[idx] = item.zfill(width) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_zfill_impl def _hpat_pandas_stringmethods_autogen(method_name): """" The function generates a function for 'method_name' from source text that is created on the fly. """ params = "" params_call = "" # get function parameters by name params_dict = _hpat_pandas_stringmethods_functions_params.get(method_name) if params_dict is not None: params = params_dict if len(params) > 0: """ Translate parameters string for method For example: parameters for split(): ', pat=None, n=-1, expand=False' translate into: 'pat, n, expand' """ params_call_splitted = params.split(',') params_call_list = [] for item in params_call_splitted: params_call_list.append(item.split("=")[0]) params_call = ",".join(params_call_list) if len(params_call) > 1: params_call = params_call[2:] sourcecode = _hpat_pandas_stringmethods_functions_template.format(methodname=method_name, methodparams=params, methodparams_call=params_call) exec(sourcecode, _hpat_pandas_stringmethods_autogen_global_dict) global_dict_name = 'hpat_pandas_stringmethods_{methodname}'.format(methodname=method_name) return _hpat_pandas_stringmethods_autogen_global_dict[global_dict_name] sdc_pandas_series_str_docstring_template = """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.Series.str.{method_name} Limitations ----------- Series elements are expected to be Unicode strings. Elements cannot be NaN. Examples -------- .. literalinclude:: ../../../examples/series/str/series_str_{method_name}.py :language: python :lines: 27- :caption: {caption} :name: ex_series_str_{method_name} .. command-output:: python ./series/str/series_str_{method_name}.py :cwd: ../../../examples .. seealso:: {seealso} Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas Series method :meth:`pandas.core.strings.StringMethods.{method_name}()` implementation. Note: Unicode type of list elements are supported only. Numpy.NaN is not supported as elements. .. 
only:: developer Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_{method_name}_str Parameters ---------- self: :class:`pandas.core.strings.StringMethods` input arg Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ @sdc_overload_method(StringMethodsType, 'istitle') def hpat_pandas_stringmethods_istitle(self): ty_checker = TypeChecker('Method istitle().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_istitle_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.istitle() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_istitle_impl @sdc_overload_method(StringMethodsType, 'isspace') def hpat_pandas_stringmethods_isspace(self): ty_checker = TypeChecker('Method isspace().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_isspace_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.isspace() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_isspace_impl @sdc_overload_method(StringMethodsType, 'isalpha') def hpat_pandas_stringmethods_isalpha(self): ty_checker = TypeChecker('Method isalpha().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_isalpha_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.isalpha() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_isalpha_impl @sdc_overload_method(StringMethodsType, 'islower') def hpat_pandas_stringmethods_islower(self): ty_checker = TypeChecker('Method islower().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_islower_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.islower() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_islower_impl @sdc_overload_method(StringMethodsType, 'isalnum') def hpat_pandas_stringmethods_isalnum(self): ty_checker = TypeChecker('Method isalnum().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_isalnum_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.isalnum() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_isalnum_impl @sdc_overload_method(StringMethodsType, 'isnumeric') def hpat_pandas_stringmethods_isnumeric(self): ty_checker = TypeChecker('Method isnumeric().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_isnumeric_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.isnumeric() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_isnumeric_impl @sdc_overload_method(StringMethodsType, 'isdigit') def hpat_pandas_stringmethods_isdigit(self): ty_checker = TypeChecker('Method isdigit().') ty_checker.check(self, StringMethodsType) def 
hpat_pandas_stringmethods_isdigit_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.isdigit() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_isdigit_impl @sdc_overload_method(StringMethodsType, 'isdecimal') def hpat_pandas_stringmethods_isdecimal(self): ty_checker = TypeChecker('Method isdecimal().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_isdecimal_impl(self): item_count = len(self._data) result = numpy.empty(item_count, numba.types.boolean) for idx, item in enumerate(self._data._data): result[idx] = item.isdecimal() return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_isdecimal_impl @sdc_overload_method(StringMethodsType, 'capitalize') def hpat_pandas_stringmethods_capitalize(self): ty_checker = TypeChecker('Method capitalize().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_capitalize_impl(self): mask = get_nan_mask(self._data._data) item_count = len(self._data) res_list = [''] * item_count for idx in numba.prange(item_count): res_list[idx] = self._data._data[idx].capitalize() str_arr = create_str_arr_from_list(res_list) result = str_arr_set_na_by_mask(str_arr, mask) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_capitalize_impl @sdc_overload_method(StringMethodsType, 'title') def hpat_pandas_stringmethods_title(self): ty_checker = TypeChecker('Method title().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_title_impl(self): mask = get_nan_mask(self._data._data) item_count = len(self._data) res_list = [''] * item_count for idx in numba.prange(item_count): res_list[idx] = self._data._data[idx].title() str_arr = create_str_arr_from_list(res_list) result = str_arr_set_na_by_mask(str_arr, mask) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_title_impl @sdc_overload_method(StringMethodsType, 'swapcase') def hpat_pandas_stringmethods_swapcase(self): ty_checker = TypeChecker('Method swapcase().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_swapcase_impl(self): mask = get_nan_mask(self._data._data) item_count = len(self._data) res_list = [''] * item_count for idx in numba.prange(item_count): res_list[idx] = self._data._data[idx].swapcase() str_arr = create_str_arr_from_list(res_list) result = str_arr_set_na_by_mask(str_arr, mask) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_swapcase_impl @sdc_overload_method(StringMethodsType, 'casefold') def hpat_pandas_stringmethods_casefold(self): ty_checker = TypeChecker('Method casefold().') ty_checker.check(self, StringMethodsType) def hpat_pandas_stringmethods_casefold_impl(self): mask = get_nan_mask(self._data._data) item_count = len(self._data) res_list = [''] * item_count for idx in numba.prange(item_count): res_list[idx] = self._data._data[idx].casefold() str_arr = create_str_arr_from_list(res_list) result = str_arr_set_na_by_mask(str_arr, mask) return pandas.Series(result, self._data._index, name=self._data._name) return hpat_pandas_stringmethods_casefold_impl seealso_check_methods = """ :ref:`Series.str.isalpha <pandas.Series.str.isalpha>` Check whether all characters are alphabetic. 
:ref:`Series.str.isnumeric <pandas.Series.str.isnumeric>`
    Check whether all characters are numeric.
:ref:`Series.str.isalnum <pandas.Series.str.isalnum>`
    Check whether all characters are alphanumeric.
:ref:`Series.str.isdigit <pandas.Series.str.isdigit>`
    Check whether all characters are digits.
:ref:`Series.str.isdecimal <pandas.Series.str.isdecimal>`
    Check whether all characters are decimal.
:ref:`Series.str.isspace <pandas.Series.str.isspace>`
    Check whether all characters are whitespace.
:ref:`Series.str.islower <pandas.Series.str.islower>`
    Check whether all characters are lowercase.
:ref:`Series.str.isupper <pandas.Series.str.isupper>`
    Check whether all characters are uppercase.
:ref:`Series.str.istitle <pandas.Series.str.istitle>`
    Check whether all characters are titlecase.
"""

seealso_transform_methods = """
:ref:`Series.str.lower <pandas.Series.str.lower>`
    Converts all characters to lowercase.
:ref:`Series.str.upper <pandas.Series.str.upper>`
    Converts all characters to uppercase.
:ref:`Series.str.title <pandas.Series.str.title>`
    Converts first character of each word to uppercase and remaining to lowercase.
:ref:`Series.str.capitalize <pandas.Series.str.capitalize>`
    Converts first character to uppercase and remaining to lowercase.
:ref:`Series.str.swapcase <pandas.Series.str.swapcase>`
    Converts uppercase to lowercase and lowercase to uppercase.
:ref:`Series.str.casefold <pandas.Series.str.casefold>`
    Removes all case distinctions in the string.
"""

stringmethods_funcs = {
    'istitle': {'method': hpat_pandas_stringmethods_istitle,
                'caption': 'Check whether each word starts with an upper case letter',
                'seealso': seealso_check_methods},
    'isspace': {'method': hpat_pandas_stringmethods_isspace,
                'caption': 'Check whether all characters in the text are whitespace',
                'seealso': seealso_check_methods},
    'isalpha': {'method': hpat_pandas_stringmethods_isalpha,
                'caption': 'Check whether all characters in each string are alphabetic',
                'seealso': seealso_check_methods},
    'islower': {'method': hpat_pandas_stringmethods_islower,
                'caption': 'Check whether all characters in the text are lowercase',
                'seealso': seealso_check_methods},
    'isalnum': {'method': hpat_pandas_stringmethods_isalnum,
                'caption': 'Check whether all characters in the text are alphanumeric',
                'seealso': seealso_check_methods},
    'isnumeric': {'method': hpat_pandas_stringmethods_isnumeric,
                  'caption': 'Check whether all characters in each string are numeric.',
                  'seealso': seealso_check_methods},
    'isdigit': {'method': hpat_pandas_stringmethods_isdigit,
                'caption': 'Check whether all characters in each string in the Series/Index are digits.',
                'seealso': seealso_check_methods},
    'isdecimal': {'method': hpat_pandas_stringmethods_isdecimal,
                  'caption': 'Check whether all characters in each string are decimal.',
                  'seealso': seealso_check_methods},
    'isupper': {'method': hpat_pandas_stringmethods_isupper,
                'caption': 'Check whether all characters in each string are uppercase.',
                'seealso': seealso_check_methods},
    'capitalize': {'method': hpat_pandas_stringmethods_capitalize,
                   'caption': 'Convert strings in the Series/Index to be capitalized.',
                   'seealso': seealso_transform_methods},
    'title': {'method': hpat_pandas_stringmethods_title,
              'caption': 'Convert strings in the Series/Index to titlecase.',
              'seealso': seealso_transform_methods},
    'swapcase': {'method': hpat_pandas_stringmethods_swapcase,
                 'caption': 'Convert strings in the Series/Index to be swapcased.',
                 'seealso': seealso_transform_methods},
    'casefold': {'method': hpat_pandas_stringmethods_casefold,
                 'caption': 'Convert strings in the Series/Index to be casefolded.',
                 'seealso': seealso_transform_methods},
}

for name, data in stringmethods_funcs.items():
    data['method'].__doc__ = sdc_pandas_series_str_docstring_template.format(
        **{'method_name': name, 'caption': data['caption'], 'seealso': data['seealso']})

# _hpat_pandas_stringmethods_autogen_methods = sorted(
#     dir(numba.types.misc.UnicodeType.__getattribute__.__qualname__))
_hpat_pandas_stringmethods_autogen_methods = ['upper', 'lower', 'lstrip', 'rstrip', 'strip']
"""
This is the list of functions which are autogenerated to be used from Numba directly.
"""

_hpat_pandas_stringmethods_autogen_exceptions = ['split', 'get', 'replace']

for method_name in _hpat_pandas_stringmethods_autogen_methods:
    if not (method_name.startswith('__')
            or method_name in _hpat_pandas_stringmethods_autogen_exceptions):
        sdc_overload_method(StringMethodsType, method_name)(
            _hpat_pandas_stringmethods_autogen(method_name))
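# A minimal, self-contained sketch of the docstring-templating pattern used above:
# a shared template is formatted once per method and assigned to __doc__. The
# template text, stub functions and method subset below are illustrative, not
# SDC's actual ones, and nothing here depends on Numba.
_DOC_TEMPLATE = """
Pandas API: pandas.Series.str.{method_name}

{caption}

See also
--------
{seealso}
"""


def _make_stub(name):
    def stub(self):
        raise NotImplementedError(name)
    stub.__name__ = name
    return stub


_demo_funcs = {
    'isupper': {'method': _make_stub('isupper'),
                'caption': 'Check whether all characters in each string are uppercase.',
                'seealso': 'Series.str.islower'},
    'title': {'method': _make_stub('title'),
              'caption': 'Convert strings in the Series/Index to titlecase.',
              'seealso': 'Series.str.capitalize'},
}

for _name, _data in _demo_funcs.items():
    _data['method'].__doc__ = _DOC_TEMPLATE.format(
        method_name=_name, caption=_data['caption'], seealso=_data['seealso'])

print(_demo_funcs['title']['method'].__doc__)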
bsd-2-clause
abimannans/scikit-learn
examples/datasets/plot_iris_dataset.py
283
1928
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray

The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.

The plot below uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())

# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])

plt.show()
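# A small companion sketch (not part of the original example): it reports how
# much variance the three plotted PCA directions capture. Only public
# scikit-learn APIs are used; the output formatting is illustrative.
from sklearn import datasets
from sklearn.decomposition import PCA

iris = datasets.load_iris()
pca = PCA(n_components=3).fit(iris.data)
for i, ratio in enumerate(pca.explained_variance_ratio_, start=1):
    print("PCA direction %d explains %.1f%% of the variance" % (i, 100 * ratio))
print("Total explained: %.1f%%" % (100 * pca.explained_variance_ratio_.sum()))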
bsd-3-clause
mdeff/ntds_2017
projects/reports/arab_springs/lib/clustering.py
1
1319
import pickle
import pandas as pd
import json
import numpy as np
import matplotlib.pyplot as plt
from scipy import spatial, sparse
import scipy.sparse.linalg
import scipy
from sklearn.cluster import KMeans
from pygsp import graphs, filters, plotting
import operator
import io
from lib import models, graph, coarsening, utils

get_ipython().magic('matplotlib inline')


def take_eigenvectors(laplacian, K=5):
    # Smallest-algebraic ('SA') eigenpairs of the graph Laplacian.
    eigenvalues, eigenvectors = sparse.linalg.eigsh(laplacian, k=K, which='SA')
    return eigenvalues, eigenvectors


def do_kmeans(eigenvectors, K=5):
    # k-means on the spectral embedding to find clusters.
    kmeans = KMeans(n_clusters=K, random_state=0).fit(eigenvectors)
    return kmeans.labels_


def label_data(df, kmeans_labels, K=5, NUMBER=40):
    # Count word occurrences per cluster.
    counts = [dict() for x in range(K)]
    for i, label in enumerate(kmeans_labels):
        words = df.loc[i].Tokens
        for w in words:
            counts[label][w] = counts[label].get(w, 0) + 1

    # Count in how many clusters each of the top-NUMBER words (by frequency) appears.
    total = {}
    for k in range(K):
        sorted_words = sorted(counts[k], key=counts[k].get, reverse=True)[:NUMBER]
        for w in sorted_words:
            total[w] = total.get(w, 0) + 1

    # Keep only the words that are frequent in exactly one cluster.
    labels = [[] for i in range(K)]
    for k in range(K):
        sorted_words = sorted(counts[k], key=counts[k].get, reverse=True)[:NUMBER]
        for w in sorted_words:
            if total[w] == 1:
                labels[k].append(w)
    return labels
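# A self-contained sketch of the spectral-clustering pipeline the helpers above
# implement (Laplacian -> smallest eigenvectors -> k-means), using only public
# scipy/scikit-learn APIs. The toy graph and K value are made up for illustration.
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh
from sklearn.cluster import KMeans

# Two dense blocks of ten nodes, weakly connected by a single edge.
A = np.zeros((20, 20))
A[:10, :10] = 1
A[10:, 10:] = 1
np.fill_diagonal(A, 0)
A[0, 10] = A[10, 0] = 1

laplacian = csgraph.laplacian(sparse.csr_matrix(A), normed=False).asfptype()
eigenvalues, eigenvectors = eigsh(laplacian, k=2, which='SA')
cluster_labels = KMeans(n_clusters=2, random_state=0).fit(eigenvectors).labels_
print(cluster_labels)  # expected: two clear groups of ten nodes each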
mit
bubae/gazeAssistRecognize
train_model.py
1
5633
import init_path import rcnnModule from sklearn import svm import numpy as np import os, sys, cv2 import csv from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import LinearSVC from utils.timer import Timer from sklearn.externals import joblib CLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') NETS = {'vgg_cnn_m_1024': ('VGG_CNN_M_1024', 'vgg_cnn_m_1024_fast_rcnn_iter_40000.caffemodel'), 'caffenet': ('CaffeNet', 'caffenet_fast_rcnn_iter_40000.caffemodel'), 'zf': ('ZF', 'ZF_faster_rcnn_final.caffemodel')} def init_train(): print "Init Train..." setting = {} setting['NET'] = 'zf' setting['ROOT_DIR'] = os.getcwd() setting['DATA_DIR'] = os.path.join(setting['ROOT_DIR'], 'data') setting['IMAGE_DIR'] = os.path.join(setting['DATA_DIR'], 'imageNet', 'images') setting['TEST_DIR'] = os.path.join(setting['DATA_DIR'], 'Test') setting['DST_DIR'] = os.path.join(setting['DATA_DIR'], 'result') setting['DST_MODEL_DIR'] = os.path.join(setting['DST_DIR'], 'imageNet', setting['NET']) setting['featureDstDir'] = os.path.join(setting['DST_MODEL_DIR'], "FEATURE") categories = sorted([f for f in os.listdir(setting['IMAGE_DIR'])]) categoryDirPath = [os.path.join(setting['IMAGE_DIR'], f) for f in categories] cid2name = categories cid2path = categoryDirPath iid2path = np.array([]) iid2name = np.array([]) iid2cid = np.array([]) cNum = len(cid2path) cid = 0 for dirPath in categoryDirPath: # dirPath = cid2path[i] imList = np.array(sorted([f for f in os.listdir(dirPath)])) imPath = np.array([os.path.join(dirPath, im) for im in imList]) iid2name = np.append(iid2name, imList) iid2path = np.append(iid2path, imPath) iid2cid = np.append(iid2cid, np.ones(len(imPath))*cid) cid = cid + 1 iid2cid = iid2cid.astype(int) cid2name = np.array(cid2name) cid2path = np.array(cid2path) return setting, cid2name, cid2path, iid2path, iid2name, iid2cid def train_SVM(setting, y): print "train SVM" # SVM Training # SVM options # svm_kernel = 'rbf'; # svm_C = 1.0; # svm_loss = 'squared_hinge' # svm_penalty = 'l2' # svm_multi_class = 'ovr' # svm_random_state = 0 filePath = os.path.join(setting['DST_MODEL_DIR'], "svm_trained.pkl") try: clf = joblib.load(filePath) print "using trained model" except: print "building svm model" X = loadDesc(setting) X = X.astype('float') timer = Timer() timer.tic() clf = OneVsRestClassifier(LinearSVC(random_state=0)).fit(X, y) timer.toc() print timer.total_time joblib.dump(clf, filePath) # TEST # print clf.decision_function(X[0]) # print clf.predict(X[5000]) return clf def loadDesc(setting): print "Load Desc..." 
timer = Timer() featureDstDir = setting['featureDstDir'] sortedList = sorted([ f for f in os.listdir(featureDstDir)]) descPath = np.array([ os.path.join(featureDstDir, x) for x in sortedList]) X = [] cnt = 0 size = len(descPath) timer.tic() for path in descPath: feature = readCSV(path) X.append(feature) print "%d / %d file loaded" % (cnt, size) cnt = cnt + 1 timer.toc() # print timer.total_time X = np.array(X) X = np.reshape(X, X.shape[0:2]) return X def readCSV(path): rlist = [] with open(path, 'rb') as f: reader = csv.reader(f, delimiter=' ') for row in reader: rlist.append(row) return np.array(rlist) def writeCSV(data, path): with open(path, 'wb') as fout: writer = csv.writer(fout, delimiter=',') for d in data: writer.writerow([d]) def featureExtraction(setting, cid2name, cid2path, iid2path, iid2name, iid2cid, rcnnModel): print "Feature Extraction.." featureDstDir = setting['featureDstDir'] if not os.path.exists(featureDstDir): os.makedirs(featureDstDir) numIm = len(iid2path) descExist = np.zeros(numIm) fList = np.array([ int(x[0:-4]) for x in os.listdir(featureDstDir) ]) for i in fList: descExist[i] = 1 nonDescList = np.where(descExist == 0)[0] numDesc = len(nonDescList) if numDesc==0: print "No image to desc." cnt = 0 for i in nonDescList: print i, cid2name[iid2cid[i]], iid2name[i],": %0.2f percent finished" % (cnt*100.0/numDesc) im = cv2.imread(iid2path[i]) [features, bbox] = rcnnModel.getFeatureIm(im) feature = np.mean(features, axis=0) fileName = "%06d.csv" % i filePath = os.path.join(featureDstDir, fileName) writeCSV(feature, filePath) cnt = cnt+1 def TestModel(setting, rcnnModel, clf): print "Test trained Model" testDir = setting['TEST_DIR'] sortedList = sorted([ f for f in os.listdir(testDir)]) imPath = np.array([ os.path.join(testDir, x) for x in sortedList]) for path in imPath: im = cv2.imread(path) [features, bbox] = rcnnModel.getFeatureIm(im) feature = np.mean(features, axis=0) predict_result = clf.predict(features) print clf.predict(feature) print len(np.where(predict_result==0)[0]) # print imPath def main(): [setting, cid2name, cid2path, iid2path, iid2name, iid2cid] = init_train(); print "rcnnModel loading..." rcnnModel = rcnnModule.RcnnObject('zf', False); featureExtraction(setting, cid2name, cid2path, iid2path, iid2name, iid2cid, rcnnModel) clf = train_SVM(setting, iid2cid) TestModel(setting, rcnnModel, clf) if __name__ == '__main__': main()
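# A minimal standalone sketch of the classifier setup used in train_SVM above:
# a one-vs-rest LinearSVC fitted on feature vectors and cached to disk. The toy
# data, file name, and the import of the standalone 'joblib' package (rather
# than sklearn.externals.joblib used above) are assumptions for illustration.
import os
import numpy as np
import joblib
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC

X = np.random.RandomState(0).rand(60, 16)   # stand-in for extracted CNN features
y = np.repeat(np.arange(3), 20)             # three hypothetical categories

model_path = 'svm_trained_demo.pkl'
if os.path.exists(model_path):
    clf = joblib.load(model_path)
else:
    clf = OneVsRestClassifier(LinearSVC(random_state=0)).fit(X, y)
    joblib.dump(clf, model_path)

print(clf.predict(X[:5]))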
mit
robbymeals/scikit-learn
sklearn/ensemble/partial_dependence.py
251
15097
"""Partial dependence plots for tree ensembles. """ # Authors: Peter Prettenhofer # License: BSD 3 clause from itertools import count import numbers import numpy as np from scipy.stats.mstats import mquantiles from ..utils.extmath import cartesian from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import map, range, zip from ..utils import check_array from ..tree._tree import DTYPE from ._gradient_boosting import _partial_dependence_tree from .gradient_boosting import BaseGradientBoosting def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100): """Generate a grid of points based on the ``percentiles of ``X``. The grid is generated by placing ``grid_resolution`` equally spaced points between the ``percentiles`` of each column of ``X``. Parameters ---------- X : ndarray The data percentiles : tuple of floats The percentiles which are used to construct the extreme values of the grid axes. grid_resolution : int The number of equally spaced points that are placed on the grid. Returns ------- grid : ndarray All data points on the grid; ``grid.shape[1] == X.shape[1]`` and ``grid.shape[0] == grid_resolution * X.shape[1]``. axes : seq of ndarray The axes with which the grid has been created. """ if len(percentiles) != 2: raise ValueError('percentile must be tuple of len 2') if not all(0. <= x <= 1. for x in percentiles): raise ValueError('percentile values must be in [0, 1]') axes = [] for col in range(X.shape[1]): uniques = np.unique(X[:, col]) if uniques.shape[0] < grid_resolution: # feature has low resolution use unique vals axis = uniques else: emp_percentiles = mquantiles(X, prob=percentiles, axis=0) # create axis based on percentiles and grid resolution axis = np.linspace(emp_percentiles[0, col], emp_percentiles[1, col], num=grid_resolution, endpoint=True) axes.append(axis) return cartesian(axes), axes def partial_dependence(gbrt, target_variables, grid=None, X=None, percentiles=(0.05, 0.95), grid_resolution=100): """Partial dependence of ``target_variables``. Partial dependence plots show the dependence between the joint values of the ``target_variables`` and the function represented by the ``gbrt``. Read more in the :ref:`User Guide <partial_dependence>`. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. target_variables : array-like, dtype=int The target features for which the partial dependecy should be computed (size should be smaller than 3 for visual renderings). grid : array-like, shape=(n_points, len(target_variables)) The grid of ``target_variables`` values for which the partial dependecy should be evaluated (either ``grid`` or ``X`` must be specified). X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. It is used to generate a ``grid`` for the ``target_variables``. The ``grid`` comprises ``grid_resolution`` equally spaced points between the two ``percentiles``. percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the ``grid``. Only if ``X`` is not None. grid_resolution : int, default=100 The number of equally spaced points on the ``grid``. Returns ------- pdp : array, shape=(n_classes, n_points) The partial dependence function evaluated on the ``grid``. For regression and binary classification ``n_classes==1``. axes : seq of ndarray or None The axes with which the grid has been created or None if the grid has been given. 
Examples -------- >>> samples = [[0, 0, 2], [1, 0, 0]] >>> labels = [0, 1] >>> from sklearn.ensemble import GradientBoostingClassifier >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels) >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2) >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) """ if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) if (grid is None and X is None) or (grid is not None and X is not None): raise ValueError('Either grid or X must be specified') target_variables = np.asarray(target_variables, dtype=np.int32, order='C').ravel() if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]): raise ValueError('target_variables must be in [0, %d]' % (gbrt.n_features - 1)) if X is not None: X = check_array(X, dtype=DTYPE, order='C') grid, axes = _grid_from_X(X[:, target_variables], percentiles, grid_resolution) else: assert grid is not None # dont return axes if grid is given axes = None # grid must be 2d if grid.ndim == 1: grid = grid[:, np.newaxis] if grid.ndim != 2: raise ValueError('grid must be 2d but is %dd' % grid.ndim) grid = np.asarray(grid, dtype=DTYPE, order='C') assert grid.shape[1] == target_variables.shape[0] n_trees_per_stage = gbrt.estimators_.shape[1] n_estimators = gbrt.estimators_.shape[0] pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64, order='C') for stage in range(n_estimators): for k in range(n_trees_per_stage): tree = gbrt.estimators_[stage, k].tree_ _partial_dependence_tree(tree, grid, target_variables, gbrt.learning_rate, pdp[k]) return pdp, axes def plot_partial_dependence(gbrt, X, features, feature_names=None, label=None, n_cols=3, grid_resolution=100, percentiles=(0.05, 0.95), n_jobs=1, verbose=0, ax=None, line_kw=None, contour_kw=None, **fig_kw): """Partial dependence plots for ``features``. The ``len(features)`` plots are arranged in a grid with ``n_cols`` columns. Two-way partial dependence plots are plotted as contour plots. Read more in the :ref:`User Guide <partial_dependence>`. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. features : seq of tuples or ints If seq[i] is an int or a tuple with one int value, a one-way PDP is created; if seq[i] is a tuple of two ints, a two-way PDP is created. feature_names : seq of str Name of each feature; feature_names[i] holds the name of the feature with index i. label : object The class label for which the PDPs should be computed. Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``. n_cols : int The number of columns in the grid plot (default: 3). percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used to create the extreme values for the PDP axes. grid_resolution : int, default=100 The number of equally spaced points on the axes. n_jobs : int The number of CPUs to use to compute the PDs. -1 means 'all CPUs'. Defaults to 1. verbose : int Verbose output during PD computations. Defaults to 0. ax : Matplotlib axis object, default None An axis object onto which the plots will be drawn. line_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For one-way partial dependence plots. 
contour_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For two-way partial dependence plots. fig_kw : dict Dict with keywords passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns ------- fig : figure The Matplotlib Figure object. axs : seq of Axis objects A seq of Axis objects, one for each subplot. Examples -------- >>> from sklearn.datasets import make_friedman1 >>> from sklearn.ensemble import GradientBoostingRegressor >>> X, y = make_friedman1() >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP ... """ import matplotlib.pyplot as plt from matplotlib import transforms from matplotlib.ticker import MaxNLocator from matplotlib.ticker import ScalarFormatter if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) # set label_idx for multi-class GBRT if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2: if label is None: raise ValueError('label is not given for multi-class PDP') label_idx = np.searchsorted(gbrt.classes_, label) if gbrt.classes_[label_idx] != label: raise ValueError('label %s not in ``gbrt.classes_``' % str(label)) else: # regression and binary classification label_idx = 0 X = check_array(X, dtype=DTYPE, order='C') if gbrt.n_features != X.shape[1]: raise ValueError('X.shape[1] does not match gbrt.n_features') if line_kw is None: line_kw = {'color': 'green'} if contour_kw is None: contour_kw = {} # convert feature_names to list if feature_names is None: # if not feature_names use fx indices as name feature_names = [str(i) for i in range(gbrt.n_features)] elif isinstance(feature_names, np.ndarray): feature_names = feature_names.tolist() def convert_feature(fx): if isinstance(fx, six.string_types): try: fx = feature_names.index(fx) except ValueError: raise ValueError('Feature %s not in feature_names' % fx) return fx # convert features into a seq of int tuples tmp_features = [] for fxs in features: if isinstance(fxs, (numbers.Integral,) + six.string_types): fxs = (fxs,) try: fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32) except TypeError: raise ValueError('features must be either int, str, or tuple ' 'of int/str') if not (1 <= np.size(fxs) <= 2): raise ValueError('target features must be either one or two') tmp_features.append(fxs) features = tmp_features names = [] try: for fxs in features: l = [] # explicit loop so "i" is bound for exception below for i in fxs: l.append(feature_names[i]) names.append(l) except IndexError: raise ValueError('features[i] must be in [0, n_features) ' 'but was %d' % i) # compute PD functions pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(partial_dependence)(gbrt, fxs, X=X, grid_resolution=grid_resolution, percentiles=percentiles) for fxs in features) # get global min and max values of PD grouped by plot type pdp_lim = {} for pdp, axes in pd_result: min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max() n_fx = len(axes) old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) # create contour levels for two-way plots if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) if ax is None: fig = plt.figure(**fig_kw) else: fig = ax.get_figure() fig.clear() 
n_cols = min(n_cols, len(features)) n_rows = int(np.ceil(len(features) / float(n_cols))) axs = [] for i, fx, name, (pdp, axes) in zip(count(), features, names, pd_result): ax = fig.add_subplot(n_rows, n_cols, i + 1) if len(axes) == 1: ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw) else: # make contour plot assert len(axes) == 2 XX, YY = np.meshgrid(axes[0], axes[1]) Z = pdp[label_idx].reshape(list(map(np.size, axes))).T CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors='k') ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], alpha=0.75, **contour_kw) ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True) # plot data deciles + axes labels deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) ylim = ax.get_ylim() ax.vlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_xlabel(name[0]) ax.set_ylim(ylim) # prevent x-axis ticks from overlapping ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower')) tick_formatter = ScalarFormatter() tick_formatter.set_powerlimits((-3, 4)) ax.xaxis.set_major_formatter(tick_formatter) if len(axes) > 1: # two-way PDP - y-axis deciles + labels deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) xlim = ax.get_xlim() ax.hlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_ylabel(name[1]) # hline erases xlim ax.set_xlim(xlim) else: ax.set_ylabel('Partial dependence') if len(axes) == 1: ax.set_ylim(pdp_lim[1]) axs.append(ax) fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4, hspace=0.3) return fig, axs
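# A short usage sketch for the two public functions above, modeled on their own
# docstring examples. It assumes the (older) sklearn.ensemble.partial_dependence
# module shown here; in recent scikit-learn the equivalents live in
# sklearn.inspection. Dataset and hyperparameters are illustrative.
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import (partial_dependence,
                                                 plot_partial_dependence)

X, y = make_friedman1(random_state=0)
clf = GradientBoostingRegressor(n_estimators=10, random_state=0).fit(X, y)

# One-way partial dependence values for feature 0 on a coarse grid.
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
print(pdp.shape)  # (1, 5): one output, five grid points

# One-way plot for feature 0 and a two-way contour plot for features (0, 1).
fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)])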
bsd-3-clause
BeatsonLab-MicrobialGenomics/DiscoPlot
discoplot/DiscoPlot.py
1
23712
#!/usr/bin/env python # DiscoPlot: identify genomic rearrangements, misassemblies and sequencing # artefacts in NGS data # Copyright (C) 2013-2015 Mitchell Sullivan # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Mitchell Sullivan # [email protected] # School of Chemistry & Molecular Biosciences # The University of Queensland # Brisbane, QLD 4072. # Australia __title__ = 'DiscoPlot' __version__ = '1.0.2' __description__ = ("DiscoPlot: identify genomic rearrangements, misassemblies " "and sequencing artefacts in NGS data") __author__ = 'Mitchell Sullivan' __license__ = 'GPLv3' __author_email__ = "[email protected]" __url__ = 'https://github.com/BeatsonLab-MicrobialGenomics/DiscoPlot' import argparse import numpy import sys import subprocess def read_sbam(args): import pysam if not args.bam_file is None: sam = pysam.Samfile(args.bam_file, 'rb') elif not args.sam_file: sam = pysam.Samfile(args.sam_file) global refpos global cuta global cutb cuta = 0 cutb = float('inf') refpos = {} if not args.subsection is None: if len(args.subsection) == 1: refpos[args.subsection[0]] = 0 totallength = None for i in range(0, len(sam.references)): if sam.references[i] == args.subsection[0]: totallength = sam.lengths[i] if totallength is None: sys.stderr.write('Selected reference not found.') sys.exit() elif len(args.subsection) == 2: refpos[sam.references[0]] = 0 cuta = int(args.subsection[0]) cutb = int(args.subsection[1]) totallength = cutb - cuta elif len(args.subsection) == 3: refpos[args.subsection[0]] = 0 cuta = int(args.subsection[1]) cutb = int(args.subsection[2]) totallength = cutb - cuta else: sys.stderr.write('Too many arguments given for subsection') sys.exit() if args.bin_size is None: args.bin_size = totallength / args.size + 1 else: args.size = totallength / args.bin_size + 1 else: references = sam.references reflengths = sam.lengths currpos = 0 if args.bin_size is None: args.bin_size = sum(reflengths) / (args.size - (len(reflengths) -1) * (args.gap + 1)) + 1 else: args.size = sum(map(lambda x: x/args.bin_size, reflengths)) + (len(reflengths) -1) * args.gap + 1 for i in range(len(references)): refpos[references[i]] = currpos currpos += reflengths[i] / args.bin_size + args.gap global invgrid, dirgrid, unmapped_for, unmapped_rev unmapped_rev = {} unmapped_for = {} invgrid = {} dirgrid = {} for read in sam.fetch(): ref = sam.getrname(read.tid) if ref in refpos: if read.is_read1: if cuta <= read.pos <= cutb: pos1 = (read.pos - cuta) / args.bin_size + refpos[ref] if read.mate_is_unmapped: if read.is_reverse: if pos1 in unmapped_rev: unmapped_rev[pos1] += 1 else: unmapped_rev[pos1] = 1 else: if pos1 in unmapped_for: unmapped_for[pos1] += 1 else: unmapped_for[pos1] = 1 else: mref = sam.getrname(read.rnext) if mref in refpos: if cuta <= read.pnext <= cutb: pos2 = (read.pnext - cuta) / args.bin_size + refpos[mref] if read.is_reverse: if read.mate_is_reverse: if pos1 < pos2: if pos2 in dirgrid and pos1 in dirgrid[pos2]: 
dirgrid[pos2][pos1] += 1 elif pos2 in dirgrid: dirgrid[pos2][pos1] = 1 else: dirgrid[pos2] = {pos1:1} else: if pos1 in dirgrid and pos2 in dirgrid[pos1]: dirgrid[pos1][pos2] += 1 elif pos1 in dirgrid: dirgrid[pos1][pos2] = 1 else: dirgrid[pos1] = {pos2:1} else: if pos2 in invgrid and pos1 in invgrid[pos2]: invgrid[pos2][pos1] += 1 elif pos2 in invgrid: invgrid[pos2][pos1] = 1 else: invgrid[pos2] = {pos1:1} else: if read.mate_is_reverse: if pos1 in invgrid and pos2 in invgrid[pos1]: invgrid[pos1][pos2] += 1 elif pos1 in invgrid: invgrid[pos1][pos2] = 1 else: invgrid[pos1] = {pos2:1} else: if pos1 < pos2: if pos1 in dirgrid and pos2 in dirgrid[pos1]: dirgrid[pos1][pos2] += 1 elif pos1 in dirgrid: dirgrid[pos1][pos2] = 1 else: dirgrid[pos1] = {pos2:1} else: if pos2 in dirgrid and pos1 in dirgrid[pos2]: dirgrid[pos2][pos1] += 1 elif pos2 in dirgrid: dirgrid[pos2][pos1] = 1 else: dirgrid[pos2] = {pos1:1} else: if read.mate_is_unmapped: ref = sam.getrname(read.tid) if ref in refpos: if cuta <= read.pos <= cutb: pos = (read.pos - cuta) / args.bin_size + refpos[ref] if read.is_reverse: if pos in unmapped_rev: unmapped_rev[pos] += 1 else: unmapped_rev[pos] = 1 else: if pos in unmapped_for: unmapped_for[pos] += 1 else: unmapped_for[pos] = 1 def read_sing(args): readlen = None if not args.read_file is None: reads = open(args.read_file) first = True getfq = 0 readlen = {} for line in reads: if first: first = False if line.startswith('@'): getfq = 2 name = line.rstrip()[1:] seq = '' elif line.startswith('>'): readlen[name] = len(seq) name = line.rstrip()[1:] seq = '' elif getfq == 0: seq += line.rstrip() elif getfq == 1: readlen[name] = len(seq) name = line.rstrip() seq = '' elif getfq == 2: seq += line.rstrip() getfq = 3 elif getfq == 3: getfq = 4 elif getfq == 4: getfq = 1 readlen[name] = len(seq) if not args.reference_file is None: ref = open(args.reference_file) first = True references = [] reflengths = [] for line in ref: if line.startswith('>'): if first: first = False else: references.append(name) reflengths.append(len(seq)) name = line.rstrip()[1:] seq = '' else: seq += line references.append(name) reflengths.append(len(seq)) else: blast = open(args.blast_file) refdict = {} for line in blast: if line.split()[1] in refdict: if max([int(line.split()[8]), int(line.split()[9])]) > refdict[line.split()[1]]: refdict[line.split()[1]] = max([int(line.split()[8]), int(line.split()[9])]) else: refdict[line.split()[1]] = max([int(line.split()[8]), int(line.split()[9])]) blast.close() references = [] reflengths = [] for i in refdict: references.append(i) reflengths.append(refdict[i]) cuta = 0 cutb = float('inf') refpos = {} if not args.subsection is None: if len(args.subsection) == 1: refpos[args.subsection[0]] = 0 totallength = None for i in range(0, len(references)): if references[i] == args.subsection[0]: totallength = reflengths[i] if totallength is None: sys.stderr.write('Selected reference not found.') sys.exit() elif len(args.subsection) == 2: refpos[references[0]] = 0 cuta = int(args.subsection[0]) cutb = int(args.subsection[1]) totallength = cutb - cuta elif len(args.subsection) == 3: refpos[args.subsection[0]] = 0 cuta = int(args.subsection[0]) cutb = int(args.subsection[1]) totallength = cutb - cuta else: sys.stderr.write('Too many arguments given for subsection') sys.exit() if args.bin_size is None: args.bin_size = totallength / args.size else: args.size = totallength / args.bin_size else: currpos = 0 if args.bin_size is None: args.bin_size = sum(reflengths) / (args.size - (len(reflengths) -1) 
* (args.gap + 1)) else: args.size = sum(map(lambda x: x/args.bin_size, reflengths)) + (len(reflengths) -1) * args.gap for i in range(len(references)): refpos[references[i]] = currpos currpos += reflengths[i] / args.bin_size + args.gap global invgrid, dirgrid, unmapped_for, unmapped_rev unmapped_rev = {} unmapped_for = {} invgrid = {} dirgrid = {} blast = open(args.blast_file) lastquery = '' hits = [] for line in blast: query, subject, ident, length, mm, indel, qstart, qstop, rstart, rstop, eval, bitscore = line.split() qstart, qstop, rstart, rstop, length, mm, indel = map(int, [qstart, qstop, rstart, rstop, length, mm, indel]) if query != lastquery and lastquery != '': hits.sort(reverse=True) newhits = [hits[0]] qtaken = set() for i in range(hits[2], hits[3] + 1): qtaken.add(i) for i in hits[1:]: if i[:-3] == newhits[-1][:-3]: newhits.append(i) else: getit = False for j in range(hits[2], hits[3] + 1): if not j in qtaken: getit = True qtaken.add(j) if getit: newhits.append(i) anchor = None revseq = None for i in newhits: bitscore, length, qstart, qstop, rstart, rstop, subject = i if anchor is None: if rstart < rstop: anchor = rstart revseq = False else: anchor = rstop revseq = True if min(qtaken) >= args.unmapped: if revseq: if anchor in unmapped_for: unmapped_for[anchor] += 1 else: unmapped_for[anchor] = 1 else: if anchor in unmapped_rev: unmapped_rev[anchor] += 1 else: unmapped_rev[anchor] = 1 if max(qtaken) <= readlen[lastquery] - args.unmapped: if revseq: if anchor in unmapped_rev: unmapped_rev[anchor] += 1 else: unmapped_rev[anchor] = 1 else: if anchor in unmapped_for: unmapped_for[anchor] += 1 else: unmapped_for[anchor] = 1 lastxpos = None lastypos = None oldstart, oldstop = qstart, qstop if revseq: rstart, rstop = rstop, rstart qstart = readlen[lastquery] - qstop qstop = readlen[lastquery] - oldstart for j in range(qstart, qstop): xpos = refpos[subject] + (anchor + j - cuta) / args.bin_size ypos = refpos[subject] + (rstart + int(((j - qstart) * 1.0 / (qstop - qstart)) * (rstop - rstart))) / args.bin_size if xpos != lastxpos or ypos != lastypos: if rstart < rstop: if xpos in dirgrid: if ypos in dirgrid[xpos]: dirgrid[xpos][ypos] += 1 else: dirgrid[xpos][ypos] = 1 else: dirgrid[xpos] = {ypos:1} else: if xpos in invgrid: if ypos in invgrid[xpos]: invgrid[xpos][ypos] += 1 else: invgrid[xpos][ypos] = 1 else: invgrid[xpos] = {ypos:1} lastxpos, lastypos = xpos, ypos if ident >= args.min_ident and length >= args.min_length and subject in refpos and ((cuta <= rstart <= cutb) or (cuta <= rstop <= cutb)): hits.append((float(bitscore), length, qstart, qstop, rstart, rstop, subject)) lastquery = query def generate_blast(args): subprocess.Popen('makeblastdb -dbtype nucl -out ' + args.gen_blast + '.db -in ' + args.reference_file, shell=True, stdout=subprocess.PIPE).wait() subprocess.Popen('blastn -db ' + args.gen_blast + '.db -outfmt 6 -query ' + args.read_file + ' -out ' + args.gen_blast + '.out', shell=True).wait() args.blast_file = args.gen_blast + '.out' def draw_dotplot(args): global refpos global cuta global cutb vals1, vals2 = [], [] for i in invgrid: for j in invgrid[i]: vals1.append(invgrid[i][j]) vals2.append(invgrid[i][j]) for i in dirgrid: for j in dirgrid[i]: vals1.append(dirgrid[i][j]) vals2.append(dirgrid[i][j]) vals2 = numpy.array(vals2) for i in unmapped_rev: vals1.append(unmapped_rev[i]) for i in unmapped_for: vals1.append(unmapped_for[i]) vals1 = numpy.array(vals1) med = numpy.median(vals2) numvals = numpy.size(vals1) sizemod = 2000.0 / args.size / med fig = 
plt.figure(figsize=(10,10)) ax = fig.add_subplot(111, aspect='equal') x = numpy.zeros(numvals, dtype='u4') y = numpy.zeros(numvals, dtype='u4') sizes = numpy.zeros(numvals, dtype='f4') colours = numpy.array(['x' for i in range(numvals)]) count = 0 for i in dirgrid: for j in dirgrid[i]: if args.max_hits >= dirgrid[i][j] >= args.min_hits: x[count] = i * args.bin_size + cuta y[count] = j * args.bin_size + cuta sizes[count] = dirgrid[i][j] * sizemod colours[count] = 'r' count += 1 for i in invgrid: for j in invgrid[i]: if args.max_hits >= invgrid[i][j] >= args.min_hits: x[count] = i * args.bin_size + cuta y[count] = j * args.bin_size + cuta sizes[count] = invgrid[i][j] * sizemod colours[count] = 'b' count += 1 for i in unmapped_for: if args.max_hits >= unmapped_for[i] >= args.min_hits: x[count] = cuta y[count] = i * args.bin_size + cuta sizes[count] = unmapped_for[i] * sizemod colours[count] = 'g' count += 1 for i in unmapped_rev: if args.max_hits >= unmapped_rev[i] >= args.min_hits: x[count] = i * args.bin_size + cuta y[count] = cuta sizes[count] = unmapped_rev[i] * sizemod colours[count] = 'g' count += 1 count1, count2, count3 = 0, 0, 0 for i in colours: if i == 'b': count1 += 1 elif i == 'r': count2 += 1 elif i == 'g': count3 += 1 ax.scatter(x, y, s=sizes, c=colours, edgecolor='none', alpha=0.3) sizes = [] names = [] for i in [10, 25, 50, 75, 90]: sizes.append(numpy.percentile(vals2, i)) names.append(str(i) + '% Normal ' + str(sizes[-1])) names.append('50% Inverted ' + str(sizes[2])) a = plt.scatter(-100, -100, s=sizes[2] * sizemod, c='b', edgecolor='none', alpha=0.3) b = plt.scatter(-100, -100, s=sizes[0] * sizemod, c='r', edgecolor='none', alpha=0.3) c = plt.scatter(-100, -100, s=sizes[1] * sizemod, c='r', edgecolor='none', alpha=0.3) d = plt.scatter(-100, -100, s=sizes[2] * sizemod, c='r', edgecolor='none', alpha=0.3) e = plt.scatter(-100, -100, s=sizes[3] * sizemod, c='r', edgecolor='none', alpha=0.3) f = plt.scatter(-100, -100, s=sizes[4] * sizemod, c='r', edgecolor='none', alpha=0.3) leg = ax.legend([b, c, d, e, f, a], names, loc=4) leg.draggable(state=True) for i in refpos: if not refpos[i] == 0: ax.axhspan(refpos[i] * args.bin_size, refpos[i] * args.bin_size - args.gap * args.bin_size, facecolor='g', alpha=0.3) ax.axvspan(refpos[i] * args.bin_size, refpos[i] * args.bin_size - args.gap * args.bin_size, facecolor='g', alpha=0.3) if cutb == float('inf'): cutb = args.size * args.bin_size + cuta plt.xlim([cuta - args.bin_size * 10, cutb]) plt.ylim([cuta - args.bin_size * 10, cutb]) plt.grid(True) if not args.output_file is None: plt.savefig(args.output_file, dpi=args.image_quality) else: plt.show() parser = argparse.ArgumentParser(prog='DiscoPlot', formatter_class=argparse.RawDescriptionHelpFormatter, description=''' DiscoPlot - read mapping visualisation in the large USAGE: DiscoPlot -bam bamfile.bam -o output_file.bmp -size 5000 Create a bmp file from a bamfile of paired-end reads with a width and height of 5000px DiscoPlot -r reads.fa -B blast_prefix -r reference -o output_file.png -bin bin_size Create a png file from reads.fa, generate blast file. 
Image size will be reference length / bin_size ''', epilog="Thanks for using DiscoPlot") parser.add_argument('-r', '--read_file', action='store', default=None, help='read file') parser.add_argument('-ref', '--reference_file', action='store', default=None, help='reference file') parser.add_argument('-bam', '--bam_file', action='store', default=None, help='bam file') parser.add_argument('-sam', '--sam_file', action='store', default=None, help='sam file') parser.add_argument('-B', '--gen_blast', action='store', default=None, help='Generate blast files, use argument as prefix for output.') parser.add_argument('-b', '--blast_file', action='store', default=None, help='Blast file (output format 6)') parser.add_argument('-o', '--output_file', action='store', default=None, help='output file [gif/bmp/png]') parser.add_argument('-s', '--size', action='store', type=int, default=None, help='Number of bins') parser.add_argument('-bin', '--bin_size', action='store', type=int, default=None, help='Bin size (in bp)') parser.add_argument('-g', '--gap', action='store', type=int, default=5, help='Gap size') parser.add_argument('-sub', '--subsection', nargs='+', action='store', default=None, help='Only display subection of genome [ref]/[min_cutoff max_cutoff]/[ref min_cutoff max_cutoff]') parser.add_argument('-c', '--min_hits', action='store', type=int, default=1, help='Min hits to be shown') parser.add_argument('-m', '--max_hits', action='store', type=float, default=float('inf'), help='Bins with more hits than this will be skipped.') parser.add_argument('-dpi', '--image_quality', action='store', type=int, default=1600, help='Image quality (in DPI)') args = parser.parse_args() if args.size is None and args.bin_size is None: sys.stderr.write('Please give a image size or bin size.') sys.exit() if not args.gen_blast is None: if args.reference_file is None: sys.stderr.write('Please provide a reference file') sys.exit() if args.read_file is None: sys.stderr.write('Please provide a read file (FASTA)') sys.exit() generate_blast(args) if not args.output_file is None: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt if not args.size is None and not args.bin_size is None: sys.stderr.write('Only provide bin size or image size, not both.') sys.exit() if not args.sam_file is None or not args.bam_file is None: read_sbam(args) elif args.blast_file is None: sys.stderr.write('Please either generate or provide a BLAST comparison') sys.exit() else: read_sing(args) draw_dotplot(args)
gpl-3.0
SSDS-Croatia/SSDS-2017
Day-5/util.py
1
5277
import os, sys, gzip, math, urllib import numpy as np from PIL import Image import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec class Dataset: def __init__(self, data, labels=None): self.data = data if type(labels) == None: self.supervised = False else: self.supervised = True self.labels = labels self.n = len(data) self.batches_complete = 0 self.position_in_epoch = 0 def next_batch(self, batch_size, return_labels=False): new_epoch = False if self.position_in_epoch + batch_size >= self.n: self.position_in_epoch = 0 self.batches_complete += 1 new_epoch = True batch = self.data[self.position_in_epoch:self.position_in_epoch + batch_size] if self.supervised and return_labels: batch_labels = self.labels[self.position_in_epoch, self.position_in_epoch + batch_size] batch = (batch, batch_labels) self.position_in_epoch += batch_size return new_epoch, batch def plot(samples): fig = plt.figure(figsize=(4, 4)) gs = gridspec.GridSpec(4, 4) gs.update(wspace=0.05, hspace=0.05) for i, sample in enumerate(samples): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(sample.reshape(28, 28), cmap='Greys_r') return fig def plot_single(sample, epoch=0): plt.axis('off') ax = plt.gca() ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(sample.reshape(28, 28), cmap='Greys_r') def download_mnist(data_folder, dataset): """ Download and extract database :param database_name: Database name """ image_files = ['train-images-idx3-ubyte.gz', 't10k-images-idx3-ubyte.gz'] label_files = ['train-labels-idx1-ubyte.gz', 't10k-labels-idx1-ubyte.gz'] url = 'http://yann.lecun.com/exdb/mnist/' dataset_folder = os.path.join(data_folder, dataset) if not os.path.exists(dataset_folder): os.makedirs(dataset_folder) for filename in image_files + label_files: filepath = os.path.join(dataset_folder, filename) filepath, _ = urllib.request.urlretrieve(url + filename, filepath) statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') else: print('Found {} Data'.format(dataset)) return dataset_folder def extract_data(filename, num_data, head_size, data_size): with gzip.open(filename) as bytestream: bytestream.read(head_size) buf = bytestream.read(data_size * num_data) data = np.frombuffer(buf, dtype=np.uint8).astype(np.float) return data def load_mnist(dataset_folder): data = extract_data(dataset_folder + '/train-images-idx3-ubyte.gz', 60000, 16, 28 * 28) trX = data.reshape((60000, 28, 28, 1)) data = extract_data(dataset_folder + '/train-labels-idx1-ubyte.gz', 60000, 8, 1) trY = data.reshape((60000)) data = extract_data(dataset_folder + '/t10k-images-idx3-ubyte.gz', 10000, 16, 28 * 28) teX = data.reshape((10000, 28, 28, 1)) data = extract_data(dataset_folder + '/t10k-labels-idx1-ubyte.gz', 10000, 8, 1) teY = data.reshape((10000)) trY = np.asarray(trY) teY = np.asarray(teY) X = np.concatenate((trX, teX), axis=0) y = np.concatenate((trY, teY), axis=0).astype(np.int) seed = 547 np.random.seed(seed) np.random.shuffle(X) np.random.seed(seed) np.random.shuffle(y) y_vec = np.zeros((len(y), 10), dtype=np.float) for i, label in enumerate(y): y_vec[i, y[i]] = 1.0 return X / 255., y_vec def images_square_grid(images, mode): """ Save images as a square grid :param images: Images to be used for the grid :param mode: The mode to use for images :return: Image of images in a square grid """ # Get maximum size for square grid of images save_size = math.floor(np.sqrt(images.shape[0])) # Scale to 0-255 
images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8) # Put images in a square arrangement images_in_square = np.reshape( images[:save_size*save_size], (save_size, save_size, images.shape[1], images.shape[2], images.shape[3])) if mode == 'L': images_in_square = np.squeeze(images_in_square, 4) # Combine images to grid image new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size)) for col_i, col_images in enumerate(images_in_square): for image_i, image in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2])) return new_im def get_sample_images(data, dataset='mnist', n=25): """ Get a sample of n images from a dataset, able to be displayed with matplotlib :param data_dir: Root directory of the dataset :param dataset: """ # Display options if dataset == 'mnist': mode = 'L' else: mode = 'RGB' return data[:n], mode
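# A small usage sketch for the Dataset class above, assuming it runs in the same
# module (Dataset and numpy are already imported there). The array shapes, label
# encoding and batch size are illustrative.
if __name__ == '__main__':
    fake_images = np.random.rand(100, 28 * 28)
    fake_labels = np.eye(10)[np.random.randint(0, 10, size=100)]
    dataset = Dataset(fake_images, labels=fake_labels)

    epochs = 0
    while epochs < 2:
        new_epoch, batch = dataset.next_batch(32)
        if new_epoch:
            epochs += 1
        print('epoch %d, batch shape %s' % (epochs, batch.shape))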
mit
miku/siskin
docs/btag-2017/scripts/pie.py
2
1614
# coding: utf-8
"""
Sources and sizes.
"""

import base64
import json

import requests
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm

addr = base64.b64decode("""aHR0cDovLzE3Mi4xOC4xMTMuNzo4MDg1L3NvbHIvYmlibGlv""")


def total():
    """ Return the total number of docs. """
    r = requests.get('%s/select?wt=json&q=*:*' % addr)
    if r.status_code >= 300:
        raise RuntimeError("got HTTP %s on %s" % (r.status_code, r.url))
    doc = json.loads(r.text)
    return doc['response']['numFound']


sources = (
    ('28', 'DOAJ'),
    ('48', 'WISO'),
    ('49', 'Crossref'),
    ('50', 'De Gruyter'),
    ('55', 'JSTOR'),
    ('60', 'Thieme'),
    ('85', 'Elsevier'),
    ('89', 'IEEE'),
    ('105', 'Springer'),
    ('121', 'Arxiv'),
)

labels, names, sizes = [s[0] for s in sources], [s[1] for s in sources], []

for label in labels:
    r = requests.get('%s/select?wt=json&q=source_id:%s' % (addr, label))
    if r.status_code >= 300:
        raise RuntimeError("got HTTP %s on %s" % (r.status_code, r.url))
    doc = json.loads(r.text)
    found = doc['response']['numFound']
    sizes.append(found)

explode = [0 for _ in range(len(labels))]
explode[2] = 0.1

fig1, ax1 = plt.subplots()
cmap = plt.get_cmap('Set1')
colors = [cmap(i) for i in np.linspace(0, 1, len(labels))]
patches, texts = plt.pie(sizes, startangle=90, colors=colors, shadow=False,
                         explode=explode)
plt.legend(patches, names, loc="lower left")
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title('Article Metadata Index Sources (2017)')
plt.savefig('pie.png')
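# A network-free sketch of the same pie construction, with hypothetical counts in
# place of the Solr queries above, plus an "Other" slice computed from a grand
# total such as the unused total() helper could provide. All numbers are made up.
import matplotlib.pyplot as plt
import numpy as np

names = ['DOAJ', 'Crossref', 'Elsevier', 'Springer', 'Other']
sizes = [500000, 90000000, 14000000, 10000000, 0]
grand_total = 130000000                    # e.g. the value total() would return
sizes[-1] = grand_total - sum(sizes[:-1])

explode = [0] * len(names)
explode[1] = 0.1                           # highlight one slice, as above

cmap = plt.get_cmap('Set1')
colors = [cmap(i) for i in np.linspace(0, 1, len(names))]
fig, ax = plt.subplots()
patches, texts = plt.pie(sizes, startangle=90, colors=colors, explode=explode)
plt.legend(patches, names, loc='lower left')
ax.axis('equal')
plt.title('Article Metadata Index Sources (illustrative counts)')
plt.savefig('pie_demo.png')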
gpl-3.0
pratapvardhan/pandas
pandas/tests/indexes/timedeltas/test_timedelta_range.py
3
3021
import pytest

import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Second
from pandas import to_timedelta, timedelta_range


class TestTimedeltas(object):

    def test_timedelta_range(self):

        expected = to_timedelta(np.arange(5), unit='D')
        result = timedelta_range('0 days', periods=5, freq='D')
        tm.assert_index_equal(result, expected)

        expected = to_timedelta(np.arange(11), unit='D')
        result = timedelta_range('0 days', '10 days', freq='D')
        tm.assert_index_equal(result, expected)

        expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
        result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
                                 freq='D')
        tm.assert_index_equal(result, expected)

        expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
        result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
        tm.assert_index_equal(result, expected)

        expected = to_timedelta(np.arange(50), unit='T') * 30
        result = timedelta_range('0 days', freq='30T', periods=50)
        tm.assert_index_equal(result, expected)

        # GH 11776
        arr = np.arange(10).reshape(2, 5)
        df = pd.DataFrame(np.arange(10).reshape(2, 5))
        for arg in (arr, df):
            with tm.assert_raises_regex(TypeError, "1-d array"):
                to_timedelta(arg)
            for errors in ['ignore', 'raise', 'coerce']:
                with tm.assert_raises_regex(TypeError, "1-d array"):
                    to_timedelta(arg, errors=errors)

        # issue10583
        df = pd.DataFrame(np.random.normal(size=(10, 4)))
        df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
        expected = df.loc[pd.Timedelta('0s'):, :]
        result = df.loc['0s':, :]
        tm.assert_frame_equal(expected, result)

    @pytest.mark.parametrize('periods, freq', [
        (3, '2D'), (5, 'D'), (6, '19H12T'), (7, '16H'), (9, '12H')])
    def test_linspace_behavior(self, periods, freq):
        # GH 20976
        result = timedelta_range(start='0 days', end='4 days', periods=periods)
        expected = timedelta_range(start='0 days', end='4 days', freq=freq)
        tm.assert_index_equal(result, expected)

    def test_errors(self):
        # not enough params
        msg = ('Of the four parameters: start, end, periods, and freq, '
               'exactly three must be specified')
        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(start='0 days')

        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(end='5 days')

        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(periods=2)

        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range()

        # too many params
        with tm.assert_raises_regex(ValueError, msg):
            timedelta_range(start='0 days', end='5 days', periods=10, freq='H')
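# A quick interactive sketch of the rule test_errors exercises: of start, end,
# periods and freq, exactly three must be given, otherwise timedelta_range
# raises ValueError. The specific values below are illustrative.
import pandas as pd

print(pd.timedelta_range(start='0 days', end='4 days', periods=5))
print(pd.timedelta_range(start='0 days', periods=5, freq='12H'))

try:
    pd.timedelta_range(start='0 days')   # only one of the four parameters
except ValueError as err:
    print('ValueError:', err)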
bsd-3-clause
tttr222/autumn_ner
test.py
1
5277
#!/usr/bin/env python import sys, os, random, pickle, json, codecs, time import numpy as np import sklearn.metrics as skm import argparse from model import AutumnNER from utility import load_dataset from utility import load_embeddings from utility import report_performance parser = argparse.ArgumentParser(description='Train and evaluate BiLSTM on a given dataset') parser.add_argument('--datapath', dest='datapath', type=str, default='CoNLL2003', help='path to the datasets') parser.add_argument('--embeddings', dest='embeddings_path', type=str, default=None, help='path to the testing dataset') parser.add_argument('--optimizer', dest='optimizer', type=str, default='default', help='choose the optimizer: default, rmsprop, adagrad, adam.') parser.add_argument('--batch-size', dest='batch_size', type=int, default=64, help='number of instances in a minibatch') parser.add_argument('--num-epoch', dest='num_epoch', type=int, default=50, help='number of passes over the training set') parser.add_argument('--learning-rate', dest='learning_rate', type=str, default='default', help='learning rate') parser.add_argument('--embedding-factor', dest='embedding_factor', type=float, default=1.0, help='learning rate multiplier for embeddings') parser.add_argument('--decay', dest='decay_rate', type=float, default=0.95, help='exponential decay for learning rate') parser.add_argument('--keep-prob', dest='keep_prob', type=float, default=0.7, help='dropout keep rate') parser.add_argument('--num-cores', dest='num_cores', type=int, default=5, help='seed for training') parser.add_argument('--seed', dest='seed', type=int, default=1, help='seed for training') def main(args): print >> sys.stderr, "Running Autumn NER model testing module" print >> sys.stderr, args random.seed(args.seed) trainset = [] devset = [] testset_standalone = {} word_vocab = [] print "Loading dataset.." assert(os.path.isdir(args.datapath)) for fname in sorted(os.listdir(args.datapath)): if os.path.isdir(fname): continue if fname.endswith('.ner.txt'): dataset, vocab = load_dataset(os.path.join(args.datapath,fname)) word_vocab += vocab if fname.endswith('train.ner.txt'): trainset += dataset if fname.endswith('dev.ner.txt'): devset += dataset if fname.endswith('test.ner.txt'): testset_standalone[fname] = dataset print "Loaded {} instances with a vocab size of {} from {}".format(len(dataset),len(vocab),fname) word_vocab = sorted(set(word_vocab)) if args.embeddings_path: embeddings = load_embeddings(args.embeddings_path, word_vocab, 300) else: embeddings = None print "Loaded {}/{} instances from training/dev set".format(len(trainset),len(devset)) X_train, y_train = zip(*trainset) X_dev, y_dev = zip(*devset) labels = [] for lb in y_train + y_dev: labels += lb labels = sorted(set(labels)) # Create the model, passing in relevant parameters bilstm = AutumnNER(labels=labels, word_vocab=word_vocab, word_embeddings=embeddings, optimizer=args.optimizer, embedding_size=300, char_embedding_size=32, lstm_dim=200, num_cores=args.num_cores, embedding_factor=args.embedding_factor, learning_rate=args.learning_rate, decay_rate=args.decay_rate, dropout_keep=args.keep_prob) model_path = './scratch/saved_model_d{}_s{}'.format(hash(args.datapath),args.seed) if not os.path.exists(model_path + '.meta'): if not os.path.exists('./scratch'): os.mkdir('./scratch') print "Training.." bilstm.fit(X_train,y_train, X_dev, y_dev, num_epoch=args.num_epoch, batch_size=args.batch_size, seed=args.seed) bilstm.save(model_path) else: print "Loading saved model.." 
        bilstm.restore(model_path)

    print "Evaluating.."
    print "Performance on DEV set ----------------------------"
    report_performance(bilstm, X_dev, y_dev, 'evaluation/devset_predictions.txt')

    print "Performance on TEST set(s) ----------------------------"
    overall_testset = []
    for key, testset in testset_standalone.items():
        X_test, y_test = zip(*testset)
        report_performance(bilstm, X_test, y_test,
                           'evaluation/testset_{}_predictions.txt'.format(key))
        overall_testset += testset

    X_test, y_test = zip(*overall_testset)
    report_performance(bilstm, X_test, y_test,
                       'evaluation/testset_overall_predictions.txt')


if __name__ == '__main__':
    main(parser.parse_args())
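# A standalone sketch of the file-routing convention main() relies on: every
# *.ner.txt file under --datapath is loaded, and its suffix decides whether it
# joins the training, development, or held-out test split. The file names
# passed in below are hypothetical.
def route_ner_files(filenames):
    splits = {'train': [], 'dev': [], 'test': []}
    for fname in sorted(filenames):
        if not fname.endswith('.ner.txt'):
            continue
        if fname.endswith('train.ner.txt'):
            splits['train'].append(fname)
        elif fname.endswith('dev.ner.txt'):
            splits['dev'].append(fname)
        elif fname.endswith('test.ner.txt'):
            splits['test'].append(fname)
    return splits


print(route_ner_files(['conll.train.ner.txt', 'conll.dev.ner.txt',
                       'conll.test.ner.txt', 'readme.md']))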
mit
RachitKansal/scikit-learn
doc/conf.py
210
8446
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

from __future__ import print_function

import sys
import os

from sklearn.externals.six import u

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve

# -- General configuration ---------------------------------------------------

# Try to override the matplotlib configuration as early as possible
try:
    import gen_rst
except:
    pass

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
              'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
              'sphinx.ext.linkcode',
              ]

autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# generate autosummary even if no references
autosummary_generate = True

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# Generate the plots for the gallery
plot_gallery = True

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
                      'google_analytics': True, 'surveybanner': False,
                      'sprintbanner': True}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_domain_indices = False

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'

# -- Options for LaTeX output ------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
                    u('scikit-learn developers'), 'manual'), ]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
latex_domain_indices = False

trim_doctests_flags = True


def generate_example_rst(app, what, name, obj, options, lines):
    # generate empty examples files, so that we don't get
    # inclusion errors if there are no examples for a class / module
    examples_path = os.path.join(app.srcdir, "modules", "generated",
                                 "%s.examples" % name)
    if not os.path.exists(examples_path):
        # touch file
        open(examples_path, 'w').close()


def setup(app):
    # to hide/show the prompt in code examples:
    app.add_javascript('js/copybutton.js')
    app.connect('autodoc-process-docstring', generate_example_rst)


# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
                                         u'https://github.com/scikit-learn/'
                                         'scikit-learn/blob/{revision}/'
                                         '{package}/{path}#L{lineno}')
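For context on the sphinx.ext.linkcode hook used at the end of this configuration: the extension only requires that conf.py expose a callable named linkcode_resolve(domain, info) which returns a source URL or None. The sketch below is a simplified illustration of that contract, not the project's helper; make_linkcode_resolve (imported from sphinxext/github_link.py above) additionally resolves the real blob revision and the #L{lineno} anchor, and the hard-coded 'master' branch here is a placeholder assumption.

# Simplified illustration of the sphinx.ext.linkcode contract that
# make_linkcode_resolve fulfils; 'master' is a placeholder revision.
def linkcode_resolve(domain, info):
    if domain != 'py' or not info.get('module'):
        return None
    path = info['module'].replace('.', '/')
    return ('https://github.com/scikit-learn/scikit-learn/'
            'blob/master/%s.py' % path)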
bsd-3-clause
KonradBreitsprecher/espresso
samples/lb_profile.py
1
1902
import numpy as np
import matplotlib.pyplot as plt

import espressomd
import espressomd.lb
import espressomd.observables
import espressomd.shapes
import espressomd.lbboundaries
import espressomd.accumulators

system = espressomd.System(box_l=[10.0, 10.0, 5.0])
system.time_step = 0.01
system.cell_system.skin = 0.4

lb_fluid = espressomd.lb.LBFluidGPU(
    agrid=1.0, fric=1.0, dens=1.0, visc=1.0, tau=0.01,
    ext_force=[0, 0, 0.15])
system.actors.add(lb_fluid)
system.thermostat.set_lb(kT=1.0)

fluid_obs = espressomd.observables.CylindricalLBVelocityProfile(
    center=[5.0, 5.0, 0.0],
    axis='z',
    n_r_bins=100,
    n_phi_bins=1,
    n_z_bins=1,
    min_r=0.0,
    max_r=4.0,
    min_phi=-np.pi,
    max_phi=np.pi,
    min_z=0.0,
    max_z=10.0,
    sampling_delta_x=0.05,
    sampling_delta_y=0.05,
    sampling_delta_z=1.0)

cylinder_shape = espressomd.shapes.Cylinder(
    center=[5.0, 5.0, 5.0],
    axis=[0, 0, 1],
    direction=-1,
    radius=4.0,
    length=20.0)
cylinder_boundary = espressomd.lbboundaries.LBBoundary(shape=cylinder_shape)
system.lbboundaries.add(cylinder_boundary)

system.integrator.run(5000)

accumulator = espressomd.accumulators.MeanVarianceCalculator(obs=fluid_obs)
system.auto_update_accumulators.add(accumulator)
system.integrator.run(5000)

lb_fluid_profile = accumulator.get_mean()
lb_fluid_profile = np.reshape(lb_fluid_profile, (100, 1, 1, 3))


def poiseuille_flow(r, R, ext_force):
    return ext_force * 1. / 4 * (R**2.0 - r**2.0)


# Please note that due to symmetry and interpolation a plateau is seen near r=0.
n_bins = len(lb_fluid_profile[:, 0, 0, 2])
r_max = 4.0
r = np.linspace(0.0, r_max, n_bins)
plt.plot(r, lb_fluid_profile[:, 0, 0, 2], label='LB profile')
plt.plot(r, poiseuille_flow(r, r_max, 0.15), label='analytical solution')
plt.show()
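For reference, the analytical curve this sample compares against is the standard Poiseuille profile for body-force-driven flow in a cylindrical channel. The symbols below are labels chosen here (not ESPResSo API names): f_ext for the script's ext_force density, R for the channel radius, rho and nu for the dens and visc parameters, and eta for the dynamic viscosity.

\[
v_z(r) = \frac{f_{\mathrm{ext}}}{4\,\eta}\,\bigl(R^{2} - r^{2}\bigr),
\qquad \eta = \rho\,\nu .
\]

With dens=1.0 and visc=1.0 in the script, eta = 1 and the prefactor reduces to ext_force / 4, which is exactly what poiseuille_flow computes.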
gpl-3.0
sambitgaan/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/axes.py
69
259904
from __future__ import division, generators import math, sys, warnings, datetime, new import numpy as np from numpy import ma import matplotlib rcParams = matplotlib.rcParams import matplotlib.artist as martist import matplotlib.axis as maxis import matplotlib.cbook as cbook import matplotlib.collections as mcoll import matplotlib.colors as mcolors import matplotlib.contour as mcontour import matplotlib.dates as mdates import matplotlib.font_manager as font_manager import matplotlib.image as mimage import matplotlib.legend as mlegend import matplotlib.lines as mlines import matplotlib.mlab as mlab import matplotlib.patches as mpatches import matplotlib.quiver as mquiver import matplotlib.scale as mscale import matplotlib.table as mtable import matplotlib.text as mtext import matplotlib.ticker as mticker import matplotlib.transforms as mtransforms iterable = cbook.iterable is_string_like = cbook.is_string_like def _process_plot_format(fmt): """ Process a matlab(TM) style color/line style format string. Return a (*linestyle*, *color*) tuple as a result of the processing. Default values are ('-', 'b'). Example format strings include: * 'ko': black circles * '.b': blue dots * 'r--': red dashed lines .. seealso:: :func:`~matplotlib.Line2D.lineStyles` and :func:`~matplotlib.pyplot.colors`: for all possible styles and color format string. """ linestyle = None marker = None color = None # Is fmt just a colorspec? try: color = mcolors.colorConverter.to_rgb(fmt) return linestyle, marker, color # Yes. except ValueError: pass # No, not just a color. # handle the multi char special cases and strip them from the # string if fmt.find('--')>=0: linestyle = '--' fmt = fmt.replace('--', '') if fmt.find('-.')>=0: linestyle = '-.' fmt = fmt.replace('-.', '') if fmt.find(' ')>=0: linestyle = 'None' fmt = fmt.replace(' ', '') chars = [c for c in fmt] for c in chars: if c in mlines.lineStyles: if linestyle is not None: raise ValueError( 'Illegal format string "%s"; two linestyle symbols' % fmt) linestyle = c elif c in mlines.lineMarkers: if marker is not None: raise ValueError( 'Illegal format string "%s"; two marker symbols' % fmt) marker = c elif c in mcolors.colorConverter.colors: if color is not None: raise ValueError( 'Illegal format string "%s"; two color symbols' % fmt) color = c else: raise ValueError( 'Unrecognized character %c in format string' % c) if linestyle is None and marker is None: linestyle = rcParams['lines.linestyle'] if linestyle is None: linestyle = 'None' if marker is None: marker = 'None' return linestyle, marker, color def set_default_color_cycle(clist): """ Change the default cycle of colors that will be used by the plot command. This must be called before creating the :class:`Axes` to which it will apply; it will apply to all future axes. 
*clist* is a sequence of mpl color specifiers """ _process_plot_var_args.defaultColors = clist[:] rcParams['lines.color'] = clist[0] class _process_plot_var_args: """ Process variable length arguments to the plot command, so that plot commands like the following are supported:: plot(t, s) plot(t1, s1, t2, s2) plot(t1, s1, 'ko', t2, s2) plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3) an arbitrary number of *x*, *y*, *fmt* are allowed """ defaultColors = ['b','g','r','c','m','y','k'] def __init__(self, axes, command='plot'): self.axes = axes self.command = command self._clear_color_cycle() def _clear_color_cycle(self): self.colors = _process_plot_var_args.defaultColors[:] # if the default line color is a color format string, move it up # in the que try: ind = self.colors.index(rcParams['lines.color']) except ValueError: self.firstColor = rcParams['lines.color'] else: self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0] self.firstColor = self.colors[0] self.Ncolors = len(self.colors) self.count = 0 def set_color_cycle(self, clist): self.colors = clist[:] self.firstColor = self.colors[0] self.Ncolors = len(self.colors) self.count = 0 def _get_next_cycle_color(self): if self.count==0: color = self.firstColor else: color = self.colors[int(self.count % self.Ncolors)] self.count += 1 return color def __call__(self, *args, **kwargs): if self.axes.xaxis is not None and self.axes.yaxis is not None: xunits = kwargs.pop( 'xunits', self.axes.xaxis.units) yunits = kwargs.pop( 'yunits', self.axes.yaxis.units) if xunits!=self.axes.xaxis.units: self.axes.xaxis.set_units(xunits) if yunits!=self.axes.yaxis.units: self.axes.yaxis.set_units(yunits) ret = self._grab_next_args(*args, **kwargs) return ret def set_lineprops(self, line, **kwargs): assert self.command == 'plot', 'set_lineprops only works with "plot"' for key, val in kwargs.items(): funcName = "set_%s"%key if not hasattr(line,funcName): raise TypeError, 'There is no line property "%s"'%key func = getattr(line,funcName) func(val) def set_patchprops(self, fill_poly, **kwargs): assert self.command == 'fill', 'set_patchprops only works with "fill"' for key, val in kwargs.items(): funcName = "set_%s"%key if not hasattr(fill_poly,funcName): raise TypeError, 'There is no patch property "%s"'%key func = getattr(fill_poly,funcName) func(val) def _xy_from_y(self, y): if self.axes.yaxis is not None: b = self.axes.yaxis.update_units(y) if b: return np.arange(len(y)), y, False if not ma.isMaskedArray(y): y = np.asarray(y) if len(y.shape) == 1: y = y[:,np.newaxis] nr, nc = y.shape x = np.arange(nr) if len(x.shape) == 1: x = x[:,np.newaxis] return x,y, True def _xy_from_xy(self, x, y): if self.axes.xaxis is not None and self.axes.yaxis is not None: bx = self.axes.xaxis.update_units(x) by = self.axes.yaxis.update_units(y) # right now multicol is not supported if either x or y are # unit enabled but this can be fixed.. 
if bx or by: return x, y, False x = ma.asarray(x) y = ma.asarray(y) if len(x.shape) == 1: x = x[:,np.newaxis] if len(y.shape) == 1: y = y[:,np.newaxis] nrx, ncx = x.shape nry, ncy = y.shape assert nrx == nry, 'Dimensions of x and y are incompatible' if ncx == ncy: return x, y, True if ncx == 1: x = np.repeat(x, ncy, axis=1) if ncy == 1: y = np.repeat(y, ncx, axis=1) assert x.shape == y.shape, 'Dimensions of x and y are incompatible' return x, y, True def _plot_1_arg(self, y, **kwargs): assert self.command == 'plot', 'fill needs at least 2 arguments' ret = [] x, y, multicol = self._xy_from_y(y) if multicol: for j in xrange(y.shape[1]): color = self._get_next_cycle_color() seg = mlines.Line2D(x, y[:,j], color = color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) else: color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color = color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) return ret def _plot_2_args(self, tup2, **kwargs): ret = [] if is_string_like(tup2[1]): assert self.command == 'plot', ('fill needs at least 2 non-string ' 'arguments') y, fmt = tup2 x, y, multicol = self._xy_from_y(y) linestyle, marker, color = _process_plot_format(fmt) def makeline(x, y): _color = color if _color is None: _color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=_color, linestyle=linestyle, marker=marker, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) if multicol: for j in xrange(y.shape[1]): makeline(x[:,j], y[:,j]) else: makeline(x, y) return ret else: x, y = tup2 x, y, multicol = self._xy_from_xy(x, y) def makeline(x, y): color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) def makefill(x, y): x = self.axes.convert_xunits(x) y = self.axes.convert_yunits(y) facecolor = self._get_next_cycle_color() seg = mpatches.Polygon(np.hstack( (x[:,np.newaxis],y[:,np.newaxis])), facecolor = facecolor, fill=True, closed=closed ) self.set_patchprops(seg, **kwargs) ret.append(seg) if self.command == 'plot': func = makeline else: closed = kwargs.get('closed', True) func = makefill if multicol: for j in xrange(y.shape[1]): func(x[:,j], y[:,j]) else: func(x, y) return ret def _plot_3_args(self, tup3, **kwargs): ret = [] x, y, fmt = tup3 x, y, multicol = self._xy_from_xy(x, y) linestyle, marker, color = _process_plot_format(fmt) def makeline(x, y): _color = color if _color is None: _color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=_color, linestyle=linestyle, marker=marker, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) def makefill(x, y): facecolor = color x = self.axes.convert_xunits(x) y = self.axes.convert_yunits(y) seg = mpatches.Polygon(np.hstack( (x[:,np.newaxis],y[:,np.newaxis])), facecolor = facecolor, fill=True, closed=closed ) self.set_patchprops(seg, **kwargs) ret.append(seg) if self.command == 'plot': func = makeline else: closed = kwargs.get('closed', True) func = makefill if multicol: for j in xrange(y.shape[1]): func(x[:,j], y[:,j]) else: func(x, y) return ret def _grab_next_args(self, *args, **kwargs): remaining = args while 1: if len(remaining)==0: return if len(remaining)==1: for seg in self._plot_1_arg(remaining[0], **kwargs): yield seg remaining = [] continue if len(remaining)==2: for seg in self._plot_2_args(remaining, **kwargs): yield seg remaining = [] continue if len(remaining)==3: if not is_string_like(remaining[2]): raise ValueError, 'third arg must be a format string' for 
seg in self._plot_3_args(remaining, **kwargs): yield seg remaining=[] continue if is_string_like(remaining[2]): for seg in self._plot_3_args(remaining[:3], **kwargs): yield seg remaining=remaining[3:] else: for seg in self._plot_2_args(remaining[:2], **kwargs): yield seg remaining=remaining[2:] class Axes(martist.Artist): """ The :class:`Axes` contains most of the figure elements: :class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`, :class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`, :class:`~matplotlib.patches.Polygon`, etc., and sets the coordinate system. The :class:`Axes` instance supports callbacks through a callbacks attribute which is a :class:`~matplotlib.cbook.CallbackRegistry` instance. The events you can connect to are 'xlim_changed' and 'ylim_changed' and the callback will be called with func(*ax*) where *ax* is the :class:`Axes` instance. """ name = "rectilinear" _shared_x_axes = cbook.Grouper() _shared_y_axes = cbook.Grouper() def __str__(self): return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds) def __init__(self, fig, rect, axisbg = None, # defaults to rc axes.facecolor frameon = True, sharex=None, # use Axes instance's xaxis info sharey=None, # use Axes instance's yaxis info label='', **kwargs ): """ Build an :class:`Axes` instance in :class:`~matplotlib.figure.Figure` *fig* with *rect=[left, bottom, width, height]* in :class:`~matplotlib.figure.Figure` coordinates Optional keyword arguments: ================ ========================================= Keyword Description ================ ========================================= *adjustable* [ 'box' | 'datalim' ] *alpha* float: the alpha transparency *anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W' ] *aspect* [ 'auto' | 'equal' | aspect_ratio ] *autoscale_on* [ *True* | *False* ] whether or not to autoscale the *viewlim* *axis_bgcolor* any matplotlib color, see :func:`~matplotlib.pyplot.colors` *axisbelow* draw the grids and ticks below the other artists *cursor_props* a (*float*, *color*) tuple *figure* a :class:`~matplotlib.figure.Figure` instance *frame_on* a boolean - draw the axes frame *label* the axes label *navigate* [ *True* | *False* ] *navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation toolbar button status *position* [left, bottom, width, height] in class:`~matplotlib.figure.Figure` coords *sharex* an class:`~matplotlib.axes.Axes` instance to share the x-axis with *sharey* an class:`~matplotlib.axes.Axes` instance to share the y-axis with *title* the title string *visible* [ *True* | *False* ] whether the axes is visible *xlabel* the xlabel *xlim* (*xmin*, *xmax*) view limits *xscale* [%(scale)s] *xticklabels* sequence of strings *xticks* sequence of floats *ylabel* the ylabel strings *ylim* (*ymin*, *ymax*) view limits *yscale* [%(scale)s] *yticklabels* sequence of strings *yticks* sequence of floats ================ ========================================= """ % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])} martist.Artist.__init__(self) if isinstance(rect, mtransforms.Bbox): self._position = rect else: self._position = mtransforms.Bbox.from_bounds(*rect) self._originalPosition = self._position.frozen() self.set_axes(self) self.set_aspect('auto') self._adjustable = 'box' self.set_anchor('C') self._sharex = sharex self._sharey = sharey if sharex is not None: self._shared_x_axes.join(self, sharex) if sharex._adjustable == 'box': sharex._adjustable = 'datalim' #warnings.warn( # 'shared axes: "adjustable" is being changed to "datalim"') 
self._adjustable = 'datalim' if sharey is not None: self._shared_y_axes.join(self, sharey) if sharey._adjustable == 'box': sharey._adjustable = 'datalim' #warnings.warn( # 'shared axes: "adjustable" is being changed to "datalim"') self._adjustable = 'datalim' self.set_label(label) self.set_figure(fig) # this call may differ for non-sep axes, eg polar self._init_axis() if axisbg is None: axisbg = rcParams['axes.facecolor'] self._axisbg = axisbg self._frameon = frameon self._axisbelow = rcParams['axes.axisbelow'] self._hold = rcParams['axes.hold'] self._connected = {} # a dict from events to (id, func) self.cla() # funcs used to format x and y - fall back on major formatters self.fmt_xdata = None self.fmt_ydata = None self.set_cursor_props((1,'k')) # set the cursor properties for axes self._cachedRenderer = None self.set_navigate(True) self.set_navigate_mode(None) if len(kwargs): martist.setp(self, **kwargs) if self.xaxis is not None: self._xcid = self.xaxis.callbacks.connect('units finalize', self.relim) if self.yaxis is not None: self._ycid = self.yaxis.callbacks.connect('units finalize', self.relim) def get_window_extent(self, *args, **kwargs): ''' get the axes bounding box in display space; *args* and *kwargs* are empty ''' return self.bbox def _init_axis(self): "move this out of __init__ because non-separable axes don't use it" self.xaxis = maxis.XAxis(self) self.yaxis = maxis.YAxis(self) self._update_transScale() def set_figure(self, fig): """ Set the class:`~matplotlib.axes.Axes` figure accepts a class:`~matplotlib.figure.Figure` instance """ martist.Artist.set_figure(self, fig) self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure) #these will be updated later as data is added self.dataLim = mtransforms.Bbox.unit() self.viewLim = mtransforms.Bbox.unit() self.transScale = mtransforms.TransformWrapper( mtransforms.IdentityTransform()) self._set_lim_and_transforms() def _set_lim_and_transforms(self): """ set the *dataLim* and *viewLim* :class:`~matplotlib.transforms.Bbox` attributes and the *transScale*, *transData*, *transLimits* and *transAxes* transformations. """ self.transAxes = mtransforms.BboxTransformTo(self.bbox) # Transforms the x and y axis separately by a scale factor # It is assumed that this part will have non-linear components self.transScale = mtransforms.TransformWrapper( mtransforms.IdentityTransform()) # An affine transformation on the data, generally to limit the # range of the axes self.transLimits = mtransforms.BboxTransformFrom( mtransforms.TransformedBbox(self.viewLim, self.transScale)) # The parentheses are important for efficiency here -- they # group the last two (which are usually affines) separately # from the first (which, with log-scaling can be non-affine). self.transData = self.transScale + (self.transLimits + self.transAxes) self._xaxis_transform = mtransforms.blended_transform_factory( self.axes.transData, self.axes.transAxes) self._yaxis_transform = mtransforms.blended_transform_factory( self.axes.transAxes, self.axes.transData) def get_xaxis_transform(self): """ Get the transformation used for drawing x-axis labels, ticks and gridlines. The x-direction is in data coordinates and the y-direction is in axis coordinates. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. 
""" return self._xaxis_transform def get_xaxis_text1_transform(self, pad_points): """ Get the transformation used for drawing x-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._xaxis_transform + mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0, self.figure.dpi_scale_trans), "top", "center") def get_xaxis_text2_transform(self, pad_points): """ Get the transformation used for drawing the secondary x-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._xaxis_transform + mtransforms.ScaledTranslation(0, pad_points / 72.0, self.figure.dpi_scale_trans), "bottom", "center") def get_yaxis_transform(self): """ Get the transformation used for drawing y-axis labels, ticks and gridlines. The x-direction is in axis coordinates and the y-direction is in data coordinates. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return self._yaxis_transform def get_yaxis_text1_transform(self, pad_points): """ Get the transformation used for drawing y-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._yaxis_transform + mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0, self.figure.dpi_scale_trans), "center", "right") def get_yaxis_text2_transform(self, pad_points): """ Get the transformation used for drawing the secondary y-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. 
""" return (self._yaxis_transform + mtransforms.ScaledTranslation(pad_points / 72.0, 0, self.figure.dpi_scale_trans), "center", "left") def _update_transScale(self): self.transScale.set( mtransforms.blended_transform_factory( self.xaxis.get_transform(), self.yaxis.get_transform())) if hasattr(self, "lines"): for line in self.lines: line._transformed_path.invalidate() def get_position(self, original=False): 'Return the a copy of the axes rectangle as a Bbox' if original: return self._originalPosition.frozen() else: return self._position.frozen() def set_position(self, pos, which='both'): """ Set the axes position with:: pos = [left, bottom, width, height] in relative 0,1 coords, or *pos* can be a :class:`~matplotlib.transforms.Bbox` There are two position variables: one which is ultimately used, but which may be modified by :meth:`apply_aspect`, and a second which is the starting point for :meth:`apply_aspect`. Optional keyword arguments: *which* ========== ==================== value description ========== ==================== 'active' to change the first 'original' to change the second 'both' to change both ========== ==================== """ if not isinstance(pos, mtransforms.BboxBase): pos = mtransforms.Bbox.from_bounds(*pos) if which in ('both', 'active'): self._position.set(pos) if which in ('both', 'original'): self._originalPosition.set(pos) def reset_position(self): 'Make the original position the active position' pos = self.get_position(original=True) self.set_position(pos, which='active') def _set_artist_props(self, a): 'set the boilerplate props for artists added to axes' a.set_figure(self.figure) if not a.is_transform_set(): a.set_transform(self.transData) a.set_axes(self) def _gen_axes_patch(self): """ Returns the patch used to draw the background of the axes. It is also used as the clipping path for any data elements on the axes. In the standard axes, this is a rectangle, but in other projections it may not be. .. note:: Intended to be overridden by new projection types. """ return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0) def cla(self): 'Clear the current axes' # Note: this is called by Axes.__init__() self.xaxis.cla() self.yaxis.cla() self.ignore_existing_data_limits = True self.callbacks = cbook.CallbackRegistry(('xlim_changed', 'ylim_changed')) if self._sharex is not None: # major and minor are class instances with # locator and formatter attributes self.xaxis.major = self._sharex.xaxis.major self.xaxis.minor = self._sharex.xaxis.minor x0, x1 = self._sharex.get_xlim() self.set_xlim(x0, x1, emit=False) self.xaxis.set_scale(self._sharex.xaxis.get_scale()) else: self.xaxis.set_scale('linear') if self._sharey is not None: self.yaxis.major = self._sharey.yaxis.major self.yaxis.minor = self._sharey.yaxis.minor y0, y1 = self._sharey.get_ylim() self.set_ylim(y0, y1, emit=False) self.yaxis.set_scale(self._sharey.yaxis.get_scale()) else: self.yaxis.set_scale('linear') self._autoscaleon = True self._update_transScale() # needed? 
self._get_lines = _process_plot_var_args(self) self._get_patches_for_fill = _process_plot_var_args(self, 'fill') self._gridOn = rcParams['axes.grid'] self.lines = [] self.patches = [] self.texts = [] self.tables = [] self.artists = [] self.images = [] self.legend_ = None self.collections = [] # collection.Collection instances self.grid(self._gridOn) props = font_manager.FontProperties(size=rcParams['axes.titlesize']) self.titleOffsetTrans = mtransforms.ScaledTranslation( 0.0, 5.0 / 72.0, self.figure.dpi_scale_trans) self.title = mtext.Text( x=0.5, y=1.0, text='', fontproperties=props, verticalalignment='bottom', horizontalalignment='center', ) self.title.set_transform(self.transAxes + self.titleOffsetTrans) self.title.set_clip_box(None) self._set_artist_props(self.title) # the patch draws the background of the axes. we want this to # be below the other artists; the axesPatch name is # deprecated. We use the frame to draw the edges so we are # setting the edgecolor to None self.patch = self.axesPatch = self._gen_axes_patch() self.patch.set_figure(self.figure) self.patch.set_facecolor(self._axisbg) self.patch.set_edgecolor('None') self.patch.set_linewidth(0) self.patch.set_transform(self.transAxes) # the frame draws the border around the axes and we want this # above. this is a place holder for a more sophisticated # artist that might just draw a left, bottom frame, or a # centered frame, etc the axesFrame name is deprecated self.frame = self.axesFrame = self._gen_axes_patch() self.frame.set_figure(self.figure) self.frame.set_facecolor('none') self.frame.set_edgecolor(rcParams['axes.edgecolor']) self.frame.set_linewidth(rcParams['axes.linewidth']) self.frame.set_transform(self.transAxes) self.frame.set_zorder(2.5) self.axison = True self.xaxis.set_clip_path(self.patch) self.yaxis.set_clip_path(self.patch) self._shared_x_axes.clean() self._shared_y_axes.clean() def clear(self): 'clear the axes' self.cla() def set_color_cycle(self, clist): """ Set the color cycle for any future plot commands on this Axes. clist is a list of mpl color specifiers. """ self._get_lines.set_color_cycle(clist) def ishold(self): 'return the HOLD status of the axes' return self._hold def hold(self, b=None): """ call signature:: hold(b=None) Set the hold state. If *hold* is *None* (default), toggle the *hold* state. Else set the *hold* state to boolean value *b*. Examples: * toggle hold: >>> hold() * turn hold on: >>> hold(True) * turn hold off >>> hold(False) When hold is True, subsequent plot commands will be added to the current axes. When hold is False, the current axes and figure will be cleared on the next plot command """ if b is None: self._hold = not self._hold else: self._hold = b def get_aspect(self): return self._aspect def set_aspect(self, aspect, adjustable=None, anchor=None): """ *aspect* ======== ================================================ value description ======== ================================================ 'auto' automatic; fill position rectangle with data 'normal' same as 'auto'; deprecated 'equal' same scaling from data to plot units for x and y num a circle will be stretched such that the height is num times the width. aspect=1 is the same as aspect='equal'. 
======== ================================================ *adjustable* ========= ============================ value description ========= ============================ 'box' change physical size of axes 'datalim' change xlim or ylim ========= ============================ *anchor* ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== """ if aspect in ('normal', 'auto'): self._aspect = 'auto' elif aspect == 'equal': self._aspect = 'equal' else: self._aspect = float(aspect) # raise ValueError if necessary if adjustable is not None: self.set_adjustable(adjustable) if anchor is not None: self.set_anchor(anchor) def get_adjustable(self): return self._adjustable def set_adjustable(self, adjustable): """ ACCEPTS: [ 'box' | 'datalim' ] """ if adjustable in ('box', 'datalim'): if self in self._shared_x_axes or self in self._shared_y_axes: if adjustable == 'box': raise ValueError( 'adjustable must be "datalim" for shared axes') self._adjustable = adjustable else: raise ValueError('argument must be "box", or "datalim"') def get_anchor(self): return self._anchor def set_anchor(self, anchor): """ *anchor* ===== ============ value description ===== ============ 'C' Center 'SW' bottom left 'S' bottom 'SE' bottom right 'E' right 'NE' top right 'N' top 'NW' top left 'W' left ===== ============ """ if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2: self._anchor = anchor else: raise ValueError('argument must be among %s' % ', '.join(mtransforms.BBox.coefs.keys())) def get_data_ratio(self): """ Returns the aspect ratio of the raw data. This method is intended to be overridden by new projection types. """ xmin,xmax = self.get_xbound() xsize = max(math.fabs(xmax-xmin), 1e-30) ymin,ymax = self.get_ybound() ysize = max(math.fabs(ymax-ymin), 1e-30) return ysize/xsize def apply_aspect(self, position=None): ''' Use :meth:`_aspect` and :meth:`_adjustable` to modify the axes box or the view limits. ''' if position is None: position = self.get_position(original=True) aspect = self.get_aspect() if aspect == 'auto': self.set_position( position , which='active') return if aspect == 'equal': A = 1 else: A = aspect #Ensure at drawing time that any Axes involved in axis-sharing # does not have its position changed. 
if self in self._shared_x_axes or self in self._shared_y_axes: if self._adjustable == 'box': self._adjustable = 'datalim' warnings.warn( 'shared axes: "adjustable" is being changed to "datalim"') figW,figH = self.get_figure().get_size_inches() fig_aspect = figH/figW if self._adjustable == 'box': box_aspect = A * self.get_data_ratio() pb = position.frozen() pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect) self.set_position(pb1.anchored(self.get_anchor(), pb), 'active') return # reset active to original in case it had been changed # by prior use of 'box' self.set_position(position, which='active') xmin,xmax = self.get_xbound() xsize = max(math.fabs(xmax-xmin), 1e-30) ymin,ymax = self.get_ybound() ysize = max(math.fabs(ymax-ymin), 1e-30) l,b,w,h = position.bounds box_aspect = fig_aspect * (h/w) data_ratio = box_aspect / A y_expander = (data_ratio*xsize/ysize - 1.0) #print 'y_expander', y_expander # If y_expander > 0, the dy/dx viewLim ratio needs to increase if abs(y_expander) < 0.005: #print 'good enough already' return dL = self.dataLim xr = 1.05 * dL.width yr = 1.05 * dL.height xmarg = xsize - xr ymarg = ysize - yr Ysize = data_ratio * xsize Xsize = ysize / data_ratio Xmarg = Xsize - xr Ymarg = Ysize - yr xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help. ym = 0 #print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax #print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize changex = (self in self._shared_y_axes and self not in self._shared_x_axes) changey = (self in self._shared_x_axes and self not in self._shared_y_axes) if changex and changey: warnings.warn("adjustable='datalim' cannot work with shared " "x and y axes") return if changex: adjust_y = False else: #print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg if xmarg > xm and ymarg > ym: adjy = ((Ymarg > 0 and y_expander < 0) or (Xmarg < 0 and y_expander > 0)) else: adjy = y_expander > 0 #print 'y_expander, adjy', y_expander, adjy adjust_y = changey or adjy #(Ymarg > xmarg) if adjust_y: yc = 0.5*(ymin+ymax) y0 = yc - Ysize/2.0 y1 = yc + Ysize/2.0 self.set_ybound((y0, y1)) #print 'New y0, y1:', y0, y1 #print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize else: xc = 0.5*(xmin+xmax) x0 = xc - Xsize/2.0 x1 = xc + Xsize/2.0 self.set_xbound((x0, x1)) #print 'New x0, x1:', x0, x1 #print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0) def axis(self, *v, **kwargs): ''' Convenience method for manipulating the x and y view limits and the aspect ratio of the plot. *kwargs* are passed on to :meth:`set_xlim` and :meth:`set_ylim` ''' if len(v)==1 and is_string_like(v[0]): s = v[0].lower() if s=='on': self.set_axis_on() elif s=='off': self.set_axis_off() elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'): self.set_autoscale_on(True) self.set_aspect('auto') self.autoscale_view() # self.apply_aspect() if s=='equal': self.set_aspect('equal', adjustable='datalim') elif s == 'scaled': self.set_aspect('equal', adjustable='box', anchor='C') self.set_autoscale_on(False) # Req. 
by Mark Bakker elif s=='tight': self.autoscale_view(tight=True) self.set_autoscale_on(False) elif s == 'image': self.autoscale_view(tight=True) self.set_autoscale_on(False) self.set_aspect('equal', adjustable='box', anchor='C') else: raise ValueError('Unrecognized string %s to axis; ' 'try on or off' % s) xmin, xmax = self.get_xlim() ymin, ymax = self.get_ylim() return xmin, xmax, ymin, ymax try: v[0] except IndexError: emit = kwargs.get('emit', True) xmin = kwargs.get('xmin', None) xmax = kwargs.get('xmax', None) xmin, xmax = self.set_xlim(xmin, xmax, emit) ymin = kwargs.get('ymin', None) ymax = kwargs.get('ymax', None) ymin, ymax = self.set_ylim(ymin, ymax, emit) return xmin, xmax, ymin, ymax v = v[0] if len(v) != 4: raise ValueError('v must contain [xmin xmax ymin ymax]') self.set_xlim([v[0], v[1]]) self.set_ylim([v[2], v[3]]) return v def get_child_artists(self): """ Return a list of artists the axes contains. .. deprecated:: 0.98 """ raise DeprecationWarning('Use get_children instead') def get_frame(self): 'Return the axes Rectangle frame' warnings.warn('use ax.patch instead', DeprecationWarning) return self.patch def get_legend(self): 'Return the legend.Legend instance, or None if no legend is defined' return self.legend_ def get_images(self): 'return a list of Axes images contained by the Axes' return cbook.silent_list('AxesImage', self.images) def get_lines(self): 'Return a list of lines contained by the Axes' return cbook.silent_list('Line2D', self.lines) def get_xaxis(self): 'Return the XAxis instance' return self.xaxis def get_xgridlines(self): 'Get the x grid lines as a list of Line2D instances' return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines()) def get_xticklines(self): 'Get the xtick lines as a list of Line2D instances' return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines()) def get_yaxis(self): 'Return the YAxis instance' return self.yaxis def get_ygridlines(self): 'Get the y grid lines as a list of Line2D instances' return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines()) def get_yticklines(self): 'Get the ytick lines as a list of Line2D instances' return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines()) #### Adding and tracking artists def has_data(self): '''Return *True* if any artists have been added to axes. This should not be used to determine whether the *dataLim* need to be updated, and may not actually be useful for anything. 
''' return ( len(self.collections) + len(self.images) + len(self.lines) + len(self.patches))>0 def add_artist(self, a): 'Add any :class:`~matplotlib.artist.Artist` to the axes' a.set_axes(self) self.artists.append(a) self._set_artist_props(a) a.set_clip_path(self.patch) a._remove_method = lambda h: self.artists.remove(h) def add_collection(self, collection, autolim=True): ''' add a :class:`~matplotlib.collections.Collection` instance to the axes ''' label = collection.get_label() if not label: collection.set_label('collection%d'%len(self.collections)) self.collections.append(collection) self._set_artist_props(collection) collection.set_clip_path(self.patch) if autolim: if collection._paths and len(collection._paths): self.update_datalim(collection.get_datalim(self.transData)) collection._remove_method = lambda h: self.collections.remove(h) def add_line(self, line): ''' Add a :class:`~matplotlib.lines.Line2D` to the list of plot lines ''' self._set_artist_props(line) line.set_clip_path(self.patch) self._update_line_limits(line) if not line.get_label(): line.set_label('_line%d'%len(self.lines)) self.lines.append(line) line._remove_method = lambda h: self.lines.remove(h) def _update_line_limits(self, line): p = line.get_path() if p.vertices.size > 0: self.dataLim.update_from_path(p, self.ignore_existing_data_limits, updatex=line.x_isdata, updatey=line.y_isdata) self.ignore_existing_data_limits = False def add_patch(self, p): """ Add a :class:`~matplotlib.patches.Patch` *p* to the list of axes patches; the clipbox will be set to the Axes clipping box. If the transform is not set, it will be set to :attr:`transData`. """ self._set_artist_props(p) p.set_clip_path(self.patch) self._update_patch_limits(p) self.patches.append(p) p._remove_method = lambda h: self.patches.remove(h) def _update_patch_limits(self, patch): 'update the data limits for patch *p*' # hist can add zero height Rectangles, which is useful to keep # the bins, counts and patches lined up, but it throws off log # scaling. We'll ignore rects with zero height or width in # the auto-scaling if (isinstance(patch, mpatches.Rectangle) and (patch.get_width()==0 or patch.get_height()==0)): return vertices = patch.get_path().vertices if vertices.size > 0: xys = patch.get_patch_transform().transform(vertices) if patch.get_data_transform() != self.transData: transform = (patch.get_data_transform() + self.transData.inverted()) xys = transform.transform(xys) self.update_datalim(xys, updatex=patch.x_isdata, updatey=patch.y_isdata) def add_table(self, tab): ''' Add a :class:`~matplotlib.tables.Table` instance to the list of axes tables ''' self._set_artist_props(tab) self.tables.append(tab) tab.set_clip_path(self.patch) tab._remove_method = lambda h: self.tables.remove(h) def relim(self): 'recompute the data limits based on current artists' # Collections are deliberately not supported (yet); see # the TODO note in artists.py. self.dataLim.ignore(True) self.ignore_existing_data_limits = True for line in self.lines: self._update_line_limits(line) for p in self.patches: self._update_patch_limits(p) def update_datalim(self, xys, updatex=True, updatey=True): 'Update the data lim bbox with seq of xy tups or equiv. 2-D array' # if no data is set currently, the bbox will ignore its # limits and set the bound to be the bounds of the xydata. 
# Otherwise, it will compute the bounds of it's current data # and the data in xydata if iterable(xys) and not len(xys): return if not ma.isMaskedArray(xys): xys = np.asarray(xys) self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits, updatex=updatex, updatey=updatey) self.ignore_existing_data_limits = False def update_datalim_numerix(self, x, y): 'Update the data lim bbox with seq of xy tups' # if no data is set currently, the bbox will ignore it's # limits and set the bound to be the bounds of the xydata. # Otherwise, it will compute the bounds of it's current data # and the data in xydata if iterable(x) and not len(x): return self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits) self.ignore_existing_data_limits = False def update_datalim_bounds(self, bounds): ''' Update the datalim to include the given :class:`~matplotlib.transforms.Bbox` *bounds* ''' self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds])) def _process_unit_info(self, xdata=None, ydata=None, kwargs=None): 'look for unit *kwargs* and update the axis instances as necessary' if self.xaxis is None or self.yaxis is None: return #print 'processing', self.get_geometry() if xdata is not None: # we only need to update if there is nothing set yet. if not self.xaxis.have_units(): self.xaxis.update_units(xdata) #print '\tset from xdata', self.xaxis.units if ydata is not None: # we only need to update if there is nothing set yet. if not self.yaxis.have_units(): self.yaxis.update_units(ydata) #print '\tset from ydata', self.yaxis.units # process kwargs 2nd since these will override default units if kwargs is not None: xunits = kwargs.pop( 'xunits', self.xaxis.units) if xunits!=self.xaxis.units: #print '\tkw setting xunits', xunits self.xaxis.set_units(xunits) # If the units being set imply a different converter, # we need to update. if xdata is not None: self.xaxis.update_units(xdata) yunits = kwargs.pop('yunits', self.yaxis.units) if yunits!=self.yaxis.units: #print '\tkw setting yunits', yunits self.yaxis.set_units(yunits) # If the units being set imply a different converter, # we need to update. if ydata is not None: self.yaxis.update_units(ydata) def in_axes(self, mouseevent): ''' return *True* if the given *mouseevent* (in display coords) is in the Axes ''' return self.patch.contains(mouseevent)[0] def get_autoscale_on(self): """ Get whether autoscaling is applied on plot commands """ return self._autoscaleon def set_autoscale_on(self, b): """ Set whether autoscaling is applied on plot commands accepts: [ *True* | *False* ] """ self._autoscaleon = b def autoscale_view(self, tight=False, scalex=True, scaley=True): """ autoscale the view limits using the data limits. You can selectively autoscale only a single axis, eg, the xaxis by setting *scaley* to *False*. The autoscaling preserves any axis direction reversal that has already been done. 
""" # if image data only just use the datalim if not self._autoscaleon: return if scalex: xshared = self._shared_x_axes.get_siblings(self) dl = [ax.dataLim for ax in xshared] bb = mtransforms.BboxBase.union(dl) x0, x1 = bb.intervalx if scaley: yshared = self._shared_y_axes.get_siblings(self) dl = [ax.dataLim for ax in yshared] bb = mtransforms.BboxBase.union(dl) y0, y1 = bb.intervaly if (tight or (len(self.images)>0 and len(self.lines)==0 and len(self.patches)==0)): if scalex: self.set_xbound(x0, x1) if scaley: self.set_ybound(y0, y1) return if scalex: XL = self.xaxis.get_major_locator().view_limits(x0, x1) self.set_xbound(XL) if scaley: YL = self.yaxis.get_major_locator().view_limits(y0, y1) self.set_ybound(YL) #### Drawing def draw(self, renderer=None, inframe=False): "Draw everything (plot lines, axes, labels)" if renderer is None: renderer = self._cachedRenderer if renderer is None: raise RuntimeError('No renderer defined') if not self.get_visible(): return renderer.open_group('axes') self.apply_aspect() # the patch draws the background rectangle -- the frame below # will draw the edges if self.axison and self._frameon: self.patch.draw(renderer) artists = [] if len(self.images)<=1 or renderer.option_image_nocomposite(): for im in self.images: im.draw(renderer) else: # make a composite image blending alpha # list of (mimage.Image, ox, oy) mag = renderer.get_image_magnification() ims = [(im.make_image(mag),0,0) for im in self.images if im.get_visible()] l, b, r, t = self.bbox.extents width = mag*((round(r) + 0.5) - (round(l) - 0.5)) height = mag*((round(t) + 0.5) - (round(b) - 0.5)) im = mimage.from_images(height, width, ims) im.is_grayscale = False l, b, w, h = self.bbox.bounds # composite images need special args so they will not # respect z-order for now renderer.draw_image( round(l), round(b), im, self.bbox, self.patch.get_path(), self.patch.get_transform()) artists.extend(self.collections) artists.extend(self.patches) artists.extend(self.lines) artists.extend(self.texts) artists.extend(self.artists) if self.axison and not inframe: if self._axisbelow: self.xaxis.set_zorder(0.5) self.yaxis.set_zorder(0.5) else: self.xaxis.set_zorder(2.5) self.yaxis.set_zorder(2.5) artists.extend([self.xaxis, self.yaxis]) if not inframe: artists.append(self.title) artists.extend(self.tables) if self.legend_ is not None: artists.append(self.legend_) # the frame draws the edges around the axes patch -- we # decouple these so the patch can be in the background and the # frame in the foreground. if self.axison and self._frameon: artists.append(self.frame) dsu = [ (a.zorder, i, a) for i, a in enumerate(artists) if not a.get_animated() ] dsu.sort() for zorder, i, a in dsu: a.draw(renderer) renderer.close_group('axes') self._cachedRenderer = renderer def draw_artist(self, a): """ This method can only be used after an initial draw which caches the renderer. It is used to efficiently update Axes data (axis ticks, labels, etc are not updated) """ assert self._cachedRenderer is not None a.draw(self._cachedRenderer) def redraw_in_frame(self): """ This method can only be used after an initial draw which caches the renderer. 
It is used to efficiently update Axes data (axis ticks, labels, etc are not updated) """ assert self._cachedRenderer is not None self.draw(self._cachedRenderer, inframe=True) def get_renderer_cache(self): return self._cachedRenderer def __draw_animate(self): # ignore for now; broken if self._lastRenderer is None: raise RuntimeError('You must first call ax.draw()') dsu = [(a.zorder, a) for a in self.animated.keys()] dsu.sort() renderer = self._lastRenderer renderer.blit() for tmp, a in dsu: a.draw(renderer) #### Axes rectangle characteristics def get_frame_on(self): """ Get whether the axes rectangle patch is drawn """ return self._frameon def set_frame_on(self, b): """ Set whether the axes rectangle patch is drawn ACCEPTS: [ *True* | *False* ] """ self._frameon = b def get_axisbelow(self): """ Get whether axis below is true or not """ return self._axisbelow def set_axisbelow(self, b): """ Set whether the axis ticks and gridlines are above or below most artists ACCEPTS: [ *True* | *False* ] """ self._axisbelow = b def grid(self, b=None, **kwargs): """ call signature:: grid(self, b=None, **kwargs) Set the axes grids on or off; *b* is a boolean If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If *kwargs* are supplied, it is assumed that you want a grid and *b* is thus set to *True* *kawrgs* are used to set the grid line properties, eg:: ax.grid(color='r', linestyle='-', linewidth=2) Valid :class:`~matplotlib.lines.Line2D` kwargs are %(Line2D)s """ if len(kwargs): b = True self.xaxis.grid(b, **kwargs) self.yaxis.grid(b, **kwargs) grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd def ticklabel_format(self, **kwargs): """ Convenience method for manipulating the ScalarFormatter used by default for linear axes. Optional keyword arguments: ============ ===================================== Keyword Description ============ ===================================== *style* [ 'sci' (or 'scientific') | 'plain' ] plain turns off scientific notation *scilimits* (m, n), pair of integers; if *style* is 'sci', scientific notation will be used for numbers outside the range 10`-m`:sup: to 10`n`:sup:. Use (0,0) to include all numbers. *axis* [ 'x' | 'y' | 'both' ] ============ ===================================== Only the major ticks are affected. If the method is called when the :class:`~matplotlib.ticker.ScalarFormatter` is not the :class:`~matplotlib.ticker.Formatter` being used, an :exc:`AttributeError` will be raised. 
""" style = kwargs.pop('style', '').lower() scilimits = kwargs.pop('scilimits', None) if scilimits is not None: try: m, n = scilimits m+n+1 # check that both are numbers except (ValueError, TypeError): raise ValueError("scilimits must be a sequence of 2 integers") axis = kwargs.pop('axis', 'both').lower() if style[:3] == 'sci': sb = True elif style in ['plain', 'comma']: sb = False if style == 'plain': cb = False else: cb = True raise NotImplementedError, "comma style remains to be added" elif style == '': sb = None else: raise ValueError, "%s is not a valid style value" try: if sb is not None: if axis == 'both' or axis == 'x': self.xaxis.major.formatter.set_scientific(sb) if axis == 'both' or axis == 'y': self.yaxis.major.formatter.set_scientific(sb) if scilimits is not None: if axis == 'both' or axis == 'x': self.xaxis.major.formatter.set_powerlimits(scilimits) if axis == 'both' or axis == 'y': self.yaxis.major.formatter.set_powerlimits(scilimits) except AttributeError: raise AttributeError( "This method only works with the ScalarFormatter.") def set_axis_off(self): """turn off the axis""" self.axison = False def set_axis_on(self): """turn on the axis""" self.axison = True def get_axis_bgcolor(self): 'Return the axis background color' return self._axisbg def set_axis_bgcolor(self, color): """ set the axes background color ACCEPTS: any matplotlib color - see :func:`~matplotlib.pyplot.colors` """ self._axisbg = color self.patch.set_facecolor(color) ### data limits, ticks, tick labels, and formatting def invert_xaxis(self): "Invert the x-axis." left, right = self.get_xlim() self.set_xlim(right, left) def xaxis_inverted(self): 'Returns True if the x-axis is inverted.' left, right = self.get_xlim() return right < left def get_xbound(self): """ Returns the x-axis numerical bounds where:: lowerBound < upperBound """ left, right = self.get_xlim() if left < right: return left, right else: return right, left def set_xbound(self, lower=None, upper=None): """ Set the lower and upper numerical bounds of the x-axis. This method will honor axes inversion regardless of parameter order. 
""" if upper is None and iterable(lower): lower,upper = lower old_lower,old_upper = self.get_xbound() if lower is None: lower = old_lower if upper is None: upper = old_upper if self.xaxis_inverted(): if lower < upper: self.set_xlim(upper, lower) else: self.set_xlim(lower, upper) else: if lower < upper: self.set_xlim(lower, upper) else: self.set_xlim(upper, lower) def get_xlim(self): """ Get the x-axis range [*xmin*, *xmax*] """ return tuple(self.viewLim.intervalx) def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs): """ call signature:: set_xlim(self, *args, **kwargs) Set the limits for the xaxis Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*] Examples:: set_xlim((valmin, valmax)) set_xlim(valmin, valmax) set_xlim(xmin=1) # xmax unchanged set_xlim(xmax=1) # xmin unchanged Keyword arguments: *ymin*: scalar the min of the ylim *ymax*: scalar the max of the ylim *emit*: [ True | False ] notify observers of lim change ACCEPTS: len(2) sequence of floats """ if xmax is None and iterable(xmin): xmin,xmax = xmin self._process_unit_info(xdata=(xmin, xmax)) if xmin is not None: xmin = self.convert_xunits(xmin) if xmax is not None: xmax = self.convert_xunits(xmax) old_xmin,old_xmax = self.get_xlim() if xmin is None: xmin = old_xmin if xmax is None: xmax = old_xmax xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False) xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax) self.viewLim.intervalx = (xmin, xmax) if emit: self.callbacks.process('xlim_changed', self) # Call all of the other x-axes that are shared with this one for other in self._shared_x_axes.get_siblings(self): if other is not self: other.set_xlim(self.viewLim.intervalx, emit=False) if (other.figure != self.figure and other.figure.canvas is not None): other.figure.canvas.draw_idle() return xmin, xmax def get_xscale(self): 'return the xaxis scale string: %s' % ( ", ".join(mscale.get_scale_names())) return self.xaxis.get_scale() def set_xscale(self, value, **kwargs): """ call signature:: set_xscale(value) Set the scaling of the x-axis: %(scale)s ACCEPTS: [%(scale)s] Different kwargs are accepted, depending on the scale: %(scale_docs)s """ self.xaxis.set_scale(value, **kwargs) self.autoscale_view() self._update_transScale() set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % { 'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]), 'scale_docs': mscale.get_scale_docs().strip()} def get_xticks(self, minor=False): 'Return the x ticks as a list of locations' return self.xaxis.get_ticklocs(minor=minor) def set_xticks(self, ticks, minor=False): """ Set the x ticks with list of *ticks* ACCEPTS: sequence of floats """ return self.xaxis.set_ticks(ticks, minor=minor) def get_xmajorticklabels(self): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_majorticklabels()) def get_xminorticklabels(self): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_minorticklabels()) def get_xticklabels(self, minor=False): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_ticklabels(minor=minor)) def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs): """ call signature:: set_xticklabels(labels, fontdict=None, minor=False, **kwargs) Set the xtick labels with list of strings *labels*. Return a list of axis text instances. *kwargs* set the :class:`~matplotlib.text.Text` properties. 
Valid properties are %(Text)s ACCEPTS: sequence of strings """ return self.xaxis.set_ticklabels(labels, fontdict, minor=minor, **kwargs) set_xticklabels.__doc__ = cbook.dedent( set_xticklabels.__doc__) % martist.kwdocd def invert_yaxis(self): "Invert the y-axis." left, right = self.get_ylim() self.set_ylim(right, left) def yaxis_inverted(self): 'Returns True if the y-axis is inverted.' left, right = self.get_ylim() return right < left def get_ybound(self): "Return y-axis numerical bounds in the form of lowerBound < upperBound" left, right = self.get_ylim() if left < right: return left, right else: return right, left def set_ybound(self, lower=None, upper=None): """Set the lower and upper numerical bounds of the y-axis. This method will honor axes inversion regardless of parameter order. """ if upper is None and iterable(lower): lower,upper = lower old_lower,old_upper = self.get_ybound() if lower is None: lower = old_lower if upper is None: upper = old_upper if self.yaxis_inverted(): if lower < upper: self.set_ylim(upper, lower) else: self.set_ylim(lower, upper) else: if lower < upper: self.set_ylim(lower, upper) else: self.set_ylim(upper, lower) def get_ylim(self): """ Get the y-axis range [*ymin*, *ymax*] """ return tuple(self.viewLim.intervaly) def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs): """ call signature:: set_ylim(self, *args, **kwargs): Set the limits for the yaxis; v = [ymin, ymax]:: set_ylim((valmin, valmax)) set_ylim(valmin, valmax) set_ylim(ymin=1) # ymax unchanged set_ylim(ymax=1) # ymin unchanged Keyword arguments: *ymin*: scalar the min of the ylim *ymax*: scalar the max of the ylim *emit*: [ True | False ] notify observers of lim change Returns the current ylimits as a length 2 tuple ACCEPTS: len(2) sequence of floats """ if ymax is None and iterable(ymin): ymin,ymax = ymin if ymin is not None: ymin = self.convert_yunits(ymin) if ymax is not None: ymax = self.convert_yunits(ymax) old_ymin,old_ymax = self.get_ylim() if ymin is None: ymin = old_ymin if ymax is None: ymax = old_ymax ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False) ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax) self.viewLim.intervaly = (ymin, ymax) if emit: self.callbacks.process('ylim_changed', self) # Call all of the other y-axes that are shared with this one for other in self._shared_y_axes.get_siblings(self): if other is not self: other.set_ylim(self.viewLim.intervaly, emit=False) if (other.figure != self.figure and other.figure.canvas is not None): other.figure.canvas.draw_idle() return ymin, ymax def get_yscale(self): 'return the xaxis scale string: %s' % ( ", ".join(mscale.get_scale_names())) return self.yaxis.get_scale() def set_yscale(self, value, **kwargs): """ call signature:: set_yscale(value) Set the scaling of the y-axis: %(scale)s ACCEPTS: [%(scale)s] Different kwargs are accepted, depending on the scale: %(scale_docs)s """ self.yaxis.set_scale(value, **kwargs) self.autoscale_view() self._update_transScale() set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % { 'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]), 'scale_docs': mscale.get_scale_docs().strip()} def get_yticks(self, minor=False): 'Return the y ticks as a list of locations' return self.yaxis.get_ticklocs(minor=minor) def set_yticks(self, ticks, minor=False): """ Set the y ticks with list of *ticks* ACCEPTS: sequence of floats Keyword arguments: *minor*: [ False | True ] Sets the minor ticks if True """ return self.yaxis.set_ticks(ticks, minor=minor) def 
get_ymajorticklabels(self): 'Get the ytick labels as a list of Text instances' return cbook.silent_list('Text yticklabel', self.yaxis.get_majorticklabels()) def get_yminorticklabels(self): 'Get the ytick labels as a list of Text instances' return cbook.silent_list('Text yticklabel', self.yaxis.get_minorticklabels()) def get_yticklabels(self, minor=False): 'Get the ytick labels as a list of Text instances' return cbook.silent_list('Text yticklabel', self.yaxis.get_ticklabels(minor=minor)) def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs): """ call signature:: set_yticklabels(labels, fontdict=None, minor=False, **kwargs) Set the ytick labels with list of strings *labels*. Return a list of :class:`~matplotlib.text.Text` instances. *kwargs* set :class:`~matplotlib.text.Text` properties for the labels. Valid properties are %(Text)s ACCEPTS: sequence of strings """ return self.yaxis.set_ticklabels(labels, fontdict, minor=minor, **kwargs) set_yticklabels.__doc__ = cbook.dedent( set_yticklabels.__doc__) % martist.kwdocd def xaxis_date(self, tz=None): """Sets up x-axis ticks and labels that treat the x data as dates. *tz* is the time zone to use in labeling dates. Defaults to rc value. """ xmin, xmax = self.dataLim.intervalx if xmin==0.: # no data has been added - let's set the default datalim. # We should probably use a better proxy for whether the datalim # has been updated than the ignore setting dmax = today = datetime.date.today() dmin = today-datetime.timedelta(days=10) self._process_unit_info(xdata=(dmin, dmax)) dmin, dmax = self.convert_xunits([dmin, dmax]) self.viewLim.intervalx = dmin, dmax self.dataLim.intervalx = dmin, dmax locator = self.xaxis.get_major_locator() if not isinstance(locator, mdates.DateLocator): locator = mdates.AutoDateLocator(tz) self.xaxis.set_major_locator(locator) # the autolocator uses the viewlim to pick the right date # locator, but it may not have correct viewlim before an # autoscale. If the viewlim is still 0..1, set it to the # datalim and the autoscaler will update it on request if self.viewLim.intervalx[0]==0.: self.viewLim.intervalx = tuple(self.dataLim.intervalx) locator.refresh() formatter = self.xaxis.get_major_formatter() if not isinstance(formatter, mdates.DateFormatter): formatter = mdates.AutoDateFormatter(locator, tz) self.xaxis.set_major_formatter(formatter) def yaxis_date(self, tz=None): """Sets up y-axis ticks and labels that treat the y data as dates. *tz* is the time zone to use in labeling dates. Defaults to rc value. """ ymin, ymax = self.dataLim.intervaly if ymin==0.: # no data has been added - let's set the default datalim. # We should probably use a better proxy for whether the datalim # has been updated than the ignore setting dmax = today = datetime.date.today() dmin = today-datetime.timedelta(days=10) self._process_unit_info(ydata=(dmin, dmax)) dmin, dmax = self.convert_yunits([dmin, dmax]) self.viewLim.intervaly = dmin, dmax self.dataLim.intervaly = dmin, dmax locator = self.yaxis.get_major_locator() if not isinstance(locator, mdates.DateLocator): locator = mdates.AutoDateLocator(tz) self.yaxis.set_major_locator(locator) # the autolocator uses the viewlim to pick the right date # locator, but it may not have correct viewlim before an # autoscale.
If the viewlim is still 0..1, set it to the # datalim and the autoscaler will update it on request if self.viewLim.intervaly[0]==0.: self.viewLim.intervaly = tuple(self.dataLim.intervaly) locator.refresh() formatter = self.yaxis.get_major_formatter() if not isinstance(formatter, mdates.DateFormatter): formatter = mdates.AutoDateFormatter(locator, tz) self.yaxis.set_major_formatter(formatter) def format_xdata(self, x): """ Return *x* string formatted. This function will use the attribute self.fmt_xdata if it is callable, else will fall back on the xaxis major formatter """ try: return self.fmt_xdata(x) except TypeError: func = self.xaxis.get_major_formatter().format_data_short val = func(x) return val def format_ydata(self, y): """ Return *y* string formatted. This function will use the :attr:`fmt_ydata` attribute if it is callable, else will fall back on the yaxis major formatter """ try: return self.fmt_ydata(y) except TypeError: func = self.yaxis.get_major_formatter().format_data_short val = func(y) return val def format_coord(self, x, y): 'return a format string formatting the *x*, *y* coord' if x is None: x = '???' if y is None: y = '???' xs = self.format_xdata(x) ys = self.format_ydata(y) return 'x=%s, y=%s'%(xs,ys) #### Interactive manipulation def can_zoom(self): """ Return *True* if this axes supports the zoom box """ return True def get_navigate(self): """ Get whether the axes responds to navigation commands """ return self._navigate def set_navigate(self, b): """ Set whether the axes responds to navigation toolbar commands ACCEPTS: [ True | False ] """ self._navigate = b def get_navigate_mode(self): """ Get the navigation toolbar button status: 'PAN', 'ZOOM', or None """ return self._navigate_mode def set_navigate_mode(self, b): """ Set the navigation toolbar button status; .. warning:: this is not a user-API function. """ self._navigate_mode = b def start_pan(self, x, y, button): """ Called when a pan operation has started. *x*, *y* are the mouse coordinates in display coords. button is the mouse button number: * 1: LEFT * 2: MIDDLE * 3: RIGHT .. note:: Intended to be overridden by new projection types. """ self._pan_start = cbook.Bunch( lim = self.viewLim.frozen(), trans = self.transData.frozen(), trans_inverse = self.transData.inverted().frozen(), bbox = self.bbox.frozen(), x = x, y = y ) def end_pan(self): """ Called when a pan operation completes (when the mouse button is up.) .. note:: Intended to be overridden by new projection types. """ del self._pan_start def drag_pan(self, button, key, x, y): """ Called when the mouse moves during a pan operation. *button* is the mouse button number: * 1: LEFT * 2: MIDDLE * 3: RIGHT *key* is a "shift" key *x*, *y* are the mouse coordinates in display coords. .. note:: Intended to be overridden by new projection types.
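# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A hedged example of xaxis_date() as documented above: plot date values and then
# ask the axes to install date-aware tick locators/formatters.  The figure, axes
# and the ten-day sample range are assumptions for illustration only.
import datetime
import matplotlib.pyplot as plt
from matplotlib import dates as mdates

fig = plt.figure()
ax = fig.add_subplot(111)
days = [datetime.date(2009, 1, d) for d in range(1, 11)]
ax.plot(mdates.date2num(days), range(10), 'o-')
ax.xaxis_date()    # use an AutoDateLocator/AutoDateFormatter on the x-axis
# --- end of sketch ---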
""" def format_deltas(key, dx, dy): if key=='control': if(abs(dx)>abs(dy)): dy = dx else: dx = dy elif key=='x': dy = 0 elif key=='y': dx = 0 elif key=='shift': if 2*abs(dx) < abs(dy): dx=0 elif 2*abs(dy) < abs(dx): dy=0 elif(abs(dx)>abs(dy)): dy=dy/abs(dy)*abs(dx) else: dx=dx/abs(dx)*abs(dy) return (dx,dy) p = self._pan_start dx = x - p.x dy = y - p.y if dx == 0 and dy == 0: return if button == 1: dx, dy = format_deltas(key, dx, dy) result = p.bbox.translated(-dx, -dy) \ .transformed(p.trans_inverse) elif button == 3: try: dx = -dx / float(self.bbox.width) dy = -dy / float(self.bbox.height) dx, dy = format_deltas(key, dx, dy) if self.get_aspect() != 'auto': dx = 0.5 * (dx + dy) dy = dx alpha = np.power(10.0, (dx, dy)) start = p.trans_inverse.transform_point((p.x, p.y)) lim_points = p.lim.get_points() result = start + alpha * (lim_points - start) result = mtransforms.Bbox(result) except OverflowError: warnings.warn('Overflow while panning') return self.set_xlim(*result.intervalx) self.set_ylim(*result.intervaly) def get_cursor_props(self): """ return the cursor propertiess as a (*linewidth*, *color*) tuple, where *linewidth* is a float and *color* is an RGBA tuple """ return self._cursorProps def set_cursor_props(self, *args): """ Set the cursor property as:: ax.set_cursor_props(linewidth, color) or:: ax.set_cursor_props((linewidth, color)) ACCEPTS: a (*float*, *color*) tuple """ if len(args)==1: lw, c = args[0] elif len(args)==2: lw, c = args else: raise ValueError('args must be a (linewidth, color) tuple') c =mcolors.colorConverter.to_rgba(c) self._cursorProps = lw, c def connect(self, s, func): """ Register observers to be notified when certain events occur. Register with callback functions with the following signatures. The function has the following signature:: func(ax) # where ax is the instance making the callback. The following events can be connected to: 'xlim_changed','ylim_changed' The connection id is is returned - you can use this with disconnect to disconnect from the axes event """ raise DeprecationWarning('use the callbacks CallbackRegistry instance ' 'instead') def disconnect(self, cid): 'disconnect from the Axes event.' raise DeprecationWarning('use the callbacks CallbackRegistry instance ' 'instead') def get_children(self): 'return a list of child artists' children = [] children.append(self.xaxis) children.append(self.yaxis) children.extend(self.lines) children.extend(self.patches) children.extend(self.texts) children.extend(self.tables) children.extend(self.artists) children.extend(self.images) if self.legend_ is not None: children.append(self.legend_) children.extend(self.collections) children.append(self.title) children.append(self.patch) children.append(self.frame) return children def contains(self,mouseevent): """Test whether the mouse event occured in the axes. Returns T/F, {} """ if callable(self._contains): return self._contains(self,mouseevent) return self.patch.contains(mouseevent) def pick(self, *args): """ call signature:: pick(mouseevent) each child artist will fire a pick event if mouseevent is over the artist and the artist has picker set """ if len(args)>1: raise DeprecationWarning('New pick API implemented -- ' 'see API_CHANGES in the src distribution') martist.Artist.pick(self,args[0]) def __pick(self, x, y, trans=None, among=None): """ Return the artist under point that is closest to the *x*, *y*. If *trans* is *None*, *x*, and *y* are in window coords, (0,0 = lower left). 
Otherwise, *trans* is a :class:`~matplotlib.transforms.Transform` that specifies the coordinate system of *x*, *y*. The selection of artists from amongst which the pick function finds an artist can be narrowed using the optional keyword argument *among*. If provided, this should be either a sequence of permitted artists or a function taking an artist as its argument and returning a true value if and only if that artist can be selected. Note this algorithm calculates distance to the vertices of the polygon, so if you want to pick a patch, click on the edge! """ # MGDTODO: Needs updating if trans is not None: xywin = trans.transform_point((x,y)) else: xywin = x,y def dist_points(p1, p2): 'return the distance between two points' x1, y1 = p1 x2, y2 = p2 return math.sqrt((x1-x2)**2+(y1-y2)**2) def dist_x_y(p1, x, y): '*x* and *y* are arrays; return the distance to the closest point' x1, y1 = p1 return min(np.sqrt((x-x1)**2+(y-y1)**2)) def dist(a): if isinstance(a, Text): bbox = a.get_window_extent() l,b,w,h = bbox.bounds verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b) xt, yt = zip(*verts) elif isinstance(a, Patch): path = a.get_path() tverts = a.get_transform().transform_path(path) xt, yt = zip(*tverts) elif isinstance(a, mlines.Line2D): xdata = a.get_xdata(orig=False) ydata = a.get_ydata(orig=False) xt, yt = a.get_transform().numerix_x_y(xdata, ydata) return dist_x_y(xywin, np.asarray(xt), np.asarray(yt)) artists = self.lines + self.patches + self.texts if callable(among): artists = filter(test, artists) elif iterable(among): amongd = dict([(k,1) for k in among]) artists = [a for a in artists if a in amongd] elif among is None: pass else: raise ValueError('among must be callable or iterable') if not len(artists): return None ds = [ (dist(a),a) for a in artists] ds.sort() return ds[0][1] #### Labelling def get_title(self): """ Get the title text string. """ return self.title.get_text() def set_title(self, label, fontdict=None, **kwargs): """ call signature:: set_title(label, fontdict=None, **kwargs): Set the title for the axes. kwargs are Text properties: %(Text)s ACCEPTS: str .. seealso:: :meth:`text`: for information on how override and the optional args work """ default = { 'fontsize':rcParams['axes.titlesize'], 'verticalalignment' : 'bottom', 'horizontalalignment' : 'center' } self.title.set_text(label) self.title.update(default) if fontdict is not None: self.title.update(fontdict) self.title.update(kwargs) return self.title set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd def get_xlabel(self): """ Get the xlabel text string. """ label = self.xaxis.get_label() return label.get_text() def set_xlabel(self, xlabel, fontdict=None, **kwargs): """ call signature:: set_xlabel(xlabel, fontdict=None, **kwargs) Set the label for the xaxis. Valid kwargs are Text properties: %(Text)s ACCEPTS: str .. seealso:: :meth:`text`: for information on how override and the optional args work """ label = self.xaxis.get_label() label.set_text(xlabel) if fontdict is not None: label.update(fontdict) label.update(kwargs) return label set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd def get_ylabel(self): """ Get the ylabel text string. """ label = self.yaxis.get_label() return label.get_text() def set_ylabel(self, ylabel, fontdict=None, **kwargs): """ call signature:: set_ylabel(ylabel, fontdict=None, **kwargs) Set the label for the yaxis Valid kwargs are Text properties: %(Text)s ACCEPTS: str .. 
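# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A minimal example of the labelling helpers documented above (set_title,
# set_xlabel, set_ylabel); extra kwargs are Text properties.  The figure, axes
# and data names are assumptions for illustration only.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([0, 1, 2], [0, 1, 4])
ax.set_title('Quadratic growth', fontsize=14)
ax.set_xlabel('time (s)')
ax.set_ylabel('distance (m)', fontdict={'fontsize': 12, 'color': 'darkred'})
# --- end of sketch ---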
seealso:: :meth:`text`: for information on how override and the optional args work """ label = self.yaxis.get_label() label.set_text(ylabel) if fontdict is not None: label.update(fontdict) label.update(kwargs) return label set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd def text(self, x, y, s, fontdict=None, withdash=False, **kwargs): """ call signature:: text(x, y, s, fontdict=None, **kwargs) Add text in string *s* to axis at location *x*, *y*, data coordinates. Keyword arguments: *fontdict*: A dictionary to override the default text properties. If *fontdict* is *None*, the defaults are determined by your rc parameters. *withdash*: [ False | True ] Creates a :class:`~matplotlib.text.TextWithDash` instance instead of a :class:`~matplotlib.text.Text` instance. Individual keyword arguments can be used to override any given parameter:: text(x, y, s, fontsize=12) The default transform specifies that text is in data coords, alternatively, you can specify text in axis coords (0,0 is lower-left and 1,1 is upper-right). The example below places text in the center of the axes:: text(0.5, 0.5,'matplotlib', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes) You can put a rectangular box around the text instance (eg. to set a background color) by using the keyword *bbox*. *bbox* is a dictionary of :class:`matplotlib.patches.Rectangle` properties. For example:: text(x, y, s, bbox=dict(facecolor='red', alpha=0.5)) Valid kwargs are :class:`matplotlib.text.Text` properties: %(Text)s """ default = { 'verticalalignment' : 'bottom', 'horizontalalignment' : 'left', #'verticalalignment' : 'top', 'transform' : self.transData, } # At some point if we feel confident that TextWithDash # is robust as a drop-in replacement for Text and that # the performance impact of the heavier-weight class # isn't too significant, it may make sense to eliminate # the withdash kwarg and simply delegate whether there's # a dash to TextWithDash and dashlength. if withdash: t = mtext.TextWithDash( x=x, y=y, text=s, ) else: t = mtext.Text( x=x, y=y, text=s, ) self._set_artist_props(t) t.update(default) if fontdict is not None: t.update(fontdict) t.update(kwargs) self.texts.append(t) t._remove_method = lambda h: self.texts.remove(h) #if t.get_clip_on(): t.set_clip_box(self.bbox) if 'clip_on' in kwargs: t.set_clip_box(self.bbox) return t text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd def annotate(self, *args, **kwargs): """ call signature:: annotate(s, xy, xytext=None, xycoords='data', textcoords='data', arrowprops=None, **kwargs) Keyword arguments: %(Annotation)s .. plot:: mpl_examples/pylab_examples/annotation_demo2.py """ a = mtext.Annotation(*args, **kwargs) a.set_transform(mtransforms.IdentityTransform()) self._set_artist_props(a) if kwargs.has_key('clip_on'): a.set_clip_path(self.patch) self.texts.append(a) return a annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd #### Lines and spans def axhline(self, y=0, xmin=0, xmax=1, **kwargs): """ call signature:: axhline(y=0, xmin=0, xmax=1, **kwargs) Axis Horizontal Line Draw a horizontal line at *y* from *xmin* to *xmax*. With the default values of *xmin* = 0 and *xmax* = 1, this line will always span the horizontal extent of the axes, regardless of the xlim settings, even if you change them, eg. with the :meth:`set_xlim` command. That is, the horizontal extent is in axes coords: 0=left, 0.5=middle, 1.0=right but the *y* location is in data coordinates. 
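# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# Hedged examples of text() and annotate() as documented above: axes-coordinate
# text with a background box, and a data-coordinate annotation with an arrow.
# The figure/axes and the annotated point are assumptions for illustration only.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([0, 1, 2, 3], [0, 1, 0, 1])
ax.text(0.5, 0.5, 'matplotlib', horizontalalignment='center',
        verticalalignment='center', transform=ax.transAxes,
        bbox=dict(facecolor='red', alpha=0.5))
ax.annotate('peak', xy=(1, 1), xytext=(1.5, 0.75),
            arrowprops=dict(facecolor='black', shrink=0.05))
# --- end of sketch ---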
Return value is the :class:`~matplotlib.lines.Line2D` instance. kwargs are the same as kwargs to plot, and can be used to control the line properties. Eg., * draw a thick red hline at *y* = 0 that spans the xrange >>> axhline(linewidth=4, color='r') * draw a default hline at *y* = 1 that spans the xrange >>> axhline(y=1) * draw a default hline at *y* = .5 that spans the the middle half of the xrange >>> axhline(y=.5, xmin=0.25, xmax=0.75) Valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`axhspan`: for example plot and source code """ ymin, ymax = self.get_ybound() # We need to strip away the units for comparison with # non-unitized bounds yy = self.convert_yunits( y ) scaley = (yy<ymin) or (yy>ymax) trans = mtransforms.blended_transform_factory( self.transAxes, self.transData) l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs) l.x_isdata = False self.add_line(l) self.autoscale_view(scalex=False, scaley=scaley) return l axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd def axvline(self, x=0, ymin=0, ymax=1, **kwargs): """ call signature:: axvline(x=0, ymin=0, ymax=1, **kwargs) Axis Vertical Line Draw a vertical line at *x* from *ymin* to *ymax*. With the default values of *ymin* = 0 and *ymax* = 1, this line will always span the vertical extent of the axes, regardless of the xlim settings, even if you change them, eg. with the :meth:`set_xlim` command. That is, the vertical extent is in axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location is in data coordinates. Return value is the :class:`~matplotlib.lines.Line2D` instance. kwargs are the same as kwargs to plot, and can be used to control the line properties. Eg., * draw a thick red vline at *x* = 0 that spans the yrange >>> axvline(linewidth=4, color='r') * draw a default vline at *x* = 1 that spans the yrange >>> axvline(x=1) * draw a default vline at *x* = .5 that spans the the middle half of the yrange >>> axvline(x=.5, ymin=0.25, ymax=0.75) Valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`axhspan`: for example plot and source code """ xmin, xmax = self.get_xbound() # We need to strip away the units for comparison with # non-unitized bounds xx = self.convert_xunits( x ) scalex = (xx<xmin) or (xx>xmax) trans = mtransforms.blended_transform_factory( self.transData, self.transAxes) l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs) l.y_isdata = False self.add_line(l) self.autoscale_view(scalex=scalex, scaley=False) return l axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs): """ call signature:: axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs) Axis Horizontal Span. *y* coords are in data units and *x* coords are in axes (relative 0-1) units. Draw a horizontal span (rectangle) from *ymin* to *ymax*. With the default values of *xmin* = 0 and *xmax* = 1, this always spans the xrange, regardless of the xlim settings, even if you change them, eg. with the :meth:`set_xlim` command. That is, the horizontal extent is in axes coords: 0=left, 0.5=middle, 1.0=right but the *y* location is in data coordinates. Return value is a :class:`matplotlib.patches.Polygon` instance. Examples: * draw a gray rectangle from *y* = 0.25-0.75 that spans the horizontal extent of the axes >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5) Valid kwargs are :class:`~matplotlib.patches.Polygon` properties: %(Polygon)s **Example:** .. 
plot:: mpl_examples/pylab_examples/axhspan_demo.py """ trans = mtransforms.blended_transform_factory( self.transAxes, self.transData) # process the unit information self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs ) # first we need to strip away the units xmin, xmax = self.convert_xunits( [xmin, xmax] ) ymin, ymax = self.convert_yunits( [ymin, ymax] ) verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin) p = mpatches.Polygon(verts, **kwargs) p.set_transform(trans) p.x_isdata = False self.add_patch(p) return p axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs): """ call signature:: axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs) Axis Vertical Span. *x* coords are in data units and *y* coords are in axes (relative 0-1) units. Draw a vertical span (rectangle) from *xmin* to *xmax*. With the default values of *ymin* = 0 and *ymax* = 1, this always spans the yrange, regardless of the ylim settings, even if you change them, eg. with the :meth:`set_ylim` command. That is, the vertical extent is in axes coords: 0=bottom, 0.5=middle, 1.0=top but the *y* location is in data coordinates. Return value is the :class:`matplotlib.patches.Polygon` instance. Examples: * draw a vertical green translucent rectangle from x=1.25 to 1.55 that spans the yrange of the axes >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5) Valid kwargs are :class:`~matplotlib.patches.Polygon` properties: %(Polygon)s .. seealso:: :meth:`axhspan`: for example plot and source code """ trans = mtransforms.blended_transform_factory( self.transData, self.transAxes) # process the unit information self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs ) # first we need to strip away the units xmin, xmax = self.convert_xunits( [xmin, xmax] ) ymin, ymax = self.convert_yunits( [ymin, ymax] ) verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)] p = mpatches.Polygon(verts, **kwargs) p.set_transform(trans) p.y_isdata = False self.add_patch(p) return p axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd def hlines(self, y, xmin, xmax, colors='k', linestyles='solid', label='', **kwargs): """ call signature:: hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs) Plot horizontal lines at each *y* from *xmin* to *xmax*. Returns the :class:`~matplotlib.collections.LineCollection` that was added. Required arguments: *y*: a 1-D numpy array or iterable. *xmin* and *xmax*: can be scalars or ``len(x)`` numpy arrays. If they are scalars, then the respective values are constant, else the widths of the lines are determined by *xmin* and *xmax*. Optional keyword arguments: *colors*: a line collections color argument, either a single color or a ``len(y)`` list of colors *linestyles*: [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] **Example:** .. 
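# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# The line/span helpers documented above in one hedged example: axhline/axvline
# mix data coordinates (the line position) with axes coordinates (its extent),
# and axhspan/axvspan do the same for rectangles.  Setup is an assumption.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(10), range(10))
ax.axhline(linewidth=4, color='r')                    # thick red hline at y=0
ax.axvline(x=.5, ymin=0.25, ymax=0.75)                # vline over the middle half
ax.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)    # gray horizontal band
ax.axvspan(1.25, 1.55, facecolor='g', alpha=0.5)      # green vertical band
# --- end of sketch ---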
plot:: mpl_examples/pylab_examples/hline_demo.py """ if kwargs.get('fmt') is not None: raise DeprecationWarning('hlines now uses a ' 'collections.LineCollection and not a ' 'list of Line2D to draw; see API_CHANGES') # We do the conversion first since not all unitized data is uniform y = self.convert_yunits( y ) xmin = self.convert_xunits( xmin ) xmax = self.convert_xunits( xmax ) if not iterable(y): y = [y] if not iterable(xmin): xmin = [xmin] if not iterable(xmax): xmax = [xmax] y = np.asarray(y) xmin = np.asarray(xmin) xmax = np.asarray(xmax) if len(xmin)==1: xmin = np.resize( xmin, y.shape ) if len(xmax)==1: xmax = np.resize( xmax, y.shape ) if len(xmin)!=len(y): raise ValueError, 'xmin and y are unequal sized sequences' if len(xmax)!=len(y): raise ValueError, 'xmax and y are unequal sized sequences' verts = [ ((thisxmin, thisy), (thisxmax, thisy)) for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)] coll = mcoll.LineCollection(verts, colors=colors, linestyles=linestyles, label=label) self.add_collection(coll) coll.update(kwargs) minx = min(xmin.min(), xmax.min()) maxx = max(xmin.max(), xmax.max()) miny = y.min() maxy = y.max() corners = (minx, miny), (maxx, maxy) self.update_datalim(corners) self.autoscale_view() return coll hlines.__doc__ = cbook.dedent(hlines.__doc__) def vlines(self, x, ymin, ymax, colors='k', linestyles='solid', label='', **kwargs): """ call signature:: vlines(x, ymin, ymax, color='k', linestyles='solid') Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin* or *ymax* can be scalars or len(*x*) numpy arrays. If they are scalars, then the respective values are constant, else the heights of the lines are determined by *ymin* and *ymax*. *colors* a line collections color args, either a single color or a len(*x*) list of colors *linestyles* one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] Returns the :class:`matplotlib.collections.LineCollection` that was added. kwargs are :class:`~matplotlib.collections.LineCollection` properties: %(LineCollection)s """ if kwargs.get('fmt') is not None: raise DeprecationWarning('vlines now uses a ' 'collections.LineCollection and not a ' 'list of Line2D to draw; see API_CHANGES') self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs) # We do the conversion first since not all unitized data is uniform x = self.convert_xunits( x ) ymin = self.convert_yunits( ymin ) ymax = self.convert_yunits( ymax ) if not iterable(x): x = [x] if not iterable(ymin): ymin = [ymin] if not iterable(ymax): ymax = [ymax] x = np.asarray(x) ymin = np.asarray(ymin) ymax = np.asarray(ymax) if len(ymin)==1: ymin = np.resize( ymin, x.shape ) if len(ymax)==1: ymax = np.resize( ymax, x.shape ) if len(ymin)!=len(x): raise ValueError, 'ymin and x are unequal sized sequences' if len(ymax)!=len(x): raise ValueError, 'ymax and x are unequal sized sequences' Y = np.array([ymin, ymax]).T verts = [ ((thisx, thisymin), (thisx, thisymax)) for thisx, (thisymin, thisymax) in zip(x,Y)] #print 'creating line collection' coll = mcoll.LineCollection(verts, colors=colors, linestyles=linestyles, label=label) self.add_collection(coll) coll.update(kwargs) minx = min( x ) maxx = max( x ) miny = min( min(ymin), min(ymax) ) maxy = max( max(ymin), max(ymax) ) corners = (minx, miny), (maxx, maxy) self.update_datalim(corners) self.autoscale_view() return coll vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd #### Basic plotting def plot(self, *args, **kwargs): """ Plot lines and/or markers to the :class:`~matplotlib.axes.Axes`. 
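# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A hedged example of hlines()/vlines() as documented above; both return a
# LineCollection.  The sample arrays are assumptions for illustration only.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
y = np.arange(5)
ax.hlines(y, xmin=0, xmax=y + 1, colors='k', linestyles='solid')
ax.vlines([1.0, 2.5, 4.0], ymin=0, ymax=4, colors='r', linestyles='dashed')
# --- end of sketch ---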
*args* is a variable length argument, allowing for multiple *x*, *y* pairs with an optional format string. For example, each of the following is legal:: plot(x, y) # plot x and y using default line style and color plot(x, y, 'bo') # plot x and y using blue circle markers plot(y) # plot y using x as index array 0..N-1 plot(y, 'r+') # ditto, but with red plusses If *x* and/or *y* is 2-dimensional, then the corresponding columns will be plotted. An arbitrary number of *x*, *y*, *fmt* groups can be specified, as in:: a.plot(x1, y1, 'g^', x2, y2, 'g-') Return value is a list of lines that were added. The following format string characters are accepted to control the line style or marker: ================ =============================== character description ================ =============================== '-' solid line style '--' dashed line style '-.' dash-dot line style ':' dotted line style '.' point marker ',' pixel marker 'o' circle marker 'v' triangle_down marker '^' triangle_up marker '<' triangle_left marker '>' triangle_right marker '1' tri_down marker '2' tri_up marker '3' tri_left marker '4' tri_right marker 's' square marker 'p' pentagon marker '*' star marker 'h' hexagon1 marker 'H' hexagon2 marker '+' plus marker 'x' x marker 'D' diamond marker 'd' thin_diamond marker '|' vline marker '_' hline marker ================ =============================== The following color abbreviations are supported: ========== ======== character color ========== ======== 'b' blue 'g' green 'r' red 'c' cyan 'm' magenta 'y' yellow 'k' black 'w' white ========== ======== In addition, you can specify colors in many weird and wonderful ways, including full names (``'green'``), hex strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or grayscale intensities as a string (``'0.8'``). Of these, the string specifications can be used in place of a ``fmt`` group, but the tuple forms can be used only as ``kwargs``. Line styles and colors are combined in a single format string, as in ``'bo'`` for blue circles. The *kwargs* can be used to set line properties (any property that has a ``set_*`` method). You can use this to set a line label (for auto legends), linewidth, anitialising, marker face color, etc. Here is an example:: plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2) plot([1,2,3], [1,4,9], 'rs', label='line 2') axis([0, 4, 0, 10]) legend() If you make multiple lines with one plot command, the kwargs apply to all those lines, e.g.:: plot(x1, y1, x2, y2, antialised=False) Neither line will be antialiased. You do not need to use format strings, which are just abbreviations. All of the line properties can be controlled by keyword arguments. For example, you can set the color, marker, linestyle, and markercolor with:: plot(x, y, color='green', linestyle='dashed', marker='o', markerfacecolor='blue', markersize=12). See :class:`~matplotlib.lines.Line2D` for details. The kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s kwargs *scalex* and *scaley*, if defined, are passed on to :meth:`~matplotlib.axes.Axes.autoscale_view` to determine whether the *x* and *y* axes are autoscaled; the default is *True*. 
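# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A hedged example of the plot() format strings and kwargs documented above.
# The figure/axes and data are assumptions for illustration only.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)
ax.plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
ax.plot([1, 2, 3], [2, 3, 4], color='green', linestyle='dashed', marker='o',
        markerfacecolor='blue', markersize=12)
ax.axis([0, 4, 0, 10])
# --- end of sketch ---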
""" scalex = kwargs.pop( 'scalex', True) scaley = kwargs.pop( 'scaley', True) if not self._hold: self.cla() lines = [] for line in self._get_lines(*args, **kwargs): self.add_line(line) lines.append(line) self.autoscale_view(scalex=scalex, scaley=scaley) return lines plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs): """ call signature:: plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs) Similar to the :func:`~matplotlib.pyplot.plot` command, except the *x* or *y* (or both) data is considered to be dates, and the axis is labeled accordingly. *x* and/or *y* can be a sequence of dates represented as float days since 0001-01-01 UTC. Keyword arguments: *fmt*: string The plot format string. *tz*: [ None | timezone string ] The time zone to use in labeling dates. If *None*, defaults to rc value. *xdate*: [ True | False ] If *True*, the *x*-axis will be labeled with dates. *ydate*: [ False | True ] If *True*, the *y*-axis will be labeled with dates. Note if you are using custom date tickers and formatters, it may be necessary to set the formatters/locators after the call to :meth:`plot_date` since :meth:`plot_date` will set the default tick locator to :class:`matplotlib.ticker.AutoDateLocator` (if the tick locator is not already set to a :class:`matplotlib.ticker.DateLocator` instance) and the default tick formatter to :class:`matplotlib.ticker.AutoDateFormatter` (if the tick formatter is not already set to a :class:`matplotlib.ticker.DateFormatter` instance). Valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :mod:`~matplotlib.dates`: for helper functions :func:`~matplotlib.dates.date2num`, :func:`~matplotlib.dates.num2date` and :func:`~matplotlib.dates.drange`: for help on creating the required floating point dates. """ if not self._hold: self.cla() ret = self.plot(x, y, fmt, **kwargs) if xdate: self.xaxis_date(tz) if ydate: self.yaxis_date(tz) self.autoscale_view() return ret plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd def loglog(self, *args, **kwargs): """ call signature:: loglog(*args, **kwargs) Make a plot with log scaling on the *x* and *y* axis. :func:`~matplotlib.pyplot.loglog` supports all the keyword arguments of :func:`~matplotlib.pyplot.plot` and :meth:`matplotlib.axes.Axes.set_xscale` / :meth:`matplotlib.axes.Axes.set_yscale`. Notable keyword arguments: *basex*/*basey*: scalar > 1 base of the *x*/*y* logarithm *subsx*/*subsy*: [ None | sequence ] the location of the minor *x*/*y* ticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`matplotlib.axes.Axes.set_xscale` / :meth:`matplotlib.axes.Axes.set_yscale` for details The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/log_demo.py """ if not self._hold: self.cla() dx = {'basex': kwargs.pop('basex', 10), 'subsx': kwargs.pop('subsx', None), } dy = {'basey': kwargs.pop('basey', 10), 'subsy': kwargs.pop('subsy', None), } self.set_xscale('log', **dx) self.set_yscale('log', **dy) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd def semilogx(self, *args, **kwargs): """ call signature:: semilogx(*args, **kwargs) Make a plot with log scaling on the *x* axis. 
:func:`semilogx` supports all the keyword arguments of :func:`~matplotlib.pyplot.plot` and :meth:`matplotlib.axes.Axes.set_xscale`. Notable keyword arguments: *basex*: scalar > 1 base of the *x* logarithm *subsx*: [ None | sequence ] The location of the minor xticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for details. The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`loglog`: For example code and figure """ if not self._hold: self.cla() d = {'basex': kwargs.pop( 'basex', 10), 'subsx': kwargs.pop( 'subsx', None), } self.set_xscale('log', **d) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd def semilogy(self, *args, **kwargs): """ call signature:: semilogy(*args, **kwargs) Make a plot with log scaling on the *y* axis. :func:`semilogy` supports all the keyword arguments of :func:`~matplotlib.pylab.plot` and :meth:`matplotlib.axes.Axes.set_yscale`. Notable keyword arguments: *basey*: scalar > 1 Base of the *y* logarithm *subsy*: [ None | sequence ] The location of the minor yticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for details. The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`loglog`: For example code and figure """ if not self._hold: self.cla() d = {'basey': kwargs.pop('basey', 10), 'subsy': kwargs.pop('subsy', None), } self.set_yscale('log', **d) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd def acorr(self, x, **kwargs): """ call signature:: acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False, maxlags=None, **kwargs) Plot the autocorrelation of *x*. If *normed* = *True*, normalize the data by the autocorrelation at 0-th lag. *x* is detrended by the *detrend* callable (default no normalization). Data are plotted as ``plot(lags, c, **kwargs)`` Return value is a tuple (*lags*, *c*, *line*) where: - *lags* are a length 2*maxlags+1 lag vector - *c* is the 2*maxlags+1 auto correlation vector - *line* is a :class:`~matplotlib.lines.Line2D` instance returned by :meth:`plot` The default *linestyle* is None and the default *marker* is ``'o'``, though these can be overridden with keyword args. The cross correlation is performed with :func:`numpy.correlate` with *mode* = 2. If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines` rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw vertical lines from the origin to the acorr. Otherwise, the plot style is determined by the kwargs, which are :class:`~matplotlib.lines.Line2D` properties. *maxlags* is a positive integer detailing the number of lags to show. The default value of *None* will return all :math:`2 \mathrm{len}(x) - 1` lags. The return value is a tuple (*lags*, *c*, *linecol*, *b*) where - *linecol* is the :class:`~matplotlib.collections.LineCollection` - *b* is the *x*-axis. .. seealso:: :meth:`~matplotlib.axes.Axes.plot` or :meth:`~matplotlib.axes.Axes.vlines`: For documentation on valid kwargs. **Example:** :func:`~matplotlib.pyplot.xcorr` above, and :func:`~matplotlib.pyplot.acorr` below. **Example:** .. 
plot:: mpl_examples/pylab_examples/xcorr_demo.py """ return self.xcorr(x, x, **kwargs) acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none, usevlines=False, maxlags=None, **kwargs): """ call signature:: xcorr(x, y, normed=False, detrend=mlab.detrend_none, usevlines=False, **kwargs): Plot the cross correlation between *x* and *y*. If *normed* = *True*, normalize the data by the cross correlation at 0-th lag. *x* and y are detrended by the *detrend* callable (default no normalization). *x* and *y* must be equal length. Data are plotted as ``plot(lags, c, **kwargs)`` Return value is a tuple (*lags*, *c*, *line*) where: - *lags* are a length ``2*maxlags+1`` lag vector - *c* is the ``2*maxlags+1`` auto correlation vector - *line* is a :class:`~matplotlib.lines.Line2D` instance returned by :func:`~matplotlib.pyplot.plot`. The default *linestyle* is *None* and the default *marker* is 'o', though these can be overridden with keyword args. The cross correlation is performed with :func:`numpy.correlate` with *mode* = 2. If *usevlines* is *True*: :func:`~matplotlib.pyplot.vlines` rather than :func:`~matplotlib.pyplot.plot` is used to draw vertical lines from the origin to the xcorr. Otherwise the plotstyle is determined by the kwargs, which are :class:`~matplotlib.lines.Line2D` properties. The return value is a tuple (*lags*, *c*, *linecol*, *b*) where *linecol* is the :class:`matplotlib.collections.LineCollection` instance and *b* is the *x*-axis. *maxlags* is a positive integer detailing the number of lags to show. The default value of *None* will return all ``(2*len(x)-1)`` lags. **Example:** :func:`~matplotlib.pyplot.xcorr` above, and :func:`~matplotlib.pyplot.acorr` below. **Example:** .. plot:: mpl_examples/pylab_examples/xcorr_demo.py """ Nx = len(x) if Nx!=len(y): raise ValueError('x and y must be equal length') x = detrend(np.asarray(x)) y = detrend(np.asarray(y)) c = np.correlate(x, y, mode=2) if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y)) if maxlags is None: maxlags = Nx - 1 if maxlags >= Nx or maxlags < 1: raise ValueError('maglags must be None or strictly ' 'positive < %d'%Nx) lags = np.arange(-maxlags,maxlags+1) c = c[Nx-1-maxlags:Nx+maxlags] if usevlines: a = self.vlines(lags, [0], c, **kwargs) b = self.axhline(**kwargs) else: kwargs.setdefault('marker', 'o') kwargs.setdefault('linestyle', 'None') a, = self.plot(lags, c, **kwargs) b = None return lags, c, a, b xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd def legend(self, *args, **kwargs): """ call signature:: legend(*args, **kwargs) Place a legend on the current axes at location *loc*. Labels are a sequence of strings and *loc* can be a string or an integer specifying the legend location. To make a legend with existing lines:: legend() :meth:`legend` by itself will try and build a legend using the label property of the lines/patches/collections. You can set the label of a line by doing:: plot(x, y, label='my data') or:: line.set_label('my data'). If label is set to '_nolegend_', the item will not be shown in legend. 
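# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A hedged example of xcorr()/acorr() as documented above, using the vlines-style
# output.  The random data and the maxlags choice are assumptions for illustration.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
x = np.random.randn(200)
y = np.random.randn(200)
lags, c, linecol, b = ax1.xcorr(x, y, usevlines=True, normed=True, maxlags=50)
ax2.acorr(x, usevlines=True, normed=True, maxlags=50)
# --- end of sketch ---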
To automatically generate the legend from labels:: legend( ('label1', 'label2', 'label3') ) To make a legend for a list of lines and labels:: legend( (line1, line2, line3), ('label1', 'label2', 'label3') ) To make a legend at a given location, using a location argument:: legend( ('label1', 'label2', 'label3'), loc='upper left') or:: legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2) The location codes are =============== ============= Location String Location Code =============== ============= 'best' 0 'upper right' 1 'upper left' 2 'lower left' 3 'lower right' 4 'right' 5 'center left' 6 'center right' 7 'lower center' 8 'upper center' 9 'center' 10 =============== ============= If none of these are locations are suitable, loc can be a 2-tuple giving x,y in axes coords, ie:: loc = 0, 1 # left top loc = 0.5, 0.5 # center Keyword arguments: *isaxes*: [ True | False ] Indicates that this is an axes legend *numpoints*: integer The number of points in the legend line, default is 4 *prop*: [ None | FontProperties ] A :class:`matplotlib.font_manager.FontProperties` instance, or *None* to use rc settings. *pad*: [ None | scalar ] The fractional whitespace inside the legend border, between 0 and 1. If *None*, use rc settings. *markerscale*: [ None | scalar ] The relative size of legend markers vs. original. If *None*, use rc settings. *shadow*: [ None | False | True ] If *True*, draw a shadow behind legend. If *None*, use rc settings. *labelsep*: [ None | scalar ] The vertical space between the legend entries. If *None*, use rc settings. *handlelen*: [ None | scalar ] The length of the legend lines. If *None*, use rc settings. *handletextsep*: [ None | scalar ] The space between the legend line and legend text. If *None*, use rc settings. *axespad*: [ None | scalar ] The border between the axes and legend edge. If *None*, use rc settings. **Example:** .. plot:: mpl_examples/api/legend_demo.py """ def get_handles(): handles = self.lines[:] handles.extend(self.patches) handles.extend([c for c in self.collections if isinstance(c, mcoll.LineCollection)]) handles.extend([c for c in self.collections if isinstance(c, mcoll.RegularPolyCollection)]) return handles if len(args)==0: handles = [] labels = [] for handle in get_handles(): label = handle.get_label() if (label is not None and label != '' and not label.startswith('_')): handles.append(handle) labels.append(label) if len(handles) == 0: warnings.warn("No labeled objects found. " "Use label='...' kwarg on individual plots.") return None elif len(args)==1: # LABELS labels = args[0] handles = [h for h, label in zip(get_handles(), labels)] elif len(args)==2: if is_string_like(args[1]) or isinstance(args[1], int): # LABELS, LOC labels, loc = args handles = [h for h, label in zip(get_handles(), labels)] kwargs['loc'] = loc else: # LINES, LABELS handles, labels = args elif len(args)==3: # LINES, LABELS, LOC handles, labels, loc = args kwargs['loc'] = loc else: raise TypeError('Invalid arguments to legend') handles = cbook.flatten(handles) self.legend_ = mlegend.Legend(self, handles, labels, **kwargs) return self.legend_ #### Specialized plotting def step(self, x, y, *args, **kwargs): ''' call signature:: step(x, y, *args, **kwargs) Make a step plot. Additional keyword args to :func:`step` are the same as those for :func:`~matplotlib.pyplot.plot`. *x* and *y* must be 1-D sequences, and it is assumed, but not checked, that *x* is uniformly increasing. 
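# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A hedged example of the legend() call styles documented above: build the legend
# from artist labels, or pass handles and labels explicitly with a location.
# Setup and data are assumptions for illustration only.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot([1, 2, 3], [1, 2, 3], label='my data')
line2, = ax.plot([1, 2, 3], [3, 2, 1], label='_nolegend_')  # excluded from legend
ax.legend(loc='upper left')                    # uses the artists' label properties
ax.legend((line1,), ('renamed data',), loc=2)  # explicit handles + labels
# --- end of sketch ---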
Keyword arguments: *where*: [ 'pre' | 'post' | 'mid' ] If 'pre', the interval from x[i] to x[i+1] has level y[i] If 'post', that interval has level y[i+1] If 'mid', the jumps in *y* occur half-way between the *x*-values. ''' where = kwargs.pop('where', 'pre') if where not in ('pre', 'post', 'mid'): raise ValueError("'where' argument to step must be " "'pre', 'post' or 'mid'") kwargs['linestyle'] = 'steps-' + where return self.plot(x, y, *args, **kwargs) def bar(self, left, height, width=0.8, bottom=None, color=None, edgecolor=None, linewidth=None, yerr=None, xerr=None, ecolor=None, capsize=3, align='edge', orientation='vertical', log=False, **kwargs ): """ call signature:: bar(left, height, width=0.8, bottom=0, color=None, edgecolor=None, linewidth=None, yerr=None, xerr=None, ecolor=None, capsize=3, align='edge', orientation='vertical', log=False) Make a bar plot with rectangles bounded by: *left*, *left* + *width*, *bottom*, *bottom* + *height* (left, right, bottom and top edges) *left*, *height*, *width*, and *bottom* can be either scalars or sequences Return value is a list of :class:`matplotlib.patches.Rectangle` instances. Required arguments: ======== =============================================== Argument Description ======== =============================================== *left* the x coordinates of the left sides of the bars *height* the heights of the bars ======== =============================================== Optional keyword arguments: =============== ========================================== Keyword Description =============== ========================================== *width* the widths of the bars *bottom* the y coordinates of the bottom edges of the bars *color* the colors of the bars *edgecolor* the colors of the bar edges *linewidth* width of bar edges; None means use default linewidth; 0 means don't draw edges. *xerr* if not None, will be used to generate errorbars on the bar chart *yerr* if not None, will be used to generate errorbars on the bar chart *ecolor* specifies the color of any errorbar *capsize* (default 3) determines the length in points of the error bar caps *align* 'edge' (default) | 'center' *orientation* 'vertical' | 'horizontal' *log* [False|True] False (default) leaves the orientation axis as-is; True sets it to log scale =============== ========================================== For vertical bars, *align* = 'edge' aligns bars by their left edges in left, while *align* = 'center' interprets these values as the *x* coordinates of the bar centers. For horizontal bars, *align* = 'edge' aligns bars by their bottom edges in bottom, while *align* = 'center' interprets these values as the *y* coordinates of the bar centers. The optional arguments *color*, *edgecolor*, *linewidth*, *xerr*, and *yerr* can be either scalars or sequences of length equal to the number of bars. This enables you to use bar as the basis for stacked bar charts, or candlestick plots. Other optional kwargs: %(Rectangle)s **Example:** A stacked bar chart. .. 
plot:: mpl_examples/pylab_examples/bar_stacked.py """ if not self._hold: self.cla() label = kwargs.pop('label', '') def make_iterable(x): if not iterable(x): return [x] else: return x # make them safe to take len() of _left = left left = make_iterable(left) height = make_iterable(height) width = make_iterable(width) _bottom = bottom bottom = make_iterable(bottom) linewidth = make_iterable(linewidth) adjust_ylim = False adjust_xlim = False if orientation == 'vertical': self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs) if log: self.set_yscale('log') # size width and bottom according to length of left if _bottom is None: if self.get_yscale() == 'log': bottom = [1e-100] adjust_ylim = True else: bottom = [0] nbars = len(left) if len(width) == 1: width *= nbars if len(bottom) == 1: bottom *= nbars elif orientation == 'horizontal': self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs) if log: self.set_xscale('log') # size left and height according to length of bottom if _left is None: if self.get_xscale() == 'log': left = [1e-100] adjust_xlim = True else: left = [0] nbars = len(bottom) if len(left) == 1: left *= nbars if len(height) == 1: height *= nbars else: raise ValueError, 'invalid orientation: %s' % orientation # do not convert to array here as unit info is lost #left = np.asarray(left) #height = np.asarray(height) #width = np.asarray(width) #bottom = np.asarray(bottom) if len(linewidth) < nbars: linewidth *= nbars if color is None: color = [None] * nbars else: color = list(mcolors.colorConverter.to_rgba_array(color)) if len(color) < nbars: color *= nbars if edgecolor is None: edgecolor = [None] * nbars else: edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor)) if len(edgecolor) < nbars: edgecolor *= nbars if yerr is not None: if not iterable(yerr): yerr = [yerr]*nbars if xerr is not None: if not iterable(xerr): xerr = [xerr]*nbars # FIXME: convert the following to proper input validation # raising ValueError; don't use assert for this. assert len(left)==nbars, "argument 'left' must be %d or scalar" % nbars assert len(height)==nbars, ("argument 'height' must be %d or scalar" % nbars) assert len(width)==nbars, ("argument 'width' must be %d or scalar" % nbars) assert len(bottom)==nbars, ("argument 'bottom' must be %d or scalar" % nbars) if yerr is not None and len(yerr)!=nbars: raise ValueError( "bar() argument 'yerr' must be len(%s) or scalar" % nbars) if xerr is not None and len(xerr)!=nbars: raise ValueError( "bar() argument 'xerr' must be len(%s) or scalar" % nbars) patches = [] # lets do some conversions now since some types cannot be # subtracted uniformly if self.xaxis is not None: xconv = self.xaxis.converter if xconv is not None: units = self.xaxis.get_units() left = xconv.convert( left, units ) width = xconv.convert( width, units ) if self.yaxis is not None: yconv = self.yaxis.converter if yconv is not None : units = self.yaxis.get_units() bottom = yconv.convert( bottom, units ) height = yconv.convert( height, units ) if align == 'edge': pass elif align == 'center': if orientation == 'vertical': left = [left[i] - width[i]/2. for i in xrange(len(left))] elif orientation == 'horizontal': bottom = [bottom[i] - height[i]/2. 
for i in xrange(len(bottom))] else: raise ValueError, 'invalid alignment: %s' % align args = zip(left, bottom, width, height, color, edgecolor, linewidth) for l, b, w, h, c, e, lw in args: if h<0: b += h h = abs(h) if w<0: l += w w = abs(w) r = mpatches.Rectangle( xy=(l, b), width=w, height=h, facecolor=c, edgecolor=e, linewidth=lw, label=label ) label = '_nolegend_' r.update(kwargs) #print r.get_label(), label, 'label' in kwargs self.add_patch(r) patches.append(r) holdstate = self._hold self.hold(True) # ensure hold is on before plotting errorbars if xerr is not None or yerr is not None: if orientation == 'vertical': # using list comps rather than arrays to preserve unit info x = [l+0.5*w for l, w in zip(left, width)] y = [b+h for b,h in zip(bottom, height)] elif orientation == 'horizontal': # using list comps rather than arrays to preserve unit info x = [l+w for l,w in zip(left, width)] y = [b+0.5*h for b,h in zip(bottom, height)] self.errorbar( x, y, yerr=yerr, xerr=xerr, fmt=None, ecolor=ecolor, capsize=capsize) self.hold(holdstate) # restore previous hold state if adjust_xlim: xmin, xmax = self.dataLim.intervalx xmin = np.amin(width[width!=0]) # filter out the 0 width rects if xerr is not None: xmin = xmin - np.amax(xerr) xmin = max(xmin*0.9, 1e-100) self.dataLim.intervalx = (xmin, xmax) if adjust_ylim: ymin, ymax = self.dataLim.intervaly ymin = np.amin(height[height!=0]) # filter out the 0 height rects if yerr is not None: ymin = ymin - np.amax(yerr) ymin = max(ymin*0.9, 1e-100) self.dataLim.intervaly = (ymin, ymax) self.autoscale_view() return patches bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd def barh(self, bottom, width, height=0.8, left=None, **kwargs): """ call signature:: barh(bottom, width, height=0.8, left=0, **kwargs) Make a horizontal bar plot with rectangles bounded by: *left*, *left* + *width*, *bottom*, *bottom* + *height* (left, right, bottom and top edges) *bottom*, *width*, *height*, and *left* can be either scalars or sequences Return value is a list of :class:`matplotlib.patches.Rectangle` instances. Required arguments: ======== ====================================================== Argument Description ======== ====================================================== *bottom* the vertical positions of the bottom edges of the bars *width* the lengths of the bars ======== ====================================================== Optional keyword arguments: =============== ========================================== Keyword Description =============== ========================================== *height* the heights (thicknesses) of the bars *left* the x coordinates of the left edges of the bars *color* the colors of the bars *edgecolor* the colors of the bar edges *linewidth* width of bar edges; None means use default linewidth; 0 means don't draw edges. *xerr* if not None, will be used to generate errorbars on the bar chart *yerr* if not None, will be used to generate errorbars on the bar chart *ecolor* specifies the color of any errorbar *capsize* (default 3) determines the length in points of the error bar caps *align* 'edge' (default) | 'center' *log* [False|True] False (default) leaves the horizontal axis as-is; True sets it to log scale =============== ========================================== Setting *align* = 'edge' aligns bars by their bottom edges in bottom, while *align* = 'center' interprets these values as the *y* coordinates of the bar centers. 
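# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A hedged example of bar()/barh() as documented above, including centred bars and
# error bars.  The data values are assumptions for illustration only.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
left = np.arange(5)
height = [3, 5, 2, 6, 4]
ax1.bar(left, height, width=0.8, align='center', color='b',
        yerr=[0.5, 0.4, 0.3, 0.6, 0.2], ecolor='k', capsize=3)
ax2.barh(np.arange(5), height, height=0.6, left=0, color='0.7')
# --- end of sketch ---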
The optional arguments *color*, *edgecolor*, *linewidth*, *xerr*, and *yerr* can be either scalars or sequences of length equal to the number of bars. This enables you to use barh as the basis for stacked bar charts, or candlestick plots. other optional kwargs: %(Rectangle)s """ patches = self.bar(left=left, height=height, width=width, bottom=bottom, orientation='horizontal', **kwargs) return patches barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd def broken_barh(self, xranges, yrange, **kwargs): """ call signature:: broken_barh(self, xranges, yrange, **kwargs) A collection of horizontal bars spanning *yrange* with a sequence of *xranges*. Required arguments: ========= ============================== Argument Description ========= ============================== *xranges* sequence of (*xmin*, *xwidth*) *yrange* sequence of (*ymin*, *ywidth*) ========= ============================== kwargs are :class:`matplotlib.collections.BrokenBarHCollection` properties: %(BrokenBarHCollection)s these can either be a single argument, ie:: facecolors = 'black' or a sequence of arguments for the various bars, ie:: facecolors = ('black', 'red', 'green') **Example:** .. plot:: mpl_examples/pylab_examples/broken_barh.py """ col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs) self.add_collection(col, autolim=True) self.autoscale_view() return col broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'): """ call signature:: stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-') A stem plot plots vertical lines (using *linefmt*) at each *x* location from the baseline to *y*, and places a marker there using *markerfmt*. A horizontal line at 0 is is plotted using *basefmt*. Return value is a tuple (*markerline*, *stemlines*, *baseline*). .. seealso:: `this document`__ for details :file:`examples/pylab_examples/stem_plot.py`: for a demo __ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html """ remember_hold=self._hold if not self._hold: self.cla() self.hold(True) markerline, = self.plot(x, y, markerfmt) stemlines = [] for thisx, thisy in zip(x, y): l, = self.plot([thisx,thisx], [0, thisy], linefmt) stemlines.append(l) baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt) self.hold(remember_hold) return markerline, stemlines, baseline def pie(self, x, explode=None, labels=None, colors=None, autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1): r""" call signature:: pie(x, explode=None, labels=None, colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'), autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False) Make a pie chart of array *x*. The fractional area of each wedge is given by x/sum(x). If sum(x) <= 1, then the values of x give the fractional area directly and the array will not be normalized. Keyword arguments: *explode*: [ None | len(x) sequence ] If not *None*, is a len(*x*) array which specifies the fraction of the radius with which to offset each wedge. *colors*: [ None | color sequence ] A sequence of matplotlib color args through which the pie chart will cycle. *labels*: [ None | len(x) sequence of strings ] A sequence of strings providing the labels for each wedge *autopct*: [ None | format string | format function ] If not *None*, is a string or function used to label the wedges with their numeric value. The label will be placed inside the wedge. If it is a format string, the label will be ``fmt%pct``. If it is a function, it will be called. 
*pctdistance*: scalar The ratio between the center of each pie slice and the start of the text generated by *autopct*. Ignored if *autopct* is *None*; default is 0.6. *labeldistance*: scalar The radial distance at which the pie labels are drawn *shadow*: [ False | True ] Draw a shadow beneath the pie. The pie chart will probably look best if the figure and axes are square. Eg.:: figure(figsize=(8,8)) ax = axes([0.1, 0.1, 0.8, 0.8]) Return value: If *autopct* is None, return the tuple (*patches*, *texts*): - *patches* is a sequence of :class:`matplotlib.patches.Wedge` instances - *texts* is a list of the label :class:`matplotlib.text.Text` instances. If *autopct* is not *None*, return the tuple (*patches*, *texts*, *autotexts*), where *patches* and *texts* are as above, and *autotexts* is a list of :class:`~matplotlib.text.Text` instances for the numeric labels. """ self.set_frame_on(False) x = np.asarray(x).astype(np.float32) sx = float(x.sum()) if sx>1: x = np.divide(x,sx) if labels is None: labels = ['']*len(x) if explode is None: explode = [0]*len(x) assert(len(x)==len(labels)) assert(len(x)==len(explode)) if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w') center = 0,0 radius = 1 theta1 = 0 i = 0 texts = [] slices = [] autotexts = [] for frac, label, expl in cbook.safezip(x,labels, explode): x, y = center theta2 = theta1 + frac thetam = 2*math.pi*0.5*(theta1+theta2) x += expl*math.cos(thetam) y += expl*math.sin(thetam) w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2, facecolor=colors[i%len(colors)]) slices.append(w) self.add_patch(w) w.set_label(label) if shadow: # make sure to add a shadow after the call to # add_patch so the figure and transform props will be # set shad = mpatches.Shadow(w, -0.02, -0.02, #props={'facecolor':w.get_facecolor()} ) shad.set_zorder(0.9*w.get_zorder()) self.add_patch(shad) xt = x + labeldistance*radius*math.cos(thetam) yt = y + labeldistance*radius*math.sin(thetam) label_alignment = xt > 0 and 'left' or 'right' t = self.text(xt, yt, label, size=rcParams['xtick.labelsize'], horizontalalignment=label_alignment, verticalalignment='center') texts.append(t) if autopct is not None: xt = x + pctdistance*radius*math.cos(thetam) yt = y + pctdistance*radius*math.sin(thetam) if is_string_like(autopct): s = autopct%(100.*frac) elif callable(autopct): s = autopct(100.*frac) else: raise TypeError( 'autopct must be callable or a format string') t = self.text(xt, yt, s, horizontalalignment='center', verticalalignment='center') autotexts.append(t) theta1 = theta2 i += 1 self.set_xlim((-1.25, 1.25)) self.set_ylim((-1.25, 1.25)) self.set_xticks([]) self.set_yticks([]) if autopct is None: return slices, texts else: return slices, texts, autotexts def errorbar(self, x, y, yerr=None, xerr=None, fmt='-', ecolor=None, elinewidth=None, capsize=3, barsabove=False, lolims=False, uplims=False, xlolims=False, xuplims=False, **kwargs): """ call signature:: errorbar(x, y, yerr=None, xerr=None, fmt='-', ecolor=None, elinewidth=None, capsize=3, barsabove=False, lolims=False, uplims=False, xlolims=False, xuplims=False) Plot *x* versus *y* with error deltas in *yerr* and *xerr*. Vertical errorbars are plotted if *yerr* is not *None*. Horizontal errorbars are plotted if *xerr* is not *None*. *x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a single error bar at *x*, *y*. Optional keyword arguments: *xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ] If a scalar number, len(N) array-like object, or an Nx1 array-like object, errorbars are drawn +/- value. 
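# --- illustrative usage sketch (editor's addition; not part of the original source) ---
# A hedged example of pie() as documented above; the fractions, labels, and the
# square figure layout are assumptions for illustration only.
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
fracs = [15, 30, 45, 10]
labels = ['Frogs', 'Hogs', 'Dogs', 'Logs']
patches, texts, autotexts = ax.pie(fracs, explode=(0, 0.05, 0, 0), labels=labels,
                                   autopct='%1.1f%%', shadow=True)
# --- end of sketch ---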
If a rank-1, Nx2 Numpy array, errorbars are drawn at -column1 and +column2 *fmt*: '-' The plot format symbol for *y*. If *fmt* is *None*, just plot the errorbars with no line symbols. This can be useful for creating a bar plot with errorbars. *ecolor*: [ None | mpl color ] a matplotlib color arg which gives the color the errorbar lines; if *None*, use the marker color. *elinewidth*: scalar the linewidth of the errorbar lines. If *None*, use the linewidth. *capsize*: scalar the size of the error bar caps in points *barsabove*: [ True | False ] if *True*, will plot the errorbars above the plot symbols. Default is below. *lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ] These arguments can be used to indicate that a value gives only upper/lower limits. In that case a caret symbol is used to indicate this. lims-arguments may be of the same type as *xerr* and *yerr*. All other keyword arguments are passed on to the plot command for the markers, so you can add additional key=value pairs to control the errorbar markers. For example, this code makes big red squares with thick green edges:: x,y,yerr = rand(3,10) errorbar(x, y, yerr, marker='s', mfc='red', mec='green', ms=20, mew=4) where *mfc*, *mec*, *ms* and *mew* are aliases for the longer property names, *markerfacecolor*, *markeredgecolor*, *markersize* and *markeredgewith*. valid kwargs for the marker properties are %(Line2D)s Return value is a length 3 tuple. The first element is the :class:`~matplotlib.lines.Line2D` instance for the *y* symbol lines. The second element is a list of error bar cap lines, the third element is a list of :class:`~matplotlib.collections.LineCollection` instances for the horizontal and vertical error ranges. **Example:** .. plot:: mpl_examples/pylab_examples/errorbar_demo.py """ self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) if not self._hold: self.cla() # make sure all the args are iterable; use lists not arrays to # preserve units if not iterable(x): x = [x] if not iterable(y): y = [y] if xerr is not None: if not iterable(xerr): xerr = [xerr]*len(x) if yerr is not None: if not iterable(yerr): yerr = [yerr]*len(y) l0 = None if barsabove and fmt is not None: l0, = self.plot(x,y,fmt,**kwargs) barcols = [] caplines = [] lines_kw = {'label':'_nolegend_'} if elinewidth: lines_kw['linewidth'] = elinewidth else: if 'linewidth' in kwargs: lines_kw['linewidth']=kwargs['linewidth'] if 'lw' in kwargs: lines_kw['lw']=kwargs['lw'] if 'transform' in kwargs: lines_kw['transform'] = kwargs['transform'] # arrays fine here, they are booleans and hence not units if not iterable(lolims): lolims = np.asarray([lolims]*len(x), bool) else: lolims = np.asarray(lolims, bool) if not iterable(uplims): uplims = np.array([uplims]*len(x), bool) else: uplims = np.asarray(uplims, bool) if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool) else: xlolims = np.asarray(xlolims, bool) if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool) else: xuplims = np.asarray(xuplims, bool) def xywhere(xs, ys, mask): """ return xs[mask], ys[mask] where mask is True but xs and ys are not arrays """ assert len(xs)==len(ys) assert len(xs)==len(mask) xs = [thisx for thisx, b in zip(xs, mask) if b] ys = [thisy for thisy, b in zip(ys, mask) if b] return xs, ys if capsize > 0: plot_kw = { 'ms':2*capsize, 'label':'_nolegend_'} if 'markeredgewidth' in kwargs: plot_kw['markeredgewidth']=kwargs['markeredgewidth'] if 'mew' in kwargs: plot_kw['mew']=kwargs['mew'] if 'transform' in kwargs: plot_kw['transform'] = 
kwargs['transform'] if xerr is not None: if (iterable(xerr) and len(xerr)==2 and iterable(xerr[0]) and iterable(xerr[1])): # using list comps rather than arrays to preserve units left = [thisx-thiserr for (thisx, thiserr) in cbook.safezip(x,xerr[0])] right = [thisx+thiserr for (thisx, thiserr) in cbook.safezip(x,xerr[1])] else: # using list comps rather than arrays to preserve units left = [thisx-thiserr for (thisx, thiserr) in cbook.safezip(x,xerr)] right = [thisx+thiserr for (thisx, thiserr) in cbook.safezip(x,xerr)] barcols.append( self.hlines(y, left, right, **lines_kw ) ) if capsize > 0: if xlolims.any(): # can't use numpy logical indexing since left and # y are lists leftlo, ylo = xywhere(left, y, xlolims) caplines.extend( self.plot(leftlo, ylo, ls='None', marker=mlines.CARETLEFT, **plot_kw) ) xlolims = ~xlolims leftlo, ylo = xywhere(left, y, xlolims) caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) ) else: caplines.extend( self.plot(left, y, 'k|', **plot_kw) ) if xuplims.any(): rightup, yup = xywhere(right, y, xuplims) caplines.extend( self.plot(rightup, yup, ls='None', marker=mlines.CARETRIGHT, **plot_kw) ) xuplims = ~xuplims rightup, yup = xywhere(right, y, xuplims) caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) ) else: caplines.extend( self.plot(right, y, 'k|', **plot_kw) ) if yerr is not None: if (iterable(yerr) and len(yerr)==2 and iterable(yerr[0]) and iterable(yerr[1])): # using list comps rather than arrays to preserve units lower = [thisy-thiserr for (thisy, thiserr) in cbook.safezip(y,yerr[0])] upper = [thisy+thiserr for (thisy, thiserr) in cbook.safezip(y,yerr[1])] else: # using list comps rather than arrays to preserve units lower = [thisy-thiserr for (thisy, thiserr) in cbook.safezip(y,yerr)] upper = [thisy+thiserr for (thisy, thiserr) in cbook.safezip(y,yerr)] barcols.append( self.vlines(x, lower, upper, **lines_kw) ) if capsize > 0: if lolims.any(): xlo, lowerlo = xywhere(x, lower, lolims) caplines.extend( self.plot(xlo, lowerlo, ls='None', marker=mlines.CARETDOWN, **plot_kw) ) lolims = ~lolims xlo, lowerlo = xywhere(x, lower, lolims) caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) ) else: caplines.extend( self.plot(x, lower, 'k_', **plot_kw) ) if uplims.any(): xup, upperup = xywhere(x, upper, uplims) caplines.extend( self.plot(xup, upperup, ls='None', marker=mlines.CARETUP, **plot_kw) ) uplims = ~uplims xup, upperup = xywhere(x, upper, uplims) caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) ) else: caplines.extend( self.plot(x, upper, 'k_', **plot_kw) ) if not barsabove and fmt is not None: l0, = self.plot(x,y,fmt,**kwargs) if ecolor is None: if l0 is None: ecolor = self._get_lines._get_next_cycle_color() else: ecolor = l0.get_color() for l in barcols: l.set_color(ecolor) for l in caplines: l.set_color(ecolor) self.autoscale_view() return (l0, caplines, barcols) errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5, positions=None, widths=None): """ call signature:: boxplot(x, notch=0, sym='+', vert=1, whis=1.5, positions=None, widths=None) Make a box and whisker plot for each column of *x* or each vector in sequence *x*. The box extends from the lower to upper quartile values of the data, with a line at the median. The whiskers extend from the box to show the range of the data. Flier points are those past the end of the whiskers. - *notch* = 0 (default) produces a rectangular box plot. 
- *notch* = 1 will produce a notched box plot *sym* (default 'b+') is the default symbol for flier points. Enter an empty string ('') if you don't want to show fliers. - *vert* = 1 (default) makes the boxes vertical. - *vert* = 0 makes horizontal boxes. This seems goofy, but that's how Matlab did it. *whis* (default 1.5) defines the length of the whiskers as a function of the inner quartile range. They extend to the most extreme data point within ( ``whis*(75%-25%)`` ) data range. *positions* (default 1,2,...,n) sets the horizontal positions of the boxes. The ticks and limits are automatically set to match the positions. *widths* is either a scalar or a vector and sets the width of each box. The default is 0.5, or ``0.15*(distance between extreme positions)`` if that is smaller. *x* is an array or a sequence of vectors. Returns a dictionary mapping each component of the boxplot to a list of the :class:`matplotlib.lines.Line2D` instances created. **Example:** .. plot:: pyplots/boxplot_demo.py """ if not self._hold: self.cla() holdStatus = self._hold whiskers, caps, boxes, medians, fliers = [], [], [], [], [] # convert x to a list of vectors if hasattr(x, 'shape'): if len(x.shape) == 1: if hasattr(x[0], 'shape'): x = list(x) else: x = [x,] elif len(x.shape) == 2: nr, nc = x.shape if nr == 1: x = [x] elif nc == 1: x = [x.ravel()] else: x = [x[:,i] for i in xrange(nc)] else: raise ValueError, "input x can have no more than 2 dimensions" if not hasattr(x[0], '__len__'): x = [x] col = len(x) # get some plot info if positions is None: positions = range(1, col + 1) if widths is None: distance = max(positions) - min(positions) widths = min(0.15*max(distance,1.0), 0.5) if isinstance(widths, float) or isinstance(widths, int): widths = np.ones((col,), float) * widths # loop through columns, adding each to plot self.hold(True) for i,pos in enumerate(positions): d = np.ravel(x[i]) row = len(d) # get median and quartiles q1, med, q3 = mlab.prctile(d,[25,50,75]) # get high extreme iq = q3 - q1 hi_val = q3 + whis*iq wisk_hi = np.compress( d <= hi_val , d ) if len(wisk_hi) == 0: wisk_hi = q3 else: wisk_hi = max(wisk_hi) # get low extreme lo_val = q1 - whis*iq wisk_lo = np.compress( d >= lo_val, d ) if len(wisk_lo) == 0: wisk_lo = q1 else: wisk_lo = min(wisk_lo) # get fliers - if we are showing them flier_hi = [] flier_lo = [] flier_hi_x = [] flier_lo_x = [] if len(sym) != 0: flier_hi = np.compress( d > wisk_hi, d ) flier_lo = np.compress( d < wisk_lo, d ) flier_hi_x = np.ones(flier_hi.shape[0]) * pos flier_lo_x = np.ones(flier_lo.shape[0]) * pos # get x locations for fliers, whisker, whisker cap and box sides box_x_min = pos - widths[i] * 0.5 box_x_max = pos + widths[i] * 0.5 wisk_x = np.ones(2) * pos cap_x_min = pos - widths[i] * 0.25 cap_x_max = pos + widths[i] * 0.25 cap_x = [cap_x_min, cap_x_max] # get y location for median med_y = [med, med] # calculate 'regular' plot if notch == 0: # make our box vectors box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ] box_y = [q1, q1, q3, q3, q1 ] # make our median line vectors med_x = [box_x_min, box_x_max] # calculate 'notch' plot else: notch_max = med + 1.57*iq/np.sqrt(row) notch_min = med - 1.57*iq/np.sqrt(row) if notch_max > q3: notch_max = q3 if notch_min < q1: notch_min = q1 # make our notched box vectors box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max, box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min, box_x_min ] box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max, med, notch_min, q1] # make our median line 
vectors med_x = [cap_x_min, cap_x_max] med_y = [med, med] # vertical or horizontal plot? if vert: def doplot(*args): return self.plot(*args) else: def doplot(*args): shuffled = [] for i in xrange(0, len(args), 3): shuffled.extend([args[i+1], args[i], args[i+2]]) return self.plot(*shuffled) whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--', wisk_x, [q3, wisk_hi], 'b--')) caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-', cap_x, [wisk_lo, wisk_lo], 'k-')) boxes.extend(doplot(box_x, box_y, 'b-')) medians.extend(doplot(med_x, med_y, 'r-')) fliers.extend(doplot(flier_hi_x, flier_hi, sym, flier_lo_x, flier_lo, sym)) # fix our axes/ticks up a little if 1 == vert: setticks, setlim = self.set_xticks, self.set_xlim else: setticks, setlim = self.set_yticks, self.set_ylim newlimits = min(positions)-0.5, max(positions)+0.5 setlim(newlimits) setticks(positions) # reset hold status self.hold(holdStatus) return dict(whiskers=whiskers, caps=caps, boxes=boxes, medians=medians, fliers=fliers) def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, faceted=True, verts=None, **kwargs): """ call signatures:: scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, verts=None, **kwargs) Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D sequences of the same length, *N*. Keyword arguments: *s*: size in points^2. It is a scalar or an array of the same length as *x* and *y*. *c*: a color. *c* can be a single color format string, or a sequence of color specifications of length *N*, or a sequence of *N* numbers to be mapped to colors using the *cmap* and *norm* specified via kwargs (see below). Note that *c* should not be a single numeric RGB or RGBA sequence because that is indistinguishable from an array of values to be colormapped. *c* can be a 2-D array in which the rows are RGB or RGBA, however. *marker*: can be one of: ===== ============== Value Description ===== ============== 's' square 'o' circle '^' triangle up '>' triangle right 'v' triangle down '<' triangle left 'd' diamond 'p' pentagram 'h' hexagon '8' octagon '+' plus 'x' cross ===== ============== The marker can also be a tuple (*numsides*, *style*, *angle*), which will create a custom, regular symbol. *numsides*: the number of sides *style*: the style of the regular symbol: ===== ============================================= Value Description ===== ============================================= 0 a regular polygon 1 a star-like symbol 2 an asterisk 3 a circle (*numsides* and *angle* is ignored) ===== ============================================= *angle*: the angle of rotation of the symbol Finally, *marker* can be (*verts*, 0): *verts* is a sequence of (*x*, *y*) vertices for a custom scatter symbol. Alternatively, use the kwarg combination *marker* = *None*, *verts* = *verts*. Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in which case all masks will be combined and only unmasked points will be plotted. Other keyword arguments: the color mapping and normalization arguments will be used only if *c* is an array of floats. *cmap*: [ None | Colormap ] A :class:`matplotlib.colors.Colormap` instance. If *None*, defaults to rc ``image.cmap``. *cmap* is only used if *c* is an array of floats. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0, 1. If *None*, use the default :func:`normalize`. *norm* is only used if *c* is an array of floats. 
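# --- Added usage sketch (illustrative; not part of the original source) ---
# Illustrates the Axes.boxplot() method implemented above, using random
# columns as the "vectors" it describes.  The pyplot setup and the data are
# assumptions made for the example only.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
data = np.random.randn(100, 4)              # four columns -> four boxes
result = ax.boxplot(data, notch=0, sym='b+', vert=1, whis=1.5)
# result maps component names ('boxes', 'whiskers', 'caps', 'medians',
# 'fliers') to the Line2D instances that were created.
plt.show()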
*vmin*/*vmax*: *vmin* and *vmax* are used in conjunction with norm to normalize luminance data. If either are None, the min and max of the color array *C* is used. Note if you pass a *norm* instance, your settings for *vmin* and *vmax* will be ignored. *alpha*: 0 <= scalar <= 1 The alpha value for the patches *linewidths*: [ None | scalar | sequence ] If *None*, defaults to (lines.linewidth,). Note that this is a tuple, and if you set the linewidths argument you must set it as a sequence of floats, as required by :class:`~matplotlib.collections.RegularPolyCollection`. Optional kwargs control the :class:`~matplotlib.collections.Collection` properties; in particular: *edgecolors*: 'none' to plot faces with no outlines *facecolors*: 'none' to plot unfilled outlines Here are the standard descriptions of all the :class:`~matplotlib.collections.Collection` kwargs: %(Collection)s A :class:`~matplotlib.collections.Collection` instance is returned. """ if not self._hold: self.cla() syms = { # a dict from symbol to (numsides, angle) 's' : (4,math.pi/4.0,0), # square 'o' : (20,3,0), # circle '^' : (3,0,0), # triangle up '>' : (3,math.pi/2.0,0), # triangle right 'v' : (3,math.pi,0), # triangle down '<' : (3,3*math.pi/2.0,0), # triangle left 'd' : (4,0,0), # diamond 'p' : (5,0,0), # pentagram 'h' : (6,0,0), # hexagon '8' : (8,0,0), # octagon '+' : (4,0,2), # plus 'x' : (4,math.pi/4.0,2) # cross } self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) x, y, s, c = cbook.delete_masked_points(x, y, s, c) if is_string_like(c) or cbook.is_sequence_of_strings(c): colors = mcolors.colorConverter.to_rgba_array(c, alpha) else: sh = np.shape(c) # The inherent ambiguity is resolved in favor of color # mapping, not interpretation as rgb or rgba: if len(sh) == 1 and sh[0] == len(x): colors = None # use cmap, norm after collection is created else: colors = mcolors.colorConverter.to_rgba_array(c, alpha) if not iterable(s): scales = (s,) else: scales = s if faceted: edgecolors = None else: edgecolors = 'none' warnings.warn( '''replace "faceted=False" with "edgecolors='none'"''', DeprecationWarning) #2008/04/18 sym = None symstyle = 0 # to be API compatible if marker is None and not (verts is None): marker = (verts, 0) verts = None if is_string_like(marker): # the standard way to define symbols using a string character sym = syms.get(marker) if sym is None and verts is None: raise ValueError('Unknown marker symbol to scatter') numsides, rotation, symstyle = syms[marker] elif iterable(marker): # accept marker to be: # (numsides, style, [angle]) # or # (verts[], style, [angle]) if len(marker)<2 or len(marker)>3: raise ValueError('Cannot create markersymbol from marker') if cbook.is_numlike(marker[0]): # (numsides, style, [angle]) if len(marker)==2: numsides, rotation = marker[0], 0. 
elif len(marker)==3: numsides, rotation = marker[0], marker[2] sym = True if marker[1] in (1,2): symstyle = marker[1] else: verts = np.asarray(marker[0]) if sym is not None: if symstyle==0: collection = mcoll.RegularPolyCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==1: collection = mcoll.StarPolygonCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==2: collection = mcoll.AsteriskPolygonCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==3: collection = mcoll.CircleCollection( scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) else: rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2)) verts /= rescale collection = mcoll.PolyCollection( (verts,), scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) collection.set_transform(mtransforms.IdentityTransform()) collection.set_alpha(alpha) collection.update(kwargs) if colors is None: if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_array(np.asarray(c)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() temp_x = x temp_y = y minx = np.amin(temp_x) maxx = np.amax(temp_x) miny = np.amin(temp_y) maxy = np.amax(temp_y) w = maxx-minx h = maxy-miny # the pad is a little hack to deal with the fact that we don't # want to transform all the symbols whose scales are in points # to data coords to get the exact bounding box for efficiency # reasons. It can be done right if this is deemed important padx, pady = 0.05*w, 0.05*h corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady) self.update_datalim( corners) self.autoscale_view() # add the collection last self.add_collection(collection) return collection scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd def hexbin(self, x, y, C = None, gridsize = 100, bins = None, xscale = 'linear', yscale = 'linear', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, edgecolors='none', reduce_C_function = np.mean, **kwargs): """ call signature:: hexbin(x, y, C = None, gridsize = 100, bins = None, xscale = 'linear', yscale = 'linear', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, edgecolors='none' reduce_C_function = np.mean, **kwargs) Make a hexagonal binning plot of *x* versus *y*, where *x*, *y* are 1-D sequences of the same length, *N*. If *C* is None (the default), this is a histogram of the number of occurences of the observations at (x[i],y[i]). If *C* is specified, it specifies values at the coordinate (x[i],y[i]). These values are accumulated for each hexagonal bin and then reduced according to *reduce_C_function*, which defaults to numpy's mean function (np.mean). (If *C* is specified, it must also be a 1-D sequence of the same length as *x* and *y*.) *x*, *y* and/or *C* may be masked arrays, in which case only unmasked points will be plotted. 
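# --- Added usage sketch (illustrative; not part of the original source) ---
# Exercises the color-mapping path of Axes.scatter() described above: *c* is
# a float array of the same length as *x*/*y*, so it is mapped through *cmap*.
# The data values and the chosen colormap are arbitrary.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

fig = plt.figure()
ax = fig.add_subplot(111)
x = np.random.rand(50)
y = np.random.rand(50)
sizes = 100 * np.random.rand(50)            # marker areas in points^2
values = np.random.rand(50)                 # mapped to colors via cmap
coll = ax.scatter(x, y, s=sizes, c=values, marker='o', cmap=cm.jet)
plt.show()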
Optional keyword arguments: *gridsize*: [ 100 | integer ] The number of hexagons in the *x*-direction, default is 100. The corresponding number of hexagons in the *y*-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the *x*-direction and the *y*-direction. *bins*: [ None | 'log' | integer | sequence ] If *None*, no binning is applied; the color of each hexagon directly corresponds to its count value. If 'log', use a logarithmic scale for the color map. Internally, :math:`log_{10}(i+1)` is used to determine the hexagon color. If an integer, divide the counts in the specified number of bins, and color the hexagons accordingly. If a sequence of values, the values of the lower bound of the bins to be used. *xscale*: [ 'linear' | 'log' ] Use a linear or log10 scale on the horizontal axis. *scale*: [ 'linear' | 'log' ] Use a linear or log10 scale on the vertical axis. Other keyword arguments controlling color mapping and normalization arguments: *cmap*: [ None | Colormap ] a :class:`matplotlib.cm.Colormap` instance. If *None*, defaults to rc ``image.cmap``. *norm*: [ None | Normalize ] :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. *vmin*/*vmax*: scalar *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. Note if you pass a norm instance, your settings for *vmin* and *vmax* will be ignored. *alpha*: scalar the alpha value for the patches *linewidths*: [ None | scalar ] If *None*, defaults to rc lines.linewidth. Note that this is a tuple, and if you set the linewidths argument you must set it as a sequence of floats, as required by :class:`~matplotlib.collections.RegularPolyCollection`. Other keyword arguments controlling the Collection properties: *edgecolors*: [ None | mpl color | color sequence ] If 'none', draws the edges in the same color as the fill color. This is the default, as it avoids unsightly unpainted pixels between the hexagons. If *None*, draws the outlines in the default color. If a matplotlib color arg or sequence of rgba tuples, draws the outlines in the specified color. Here are the standard descriptions of all the :class:`~matplotlib.collections.Collection` kwargs: %(Collection)s The return value is a :class:`~matplotlib.collections.PolyCollection` instance; use :meth:`~matplotlib.collection.PolyCollection.get_array` on this :class:`~matplotlib.collections.PolyCollection` to get the counts in each hexagon. **Example:** .. plot:: mpl_examples/pylab_examples/hexbin_demo.py """ if not self._hold: self.cla() self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) x, y, C = cbook.delete_masked_points(x, y, C) # Set the size of the hexagon grid if iterable(gridsize): nx, ny = gridsize else: nx = gridsize ny = int(nx/math.sqrt(3)) # Count the number of data in each hexagon x = np.array(x, float) y = np.array(y, float) if xscale=='log': x = np.log10(x) if yscale=='log': y = np.log10(y) xmin = np.amin(x) xmax = np.amax(x) ymin = np.amin(y) ymax = np.amax(y) # In the x-direction, the hexagons exactly cover the region from # xmin to xmax. Need some padding to avoid roundoff errors. 
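# --- Added usage sketch (illustrative; not part of the original source) ---
# Shows the hexbin() call documented above (its implementation continues
# below): two correlated Gaussian samples are binned and the per-hexagon
# counts are read back via get_array().  The data are invented.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
xs = np.random.standard_normal(10000)
ys = 0.5 * xs + np.random.standard_normal(10000)
coll = ax.hexbin(xs, ys, gridsize=40, bins='log')   # log10(i+1) coloring
counts = coll.get_array()                           # one value per hexagon
plt.show()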
padding = 1.e-9 * (xmax - xmin) xmin -= padding xmax += padding sx = (xmax-xmin) / nx sy = (ymax-ymin) / ny x = (x-xmin)/sx y = (y-ymin)/sy ix1 = np.round(x).astype(int) iy1 = np.round(y).astype(int) ix2 = np.floor(x).astype(int) iy2 = np.floor(y).astype(int) nx1 = nx + 1 ny1 = ny + 1 nx2 = nx ny2 = ny n = nx1*ny1+nx2*ny2 d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2 d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2 bdist = (d1<d2) if C is None: accum = np.zeros(n) # Create appropriate views into "accum" array. lattice1 = accum[:nx1*ny1] lattice2 = accum[nx1*ny1:] lattice1.shape = (nx1,ny1) lattice2.shape = (nx2,ny2) for i in xrange(len(x)): if bdist[i]: lattice1[ix1[i], iy1[i]]+=1 else: lattice2[ix2[i], iy2[i]]+=1 else: # create accumulation arrays lattice1 = np.empty((nx1,ny1),dtype=object) for i in xrange(nx1): for j in xrange(ny1): lattice1[i,j] = [] lattice2 = np.empty((nx2,ny2),dtype=object) for i in xrange(nx2): for j in xrange(ny2): lattice2[i,j] = [] for i in xrange(len(x)): if bdist[i]: lattice1[ix1[i], iy1[i]].append( C[i] ) else: lattice2[ix2[i], iy2[i]].append( C[i] ) for i in xrange(nx1): for j in xrange(ny1): vals = lattice1[i,j] if len(vals): lattice1[i,j] = reduce_C_function( vals ) else: lattice1[i,j] = np.nan for i in xrange(nx2): for j in xrange(ny2): vals = lattice2[i,j] if len(vals): lattice2[i,j] = reduce_C_function( vals ) else: lattice2[i,j] = np.nan accum = np.hstack(( lattice1.astype(float).ravel(), lattice2.astype(float).ravel())) good_idxs = ~np.isnan(accum) px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0]) py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0 polygons = np.zeros((6, n, 2), float) polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1) polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1) polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2) polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5 if C is not None: # remove accumulation bins with no data polygons = polygons[:,good_idxs,:] accum = accum[good_idxs] polygons = np.transpose(polygons, axes=[1,0,2]) polygons[:,:,0] *= sx polygons[:,:,1] *= sy polygons[:,:,0] += px polygons[:,:,1] += py if xscale=='log': polygons[:,:,0] = 10**(polygons[:,:,0]) xmin = 10**xmin xmax = 10**xmax self.set_xscale('log') if yscale=='log': polygons[:,:,1] = 10**(polygons[:,:,1]) ymin = 10**ymin ymax = 10**ymax self.set_yscale('log') if edgecolors=='none': edgecolors = 'face' collection = mcoll.PolyCollection( polygons, edgecolors = edgecolors, linewidths = linewidths, transOffset = self.transData, ) # Transform accum if needed if bins=='log': accum = np.log10(accum+1) elif bins!=None: if not iterable(bins): minimum, maximum = min(accum), max(accum) bins-=1 # one less edge than bins bins = minimum + (maximum-minimum)*np.arange(bins)/bins bins = np.sort(bins) accum = bins.searchsorted(accum) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_array(accum) collection.set_cmap(cmap) collection.set_norm(norm) collection.set_alpha(alpha) collection.update(kwargs) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() corners = ((xmin, ymin), (xmax, ymax)) self.update_datalim( corners) self.autoscale_view() # add the collection last self.add_collection(collection) return collection hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd def arrow(self, x, y, dx, dy, **kwargs): """ call signature:: arrow(x, y, dx, dy, **kwargs) Draws arrow on specified 
axis from (*x*, *y*) to (*x* + *dx*, *y* + *dy*). Optional kwargs control the arrow properties: %(FancyArrow)s **Example:** .. plot:: mpl_examples/pylab_examples/arrow_demo.py """ a = mpatches.FancyArrow(x, y, dx, dy, **kwargs) self.add_artist(a) return a arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd def quiverkey(self, *args, **kw): qk = mquiver.QuiverKey(*args, **kw) self.add_artist(qk) return qk quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc def quiver(self, *args, **kw): if not self._hold: self.cla() q = mquiver.Quiver(self, *args, **kw) self.add_collection(q, False) self.update_datalim(q.XY) self.autoscale_view() return q quiver.__doc__ = mquiver.Quiver.quiver_doc def barbs(self, *args, **kw): """ %(barbs_doc)s **Example:** .. plot:: mpl_examples/pylab_examples/barb_demo.py """ if not self._hold: self.cla() b = mquiver.Barbs(self, *args, **kw) self.add_collection(b) self.update_datalim(b.get_offsets()) self.autoscale_view() return b barbs.__doc__ = cbook.dedent(barbs.__doc__) % { 'barbs_doc': mquiver.Barbs.barbs_doc} def fill(self, *args, **kwargs): """ call signature:: fill(*args, **kwargs) Plot filled polygons. *args* is a variable length argument, allowing for multiple *x*, *y* pairs with an optional color format string; see :func:`~matplotlib.pyplot.plot` for details on the argument parsing. For example, to plot a polygon with vertices at *x*, *y* in blue.:: ax.fill(x,y, 'b' ) An arbitrary number of *x*, *y*, *color* groups can be specified:: ax.fill(x1, y1, 'g', x2, y2, 'r') Return value is a list of :class:`~matplotlib.patches.Patch` instances that were added. The same color strings that :func:`~matplotlib.pyplot.plot` supports are supported by the fill format string. If you would like to fill below a curve, eg. shade a region between 0 and *y* along *x*, use :meth:`fill_between` The *closed* kwarg will close the polygon when *True* (default). kwargs control the Polygon properties: %(Polygon)s **Example:** .. plot:: mpl_examples/pylab_examples/fill_demo.py """ if not self._hold: self.cla() patches = [] for poly in self._get_patches_for_fill(*args, **kwargs): self.add_patch( poly ) patches.append( poly ) self.autoscale_view() return patches fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd def fill_between(self, x, y1, y2=0, where=None, **kwargs): """ call signature:: fill_between(x, y1, y2=0, where=None, **kwargs) Create a :class:`~matplotlib.collections.PolyCollection` filling the regions between *y1* and *y2* where ``where==True`` *x* an N length np array of the x data *y1* an N length scalar or np array of the x data *y2* an N length scalar or np array of the x data *where* if None, default to fill between everywhere. If not None, it is a a N length numpy boolean array and the fill will only happen over the regions where ``where==True`` *kwargs* keyword args passed on to the :class:`PolyCollection` kwargs control the Polygon properties: %(PolyCollection)s .. 
plot:: mpl_examples/pylab_examples/fill_between.py """ # Handle united data, such as dates self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs) self._process_unit_info(ydata=y2) # Convert the arrays so we can work with them x = np.asarray(self.convert_xunits(x)) y1 = np.asarray(self.convert_yunits(y1)) y2 = np.asarray(self.convert_yunits(y2)) if not cbook.iterable(y1): y1 = np.ones_like(x)*y1 if not cbook.iterable(y2): y2 = np.ones_like(x)*y2 if where is None: where = np.ones(len(x), np.bool) where = np.asarray(where) assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where)) polys = [] for ind0, ind1 in mlab.contiguous_regions(where): theseverts = [] xslice = x[ind0:ind1] y1slice = y1[ind0:ind1] y2slice = y2[ind0:ind1] if not len(xslice): continue N = len(xslice) X = np.zeros((2*N+2, 2), np.float) # the purpose of the next two lines is for when y2 is a # scalar like 0 and we want the fill to go all the way # down to 0 even if none of the y1 sample points do X[0] = xslice[0], y2slice[0] X[N+1] = xslice[-1], y2slice[-1] X[1:N+1,0] = xslice X[1:N+1,1] = y1slice X[N+2:,0] = xslice[::-1] X[N+2:,1] = y2slice[::-1] polys.append(X) collection = mcoll.PolyCollection(polys, **kwargs) # now update the datalim and autoscale XY1 = np.array([x[where], y1[where]]).T XY2 = np.array([x[where], y2[where]]).T self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits, updatex=True, updatey=True) self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits, updatex=False, updatey=True) self.add_collection(collection) self.autoscale_view() return collection fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd #### plotting z(x,y): imshow, pcolor and relatives, contour def imshow(self, X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=1.0, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, **kwargs): """ call signature:: imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=1.0, vmin=None, vmax=None, origin=None, extent=None, **kwargs) Display the image in *X* to current axes. *X* may be a float array, a uint8 array or a PIL image. If *X* is an array, *X* can have the following shapes: * MxN -- luminance (grayscale, float array only) * MxNx3 -- RGB (float or uint8 array) * MxNx4 -- RGBA (float or uint8 array) The value for each component of MxNx3 and MxNx4 float arrays should be in the range 0.0 to 1.0; MxN float arrays may be normalised. An :class:`matplotlib.image.AxesImage` instance is returned. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet. If *None*, default to rc ``image.cmap`` value. *cmap* is ignored when *X* has RGB(A) information *aspect*: [ None | 'auto' | 'equal' | scalar ] If 'auto', changes the image aspect ratio to match that of the axes If 'equal', and *extent* is *None*, changes the axes aspect ratio to match that of the image. If *extent* is not *None*, the axes aspect ratio is changed to match that of the extent. If *None*, default to rc ``image.aspect`` value. *interpolation*: Acceptable values are *None*, 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', If *interpolation* is *None*, default to rc ``image.interpolation``. 
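# --- Added usage sketch (illustrative; not part of the original source) ---
# Demonstrates the fill_between() method defined above: the region between a
# curve and zero is filled only where the boolean *where* mask is True.  The
# pyplot setup and the PolyCollection kwargs are example assumptions.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
x = np.linspace(0, 2 * np.pi, 200)
y = np.sin(x)
ax.plot(x, y, 'k-')
coll = ax.fill_between(x, y, 0, where=(y >= 0), facecolor='green', alpha=0.5)
plt.show()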
See also the *filternorm* and *filterrad* parameters *norm*: [ None | Normalize ] An :class:`matplotlib.colors.Normalize` instance; if *None*, default is ``normalization()``. This scales luminance -> 0-1 *norm* is only used for an MxN float array. *vmin*/*vmax*: [ None | scalar ] Used to scale a luminance image to 0-1. If either is *None*, the min and max of the luminance values will be used. Note if *norm* is not *None*, the settings for *vmin* and *vmax* will be ignored. *alpha*: scalar The alpha blending value, between 0 (transparent) and 1 (opaque) *origin*: [ None | 'upper' | 'lower' ] Place the [0,0] index of the array in the upper left or lower left corner of the axes. If *None*, default to rc ``image.origin``. *extent*: [ None | scalars (left, right, bottom, top) ] Eata values of the axes. The default assigns zero-based row, column indices to the *x*, *y* centers of the pixels. *shape*: [ None | scalars (columns, rows) ] For raw buffer images *filternorm*: A parameter for the antigrain image resize filter. From the antigrain documentation, if *filternorm* = 1, the filter normalizes integer values and corrects the rounding errors. It doesn't do anything with the source floating point values, it corrects only integers according to the rule of 1.0 which means that any sum of pixel weights must be equal to 1.0. So, the filter function must produce a graph of the proper shape. *filterrad*: The filter radius for filters that have a radius parameter, i.e. when interpolation is one of: 'sinc', 'lanczos' or 'blackman' Additional kwargs are :class:`~matplotlib.artist.Artist` properties: %(Artist)s **Example:** .. plot:: mpl_examples/pylab_examples/image_demo.py """ if not self._hold: self.cla() if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) if aspect is None: aspect = rcParams['image.aspect'] self.set_aspect(aspect) im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent, filternorm=filternorm, filterrad=filterrad, resample=resample, **kwargs) im.set_data(X) im.set_alpha(alpha) self._set_artist_props(im) im.set_clip_path(self.patch) #if norm is None and shape is None: # im.set_clim(vmin, vmax) if vmin is not None or vmax is not None: im.set_clim(vmin, vmax) else: im.autoscale_None() im.set_url(url) xmin, xmax, ymin, ymax = im.get_extent() corners = (xmin, ymin), (xmax, ymax) self.update_datalim(corners) if self._autoscaleon: self.set_xlim((xmin, xmax)) self.set_ylim((ymin, ymax)) self.images.append(im) return im imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd def _pcolorargs(self, funcname, *args): if len(args)==1: C = args[0] numRows, numCols = C.shape X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) ) elif len(args)==3: X, Y, C = args else: raise TypeError( 'Illegal arguments to %s; see help(%s)' % (funcname, funcname)) Nx = X.shape[-1] Ny = Y.shape[0] if len(X.shape) <> 2 or X.shape[0] == 1: x = X.reshape(1,Nx) X = x.repeat(Ny, axis=0) if len(Y.shape) <> 2 or Y.shape[1] == 1: y = Y.reshape(Ny, 1) Y = y.repeat(Nx, axis=1) if X.shape != Y.shape: raise TypeError( 'Incompatible X, Y inputs to %s; see help(%s)' % ( funcname, funcname)) return X, Y, C def pcolor(self, *args, **kwargs): """ call signatures:: pcolor(C, **kwargs) pcolor(X, Y, C, **kwargs) Create a pseudocolor plot of a 2-D array. *C* is the array of color values. 
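# --- Added usage sketch (illustrative; not part of the original source) ---
# Displays an MxN float array with the imshow() method defined above, using
# an explicit extent, colormap and interpolation from its keyword
# documentation.  The array contents are random sample data.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

fig = plt.figure()
ax = fig.add_subplot(111)
Z = np.random.rand(20, 30)                  # MxN luminance data
im = ax.imshow(Z, cmap=cm.gray, interpolation='nearest',
               origin='lower', extent=(0, 3, 0, 2), vmin=0.0, vmax=1.0)
fig.colorbar(im)
plt.show()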
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of the colored quadrilaterals; the quadrilateral for C[i,j] has corners at:: (X[i, j], Y[i, j]), (X[i, j+1], Y[i, j+1]), (X[i+1, j], Y[i+1, j]), (X[i+1, j+1], Y[i+1, j+1]). Ideally the dimensions of *X* and *Y* should be one greater than those of *C*; if the dimensions are the same, then the last row and column of *C* will be ignored. Note that the the column index corresponds to the *x*-coordinate, and the row index corresponds to *y*; for details, see the :ref:`Grid Orientation <axes-pcolor-grid-orientation>` section below. If either or both of *X* and *Y* are 1-D arrays or column vectors, they will be expanded as needed into the appropriate 2-D arrays, making a rectangular grid. *X*, *Y* and *C* may be masked arrays. If either C[i, j], or one of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j], [i, j+1],[i+1, j+1]) is masked, nothing is plotted. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance. If *None*, use rc settings. norm: [ None | Normalize ] An :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. If *None*, defaults to :func:`normalize`. *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a *norm* instance, *vmin* and *vmax* will be ignored. *shading*: [ 'flat' | 'faceted' ] If 'faceted', a black grid is drawn around each rectangle; if 'flat', edges are not drawn. Default is 'flat', contrary to Matlab(TM). This kwarg is deprecated; please use 'edgecolors' instead: * shading='flat' -- edgecolors='None' * shading='faceted -- edgecolors='k' *edgecolors*: [ None | 'None' | color | color sequence] If *None*, the rc setting is used by default. If 'None', edges will not be visible. An mpl color or sequence of colors will set the edge color *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is a :class:`matplotlib.collection.Collection` instance. .. _axes-pcolor-grid-orientation: The grid orientation follows the Matlab(TM) convention: an array *C* with shape (*nrows*, *ncolumns*) is plotted with the column number as *X* and the row number as *Y*, increasing up; hence it is plotted the way the array would be printed, except that the *Y* axis is reversed. That is, *C* is taken as *C*(*y*, *x*). Similarly for :func:`~matplotlib.pyplot.meshgrid`:: x = np.arange(5) y = np.arange(3) X, Y = meshgrid(x,y) is equivalent to: X = array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Y = array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2]]) so if you have:: C = rand( len(x), len(y)) then you need:: pcolor(X, Y, C.T) or:: pcolor(C.T) Matlab :func:`pcolor` always discards the last row and column of *C*, but matplotlib displays the last row and column if *X* and *Y* are not specified, or if *X* and *Y* have one more row and column than *C*. kwargs can be used to control the :class:`~matplotlib.collection.PolyCollection` properties: %(PolyCollection)s """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) shading = kwargs.pop('shading', 'flat') X, Y, C = self._pcolorargs('pcolor', *args) Ny, Nx = X.shape # convert to MA, if necessary. 
C = ma.asarray(C) X = ma.asarray(X) Y = ma.asarray(Y) mask = ma.getmaskarray(X)+ma.getmaskarray(Y) xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1] # don't plot if C or any of the surrounding vertices are masked. mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask newaxis = np.newaxis compress = np.compress ravelmask = (mask==0).ravel() X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel()) Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel()) X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel()) Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel()) X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel()) Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel()) X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel()) Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel()) npoly = len(X1) xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis], X2[:,newaxis], Y2[:,newaxis], X3[:,newaxis], Y3[:,newaxis], X4[:,newaxis], Y4[:,newaxis], X1[:,newaxis], Y1[:,newaxis]), axis=1) verts = xy.reshape((npoly, 5, 2)) #verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4)) C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel()) if shading == 'faceted': edgecolors = (0,0,0,1), linewidths = (0.25,) else: edgecolors = 'face' linewidths = (1.0,) kwargs.setdefault('edgecolors', edgecolors) kwargs.setdefault('antialiaseds', (0,)) kwargs.setdefault('linewidths', linewidths) collection = mcoll.PolyCollection(verts, **kwargs) collection.set_alpha(alpha) collection.set_array(C) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() self.grid(False) x = X.compressed() y = Y.compressed() minx = np.amin(x) maxx = np.amax(x) miny = np.amin(y) maxy = np.amax(y) corners = (minx, miny), (maxx, maxy) self.update_datalim( corners) self.autoscale_view() self.add_collection(collection) return collection pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd def pcolormesh(self, *args, **kwargs): """ call signatures:: pcolormesh(C) pcolormesh(X, Y, C) pcolormesh(C, **kwargs) *C* may be a masked array, but *X* and *Y* may not. Masked array support is implemented via *cmap* and *norm*; in contrast, :func:`~matplotlib.pyplot.pcolor` simply does not draw quadrilaterals with masked colors or vertices. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance. If None, use rc settings. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. If None, defaults to :func:`normalize`. *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a *norm* instance, *vmin* and *vmax* will be ignored. *shading*: [ 'flat' | 'faceted' ] If 'faceted', a black grid is drawn around each rectangle; if 'flat', edges are not drawn. Default is 'flat', contrary to Matlab(TM). This kwarg is deprecated; please use 'edgecolors' instead: * shading='flat' -- edgecolors='None' * shading='faceted -- edgecolors='k' *edgecolors*: [ None | 'None' | color | color sequence] If None, the rc setting is used by default. If 'None', edges will not be visible. 
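# --- Added usage sketch (illustrative; not part of the original source) ---
# Follows the grid-orientation note in the pcolor() docstring above: X and Y
# come from meshgrid and have one more row and column than C, so no data are
# discarded.  The grid sizes and values are arbitrary.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
X, Y = np.meshgrid(np.arange(6), np.arange(4))   # shape (4, 6)
C = np.random.rand(3, 5)                         # one fewer row and column
coll = ax.pcolor(X, Y, C, edgecolors='k')
plt.show()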
An mpl color or sequence of colors will set the edge color *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is a :class:`matplotlib.collection.QuadMesh` object. kwargs can be used to control the :class:`matplotlib.collections.QuadMesh` properties: %(QuadMesh)s .. seealso:: :func:`~matplotlib.pyplot.pcolor`: For an explanation of the grid orientation and the expansion of 1-D *X* and/or *Y* to 2-D arrays. """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) shading = kwargs.pop('shading', 'flat') edgecolors = kwargs.pop('edgecolors', 'None') antialiased = kwargs.pop('antialiased', False) X, Y, C = self._pcolorargs('pcolormesh', *args) Ny, Nx = X.shape # convert to one dimensional arrays C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at # lower left corner X = X.ravel() Y = Y.ravel() coords = np.zeros(((Nx * Ny), 2), dtype=float) coords[:, 0] = X coords[:, 1] = Y if shading == 'faceted' or edgecolors != 'None': showedges = 1 else: showedges = 0 collection = mcoll.QuadMesh( Nx - 1, Ny - 1, coords, showedges, antialiased=antialiased) # kwargs are not used collection.set_alpha(alpha) collection.set_array(C) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() self.grid(False) minx = np.amin(X) maxx = np.amax(X) miny = np.amin(Y) maxy = np.amax(Y) corners = (minx, miny), (maxx, maxy) self.update_datalim( corners) self.autoscale_view() self.add_collection(collection) return collection pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd def pcolorfast(self, *args, **kwargs): """ pseudocolor plot of a 2-D array Experimental; this is a version of pcolor that does not draw lines, that provides the fastest possible rendering with the Agg backend, and that can handle any quadrilateral grid. Call signatures:: pcolor(C, **kwargs) pcolor(xr, yr, C, **kwargs) pcolor(x, y, C, **kwargs) pcolor(X, Y, C, **kwargs) C is the 2D array of color values corresponding to quadrilateral cells. Let (nr, nc) be its shape. C may be a masked array. ``pcolor(C, **kwargs)`` is equivalent to ``pcolor([0,nc], [0,nr], C, **kwargs)`` *xr*, *yr* specify the ranges of *x* and *y* corresponding to the rectangular region bounding *C*. If:: xr = [x0, x1] and:: yr = [y0,y1] then *x* goes from *x0* to *x1* as the second index of *C* goes from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of cell (0,0), and (*x1*, *y1*) is the outermost corner of cell (*nr*-1, *nc*-1). All cells are rectangles of the same size. This is the fastest version. *x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively, giving the x and y boundaries of the cells. Hence the cells are rectangular but the grid may be nonuniform. The speed is intermediate. (The grid is checked, and if found to be uniform the fast version is used.) *X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify the (x,y) coordinates of the corners of the colored quadrilaterals; the quadrilateral for C[i,j] has corners at (X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]), (X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular. This is the most general, but the slowest to render. 
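# --- Added usage sketch (illustrative; not part of the original source) ---
# Same call pattern as pcolor(), but through the pcolormesh() method above,
# which returns a QuadMesh and accepts a masked *C* handled via cmap/norm.
# The mask threshold and grid are invented for the example.
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
X, Y = np.meshgrid(np.arange(11), np.arange(8))
C = ma.masked_less(np.random.rand(7, 10), 0.1)   # cells below 0.1 are masked
mesh = ax.pcolormesh(X, Y, C)
plt.show()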
It may produce faster and more compact output using ps, pdf, and svg backends, however. Note that the the column index corresponds to the x-coordinate, and the row index corresponds to y; for details, see the "Grid Orientation" section below. Optional keyword arguments: *cmap*: [ None | Colormap ] A cm Colormap instance from cm. If None, use rc settings. *norm*: [ None | Normalize ] An mcolors.Normalize instance is used to scale luminance data to 0,1. If None, defaults to normalize() *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with norm to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a norm instance, *vmin* and *vmax* will be *None*. *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is an image if a regular or rectangular grid is specified, and a QuadMesh collection in the general quadrilateral case. """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) C = args[-1] nr, nc = C.shape if len(args) == 1: style = "image" x = [0, nc] y = [0, nr] elif len(args) == 3: x, y = args[:2] x = np.asarray(x) y = np.asarray(y) if x.ndim == 1 and y.ndim == 1: if x.size == 2 and y.size == 2: style = "image" else: dx = np.diff(x) dy = np.diff(y) if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and np.ptp(dy) < 0.01*np.abs(dy.mean())): style = "image" else: style = "pcolorimage" elif x.ndim == 2 and y.ndim == 2: style = "quadmesh" else: raise TypeError("arguments do not match valid signatures") else: raise TypeError("need 1 argument or 3 arguments") if style == "quadmesh": # convert to one dimensional arrays # This should also be moved to the QuadMesh class C = ma.ravel(C) # data point in each cell is value # at lower left corner X = x.ravel() Y = y.ravel() Nx = nc+1 Ny = nr+1 # The following needs to be cleaned up; the renderer # requires separate contiguous arrays for X and Y, # but the QuadMesh class requires the 2D array. coords = np.empty(((Nx * Ny), 2), np.float64) coords[:, 0] = X coords[:, 1] = Y # The QuadMesh class can also be changed to # handle relevant superclass kwargs; the initializer # should do much more than it does now. 
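# --- Added usage sketch (illustrative; not part of the original source) ---
# Uses the fastest "image" form of pcolorfast() described above, where
# *xr*/*yr* give the bounds of a uniform grid of cells (the implementation
# continues below).  The bounds and data are arbitrary sample values.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
C = np.random.rand(50, 80)                  # (nr, nc) array of cell values
img = ax.pcolorfast([0.0, 8.0], [0.0, 5.0], C)   # xr, yr, C -> image style
fig.colorbar(img)
plt.show()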
            collection = mcoll.QuadMesh(nc, nr, coords, 0)
            collection.set_alpha(alpha)
            collection.set_array(C)
            collection.set_cmap(cmap)
            collection.set_norm(norm)
            self.add_collection(collection)
            xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
            ret = collection
        else:
            # One of the image styles:
            xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
            if style == "image":
                im = mimage.AxesImage(self, cmap, norm,
                                      interpolation='nearest',
                                      origin='lower',
                                      extent=(xl, xr, yb, yt),
                                      **kwargs)
                im.set_data(C)
                im.set_alpha(alpha)
                self.images.append(im)
                ret = im
            if style == "pcolorimage":
                im = mimage.PcolorImage(self, x, y, C,
                                        cmap=cmap,
                                        norm=norm,
                                        alpha=alpha,
                                        **kwargs)
                self.images.append(im)
                ret = im

        self._set_artist_props(ret)
        if vmin is not None or vmax is not None:
            ret.set_clim(vmin, vmax)
        else:
            ret.autoscale_None()
        self.update_datalim(np.array([[xl, yb], [xr, yt]]))
        self.autoscale_view(tight=True)
        return ret

    def contour(self, *args, **kwargs):
        if not self._hold: self.cla()
        kwargs['filled'] = False
        return mcontour.ContourSet(self, *args, **kwargs)
    contour.__doc__ = mcontour.ContourSet.contour_doc

    def contourf(self, *args, **kwargs):
        if not self._hold: self.cla()
        kwargs['filled'] = True
        return mcontour.ContourSet(self, *args, **kwargs)
    contourf.__doc__ = mcontour.ContourSet.contour_doc

    def clabel(self, CS, *args, **kwargs):
        return CS.clabel(*args, **kwargs)
    clabel.__doc__ = mcontour.ContourSet.clabel.__doc__

    def table(self, **kwargs):
        """
        call signature::

          table(cellText=None, cellColours=None,
                cellLoc='right', colWidths=None,
                rowLabels=None, rowColours=None, rowLoc='left',
                colLabels=None, colColours=None, colLoc='center',
                loc='bottom', bbox=None):

        Add a table to the current axes.  Returns a
        :class:`matplotlib.table.Table` instance.  For finer grained
        control over tables, use the :class:`~matplotlib.table.Table`
        class and add it to the axes with
        :meth:`~matplotlib.axes.Axes.add_table`.

        Thanks to John Gill for providing the class and table.

        kwargs control the :class:`~matplotlib.table.Table`
        properties:

        %(Table)s
        """
        return mtable.table(self, **kwargs)
    table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd

    def twinx(self):
        """
        call signature::

          ax = twinx()

        create a twin of Axes for generating a plot with a shared
        x-axis but independent y-axis.  The y-axis of self will have
        ticks on the left and the returned axes will have ticks on
        the right.
        """
        ax2 = self.figure.add_axes(self.get_position(True),
                                   sharex=self, frameon=False)
        ax2.yaxis.tick_right()
        ax2.yaxis.set_label_position('right')
        self.yaxis.tick_left()
        return ax2

    def twiny(self):
        """
        call signature::

          ax = twiny()

        create a twin of Axes for generating a plot with a shared
        y-axis but independent x-axis.
The x-axis of self will have ticks on bottom and the returned axes will have ticks on the top """ ax2 = self.figure.add_axes(self.get_position(True), sharey=self, frameon=False) ax2.xaxis.tick_top() ax2.xaxis.set_label_position('top') self.xaxis.tick_bottom() return ax2 def get_shared_x_axes(self): 'Return a copy of the shared axes Grouper object for x axes' return self._shared_x_axes def get_shared_y_axes(self): 'Return a copy of the shared axes Grouper object for y axes' return self._shared_y_axes #### Data analysis def hist(self, x, bins=10, range=None, normed=False, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, **kwargs): """ call signature:: hist(x, bins=10, range=None, normed=False, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, **kwargs) Compute and draw the histogram of *x*. The return value is a tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*, [*patches0*, *patches1*,...]) if the input contains multiple data. Keyword arguments: *bins*: Either an integer number of bins or a sequence giving the bins. *x* are the data to be binned. *x* can be an array, a 2D array with multiple data in its columns, or a list of arrays with data of different length. Note, if *bins* is an integer input argument=numbins, *bins* + 1 bin edges will be returned, compatible with the semantics of :func:`numpy.histogram` with the *new* = True argument. Unequally spaced bins are supported if *bins* is a sequence. *range*: The lower and upper range of the bins. Lower and upper outliers are ignored. If not provided, *range* is (x.min(), x.max()). Range has no effect if *bins* is a sequence. If *bins* is a sequence or *range* is specified, autoscaling is set off (*autoscale_on* is set to *False*) and the xaxis limits are set to encompass the full specified bin range. *normed*: If *True*, the first element of the return tuple will be the counts normalized to form a probability density, i.e., ``n/(len(x)*dbin)``. In a probability density, the integral of the histogram should be 1; you can verify that with a trapezoidal integration of the probability density function:: pdf, bins, patches = ax.hist(...) print np.sum(pdf * np.diff(bins)) *cumulative*: If *True*, then a histogram is computed where each bin gives the counts in that bin plus all bins for smaller values. The last bin gives the total number of datapoints. If *normed* is also *True* then the histogram is normalized such that the last bin equals 1. If *cumulative* evaluates to less than 0 (e.g. -1), the direction of accumulation is reversed. In this case, if *normed* is also *True*, then the histogram is normalized such that the first bin equals 1. *histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ] The type of histogram to draw. - 'bar' is a traditional bar-type histogram. If multiple data are given the bars are aranged side by side. - 'barstacked' is a bar-type histogram where multiple data are stacked on top of each other. - 'step' generates a lineplot that is by default unfilled. - 'stepfilled' generates a lineplot that is by default filled. *align*: ['left' | 'mid' | 'right' ] Controls how the histogram is plotted. - 'left': bars are centered on the left bin edges. - 'mid': bars are centered between the bin edges. - 'right': bars are centered on the right bin edges. 
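# --- Added usage sketch (illustrative; not part of the original source) ---
# Overlays two curves with the twinx() method defined above: both share the
# x-axis, but each has its own y-axis, with ticks on the left and right
# respectively.  The curves are arbitrary demonstration data.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax1 = fig.add_subplot(111)
t = np.linspace(0, 10, 200)
ax1.plot(t, np.exp(t / 3.0), 'b-')
ax1.set_ylabel('exp(t/3)')
ax2 = ax1.twinx()                           # shared x-axis, independent y-axis
ax2.plot(t, np.sin(t), 'r--')
ax2.set_ylabel('sin(t)')
plt.show()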
*orientation*: [ 'horizontal' | 'vertical' ] If 'horizontal', :func:`~matplotlib.pyplot.barh` will be used for bar-type histograms and the *bottom* kwarg will be the left edges. *rwidth*: The relative width of the bars as a fraction of the bin width. If *None*, automatically compute the width. Ignored if *histtype* = 'step' or 'stepfilled'. *log*: If *True*, the histogram axis will be set to a log scale. If *log* is *True* and *x* is a 1D array, empty bins will be filtered out and only the non-empty (*n*, *bins*, *patches*) will be returned. kwargs are used to update the properties of the hist :class:`~matplotlib.patches.Rectangle` instances: %(Rectangle)s You can use labels for your histogram, and only the first :class:`~matplotlib.patches.Rectangle` gets the label (the others get the magic string '_nolegend_'. This will make the histograms work in the intuitive way for bar charts:: ax.hist(10+2*np.random.randn(1000), label='men') ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5) ax.legend() **Example:** .. plot:: mpl_examples/pylab_examples/histogram_demo.py """ if not self._hold: self.cla() # NOTE: the range keyword overwrites the built-in func range !!! # needs to be fixed in with numpy !!! if kwargs.get('width') is not None: raise DeprecationWarning( 'hist now uses the rwidth to give relative width ' 'and not absolute width') try: # make sure a copy is created: don't use asarray x = np.transpose(np.array(x)) if len(x.shape)==1: x.shape = (1,x.shape[0]) elif len(x.shape)==2 and x.shape[1]<x.shape[0]: warnings.warn('2D hist should be nsamples x nvariables; ' 'this looks transposed') except ValueError: # multiple hist with data of different length if iterable(x[0]) and not is_string_like(x[0]): tx = [] for i in xrange(len(x)): tx.append( np.array(x[i]) ) x = tx else: raise ValueError, 'Can not use providet data to create a histogram' # Check whether bins or range are given explicitly. In that # case do not autoscale axes. 
binsgiven = (cbook.iterable(bins) or range != None) # check the version of the numpy if np.__version__ < "1.3": # version 1.1 and 1.2 hist_kwargs = dict(range=range, normed=bool(normed), new=True) else: # version 1.3 and later, drop new=True hist_kwargs = dict(range=range, normed=bool(normed)) n = [] for i in xrange(len(x)): # this will automatically overwrite bins, # so that each histogram uses the same bins m, bins = np.histogram(x[i], bins, **hist_kwargs) n.append(m) if cumulative: slc = slice(None) if cbook.is_numlike(cumulative) and cumulative < 0: slc = slice(None,None,-1) if normed: n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n] else: n = [m[slc].cumsum()[slc] for m in n] patches = [] if histtype.startswith('bar'): totwidth = np.diff(bins) stacked = False if rwidth is not None: dr = min(1., max(0., rwidth)) elif len(n)>1: dr = 0.8 else: dr = 1.0 if histtype=='bar': width = dr*totwidth/len(n) dw = width if len(n)>1: boffset = -0.5*dr*totwidth*(1.-1./len(n)) else: boffset = 0.0 elif histtype=='barstacked': width = dr*totwidth boffset, dw = 0.0, 0.0 stacked = True else: raise ValueError, 'invalid histtype: %s' % histtype if align == 'mid' or align == 'edge': boffset += 0.5*totwidth elif align == 'right': boffset += totwidth elif align != 'left' and align != 'center': raise ValueError, 'invalid align: %s' % align if orientation == 'horizontal': for m in n: color = self._get_lines._get_next_cycle_color() patch = self.barh(bins[:-1]+boffset, m, height=width, left=bottom, align='center', log=log, color=color) patches.append(patch) if stacked: if bottom is None: bottom = 0.0 bottom += m boffset += dw elif orientation == 'vertical': for m in n: color = self._get_lines._get_next_cycle_color() patch = self.bar(bins[:-1]+boffset, m, width=width, bottom=bottom, align='center', log=log, color=color) patches.append(patch) if stacked: if bottom is None: bottom = 0.0 bottom += m boffset += dw else: raise ValueError, 'invalid orientation: %s' % orientation elif histtype.startswith('step'): x = np.zeros( 2*len(bins), np.float ) y = np.zeros( 2*len(bins), np.float ) x[0::2], x[1::2] = bins, bins if align == 'left' or align == 'center': x -= 0.5*(bins[1]-bins[0]) elif align == 'right': x += 0.5*(bins[1]-bins[0]) elif align != 'mid' and align != 'edge': raise ValueError, 'invalid align: %s' % align if log: y[0],y[-1] = 1e-100, 1e-100 if orientation == 'horizontal': self.set_xscale('log') elif orientation == 'vertical': self.set_yscale('log') fill = False if histtype == 'stepfilled': fill = True elif histtype != 'step': raise ValueError, 'invalid histtype: %s' % histtype for m in n: y[1:-1:2], y[2::2] = m, m if orientation == 'horizontal': x,y = y,x elif orientation != 'vertical': raise ValueError, 'invalid orientation: %s' % orientation color = self._get_lines._get_next_cycle_color() if fill: patches.append( self.fill(x, y, closed=False, facecolor=color) ) else: patches.append( self.fill(x, y, closed=False, edgecolor=color, fill=False) ) # adopted from adjust_x/ylim part of the bar method if orientation == 'horizontal': xmin, xmax = 0, self.dataLim.intervalx[1] for m in n: xmin = np.amin(m[m!=0]) # filter out the 0 height bins xmin = max(xmin*0.9, 1e-100) self.dataLim.intervalx = (xmin, xmax) elif orientation == 'vertical': ymin, ymax = 0, self.dataLim.intervaly[1] for m in n: ymin = np.amin(m[m!=0]) # filter out the 0 height bins ymin = max(ymin*0.9, 1e-100) self.dataLim.intervaly = (ymin, ymax) self.autoscale_view() else: raise ValueError, 'invalid histtype: %s' % histtype label = 
kwargs.pop('label', '') for patch in patches: for p in patch: p.update(kwargs) p.set_label(label) label = '_nolegend_' if binsgiven: self.set_autoscale_on(False) if orientation == 'vertical': self.autoscale_view(scalex=False, scaley=True) XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1]) self.set_xbound(XL) else: self.autoscale_view(scalex=True, scaley=False) YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1]) self.set_ybound(YL) if len(n)==1: return n[0], bins, cbook.silent_list('Patch', patches[0]) else: return n, bins, cbook.silent_list('Lists of Patches', patches) hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) The power spectral density by Welch's average periodogram method. The vector *x* is divided into *NFFT* length segments. Each segment is detrended by function *detrend* and windowed by function *window*. *noverlap* gives the length of the overlap between segments. The :math:`|\mathrm{fft}(i)|^2` of each segment :math:`i` are averaged to compute *Pxx*, with a scaling to correct for power loss due to windowing. *Fs* is the sampling frequency. %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. Returns the tuple (*Pxx*, *freqs*). For plotting, the power is plotted as :math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself is returned. References: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/psd_demo.py """ if not self._hold: self.cla() pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) pxx.shape = len(freqs), freqs += Fc if scale_by_freq in (None, True): psd_units = 'dB/Hz' else: psd_units = 'dB' self.plot(freqs, 10*np.log10(pxx), **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Power Spectral Density (%s)' % psd_units) self.grid(True) vmin, vmax = self.viewLim.intervaly intv = vmax-vmin logi = int(np.log10(intv)) if logi==0: logi=.1 step = 10*logi #print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1 ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step) self.set_yticks(ticks) return pxx, freqs psd_doc_dict = dict() psd_doc_dict.update(martist.kwdocd) psd_doc_dict.update(mlab.kwdocd) psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD']) psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) The cross spectral density :math:`P_{xy}` by Welch's average periodogram method. The vectors *x* and *y* are divided into *NFFT* length segments. Each segment is detrended by function *detrend* and windowed by function *window*. 
The product of the direct FFTs of *x* and *y* are averaged over each segment to compute :math:`P_{xy}`, with a scaling to correct for power loss due to windowing. Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum (complex valued), and :math:`10\log_{10}|P_{xy}|` is plotted. %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. References: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the Line2D properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/csd_demo.py .. seealso: :meth:`psd` For a description of the optional parameters. """ if not self._hold: self.cla() pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) pxy.shape = len(freqs), # pxy is complex freqs += Fc self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Cross Spectrum Magnitude (dB)') self.grid(True) vmin, vmax = self.viewLim.intervaly intv = vmax-vmin step = 10*int(np.log10(intv)) ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step) self.set_yticks(ticks) return pxy, freqs csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none, window = mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) cohere the coherence between *x* and *y*. Coherence is the normalized cross spectral density: .. math:: C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}} %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. The return value is a tuple (*Cxy*, *f*), where *f* are the frequencies of the coherence vector. kwargs are applied to the lines. References: * Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the :class:`~matplotlib.lines.Line2D` properties of the coherence plot: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/cohere_demo.py """ if not self._hold: self.cla() cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap, scale_by_freq) freqs += Fc self.plot(freqs, cxy, **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Coherence') self.grid(True) return cxy, freqs cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None, pad_to=None, sides='default', scale_by_freq=None): """ call signature:: specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None, pad_to=None, sides='default', scale_by_freq=None) Compute a spectrogram of data in *x*. Data are split into *NFFT* length segments and the PSD of each section is computed. The windowing function *window* is applied to each segment, and the amount of overlap of each segment is specified with *noverlap*. 
%(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the y extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. *cmap*: A :class:`matplotlib.cm.Colormap` instance; if *None* use default determined by rc *xextent*: The image extent along the x-axis. xextent = (xmin,xmax) The default is (0,max(bins)), where bins is the return value from :func:`mlab.specgram` Return value is (*Pxx*, *freqs*, *bins*, *im*): - *bins* are the time points the spectrogram is calculated over - *freqs* is an array of frequencies - *Pxx* is a len(times) x len(freqs) array of power - *im* is a :class:`matplotlib.image.AxesImage` instance Note: If *x* is real (i.e. non-complex), only the positive spectrum is shown. If *x* is complex, both positive and negative parts of the spectrum are shown. This can be overridden using the *sides* keyword argument. **Example:** .. plot:: mpl_examples/pylab_examples/specgram_demo.py """ if not self._hold: self.cla() Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Z = 10. * np.log10(Pxx) Z = np.flipud(Z) if xextent is None: xextent = 0, np.amax(bins) xmin, xmax = xextent freqs += Fc extent = xmin, xmax, freqs[0], freqs[-1] im = self.imshow(Z, cmap, extent=extent) self.axis('auto') return Pxx, freqs, bins, im specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict del psd_doc_dict #So that this does not become an Axes attribute def spy(self, Z, precision=0, marker=None, markersize=None, aspect='equal', **kwargs): """ call signature:: spy(Z, precision=0, marker=None, markersize=None, aspect='equal', **kwargs) ``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*. If *precision* is 0, any non-zero value will be plotted; else, values of :math:`|Z| > precision` will be plotted. For :class:`scipy.sparse.spmatrix` instances, there is a special case: if *precision* is 'present', any value present in the array will be plotted, even if it is identically zero. The array will be plotted as it would be printed, with the first index (row) increasing down and the second index (column) increasing to the right. By default aspect is 'equal', so that each array element occupies a square space; set the aspect kwarg to 'auto' to allow the plot to fill the plot box, or to any scalar number to specify the aspect ratio of an array element directly. Two plotting styles are available: image or marker. Both are available for full arrays, but only the marker style works for :class:`scipy.sparse.spmatrix` instances. If *marker* and *markersize* are *None*, an image will be returned and any remaining kwargs are passed to :func:`~matplotlib.pyplot.imshow`; else, a :class:`~matplotlib.lines.Line2D` object will be returned with the value of marker determining the marker type, and any remaining kwargs passed to the :meth:`~matplotlib.axes.Axes.plot` method. If *marker* and *markersize* are *None*, useful kwargs include: * *cmap* * *alpha* .. seealso:: :func:`~matplotlib.pyplot.imshow` For controlling colors, e.g. cyan background and red marks, use:: cmap = mcolors.ListedColormap(['c','r']) If *marker* or *markersize* is not *None*, useful kwargs include: * *marker* * *markersize* * *color* Useful values for *marker* include: * 's' square (default) * 'o' circle * '.' point * ',' pixel .. 
seealso:: :func:`~matplotlib.pyplot.plot` """ if precision is None: precision = 0 warnings.DeprecationWarning("Use precision=0 instead of None") # 2008/10/03 if marker is None and markersize is None and hasattr(Z, 'tocoo'): marker = 's' if marker is None and markersize is None: Z = np.asarray(Z) mask = np.absolute(Z)>precision if 'cmap' not in kwargs: kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'], name='binary') nr, nc = Z.shape extent = [-0.5, nc-0.5, nr-0.5, -0.5] ret = self.imshow(mask, interpolation='nearest', aspect=aspect, extent=extent, origin='upper', **kwargs) else: if hasattr(Z, 'tocoo'): c = Z.tocoo() if precision == 'present': y = c.row x = c.col else: nonzero = np.absolute(c.data) > precision y = c.row[nonzero] x = c.col[nonzero] else: Z = np.asarray(Z) nonzero = np.absolute(Z)>precision y, x = np.nonzero(nonzero) if marker is None: marker = 's' if markersize is None: markersize = 10 marks = mlines.Line2D(x, y, linestyle='None', marker=marker, markersize=markersize, **kwargs) self.add_line(marks) nr, nc = Z.shape self.set_xlim(xmin=-0.5, xmax=nc-0.5) self.set_ylim(ymin=nr-0.5, ymax=-0.5) self.set_aspect(aspect) ret = marks self.title.set_y(1.05) self.xaxis.tick_top() self.xaxis.set_ticks_position('both') self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) return ret def matshow(self, Z, **kwargs): ''' Plot a matrix or array as an image. The matrix will be shown the way it would be printed, with the first row at the top. Row and column numbering is zero-based. Argument: *Z* anything that can be interpreted as a 2-D array kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`. :meth:`matshow` sets defaults for *extent*, *origin*, *interpolation*, and *aspect*; use care in overriding the *extent* and *origin* kwargs, because they interact. (Also, if you want to change them, you probably should be using imshow directly in your own version of matshow.) Returns: an :class:`matplotlib.image.AxesImage` instance. ''' Z = np.asarray(Z) nr, nc = Z.shape extent = [-0.5, nc-0.5, nr-0.5, -0.5] kw = {'extent': extent, 'origin': 'upper', 'interpolation': 'nearest', 'aspect': 'equal'} # (already the imshow default) kw.update(kwargs) im = self.imshow(Z, **kw) self.title.set_y(1.05) self.xaxis.tick_top() self.xaxis.set_ticks_position('both') self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) return im class SubplotBase: """ Base class for subplots, which are :class:`Axes` instances with additional methods to facilitate generating and manipulating a set of :class:`Axes` within a figure. """ def __init__(self, fig, *args, **kwargs): """ *fig* is a :class:`matplotlib.figure.Figure` instance. *args* is the tuple (*numRows*, *numCols*, *plotNum*), where the array of subplots in the figure has dimensions *numRows*, *numCols*, and where *plotNum* is the number of the subplot being created. *plotNum* starts at 1 in the upper left corner and increases to the right. If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*. 
""" self.figure = fig if len(args)==1: s = str(args[0]) if len(s) != 3: raise ValueError('Argument to subplot must be a 3 digits long') rows, cols, num = map(int, s) elif len(args)==3: rows, cols, num = args else: raise ValueError( 'Illegal argument to subplot') total = rows*cols num -= 1 # convert from matlab to python indexing # ie num in range(0,total) if num >= total: raise ValueError( 'Subplot number exceeds total subplots') self._rows = rows self._cols = cols self._num = num self.update_params() # _axes_class is set in the subplot_class_factory self._axes_class.__init__(self, fig, self.figbox, **kwargs) def get_geometry(self): 'get the subplot geometry, eg 2,2,3' return self._rows, self._cols, self._num+1 # COVERAGE NOTE: Never used internally or from examples def change_geometry(self, numrows, numcols, num): 'change subplot geometry, eg. from 1,1,1 to 2,2,3' self._rows = numrows self._cols = numcols self._num = num-1 self.update_params() self.set_position(self.figbox) def update_params(self): 'update the subplot position from fig.subplotpars' rows = self._rows cols = self._cols num = self._num pars = self.figure.subplotpars left = pars.left right = pars.right bottom = pars.bottom top = pars.top wspace = pars.wspace hspace = pars.hspace totWidth = right-left totHeight = top-bottom figH = totHeight/(rows + hspace*(rows-1)) sepH = hspace*figH figW = totWidth/(cols + wspace*(cols-1)) sepW = wspace*figW rowNum, colNum = divmod(num, cols) figBottom = top - (rowNum+1)*figH - rowNum*sepH figLeft = left + colNum*(figW + sepW) self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom, figW, figH) self.rowNum = rowNum self.colNum = colNum self.numRows = rows self.numCols = cols if 0: print 'rcn', rows, cols, num print 'lbrt', left, bottom, right, top print 'self.figBottom', self.figBottom print 'self.figLeft', self.figLeft print 'self.figW', self.figW print 'self.figH', self.figH print 'self.rowNum', self.rowNum print 'self.colNum', self.colNum print 'self.numRows', self.numRows print 'self.numCols', self.numCols def is_first_col(self): return self.colNum==0 def is_first_row(self): return self.rowNum==0 def is_last_row(self): return self.rowNum==self.numRows-1 def is_last_col(self): return self.colNum==self.numCols-1 # COVERAGE NOTE: Never used internally or from examples def label_outer(self): """ set the visible property on ticklabels so xticklabels are visible only if the subplot is in the last row and yticklabels are visible only if the subplot is in the first column """ lastrow = self.is_last_row() firstcol = self.is_first_col() for label in self.get_xticklabels(): label.set_visible(lastrow) for label in self.get_yticklabels(): label.set_visible(firstcol) _subplot_classes = {} def subplot_class_factory(axes_class=None): # This makes a new class that inherits from SubclassBase and the # given axes_class (which is assumed to be a subclass of Axes). # This is perhaps a little bit roundabout to make a new class on # the fly like this, but it means that a new Subplot class does # not have to be created for every type of Axes. 
if axes_class is None: axes_class = Axes new_class = _subplot_classes.get(axes_class) if new_class is None: new_class = new.classobj("%sSubplot" % (axes_class.__name__), (SubplotBase, axes_class), {'_axes_class': axes_class}) _subplot_classes[axes_class] = new_class return new_class # This is provided for backward compatibility Subplot = subplot_class_factory() martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes) """ # this is some discarded code I was using to find the minimum positive # data point for some log scaling fixes. I realized there was a # cleaner way to do it, but am keeping this around as an example for # how to get the data out of the axes. Might want to make something # like this a method one day, or better yet make get_verts an Artist # method minx, maxx = self.get_xlim() if minx<=0 or maxx<=0: # find the min pos value in the data xs = [] for line in self.lines: xs.extend(line.get_xdata(orig=False)) for patch in self.patches: xs.extend([x for x,y in patch.get_verts()]) for collection in self.collections: xs.extend([x for x,y in collection.get_verts()]) posx = [x for x in xs if x>0] if len(posx): minx = min(posx) maxx = max(posx) # warning, probably breaks inverted axis self.set_xlim((0.1*minx, maxx)) """
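A minimal usage sketch for the plotting APIs documented in the excerpt above (hist and psd), driven through pyplot rather than the raw Axes methods. This is an illustration appended here, not part of axes.py; the synthetic data, bin count and figure layout are assumptions made only for the example.

import numpy as np
import matplotlib.pyplot as plt

# Synthetic data, echoing the hist docstring example above.
x = 10 + 2 * np.random.randn(1000)

fig = plt.figure()
ax1 = fig.add_subplot(211)
n, bins, patches = ax1.hist(x, bins=20, label='men')  # returns (n, bins, patches)
ax1.legend()

ax2 = fig.add_subplot(212)
pxx, freqs = ax2.psd(np.random.randn(1024), NFFT=256, Fs=2)  # Welch periodogram, plotted in dB

plt.show()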
agpl-3.0
nvoron23/scikit-learn
doc/sphinxext/numpy_ext/docscrape_sphinx.py
408
8061
import re import inspect import textwrap import pydoc from .docscrape import NumpyDocString from .docscrape import FunctionDoc from .docscrape import ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config=None): config = {} if config is None else config self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' ' * indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: # GAEL: Toctree commented out below because it creates # hundreds of sphinx warnings # out += ['.. autosummary::', ' :toctree:', ''] out += ['.. autosummary::', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it import sphinx # local import to avoid test dependency if sphinx.__version__ >= "0.6": out += ['.. only:: latex', ''] else: out += ['.. latexonly::', ''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Methods',): out += self._str_member_list(param_list) out = self._str_indent(out, indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config=None): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
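As an illustration of the module above (appended here, not part of the original file): get_doc_object picks the appropriate Sphinx wrapper for a callable, and str() renders the parsed NumPy-style docstring as ReST. The demo function is invented, and the import path is an assumption based on the numpy_ext package named in this record.

from numpy_ext.docscrape_sphinx import get_doc_object  # assumed import path

def demo(a, b=0):
    """Add two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int, optional
        Second operand.

    Returns
    -------
    int
        The sum ``a + b``.
    """
    return a + b

# Emits field-list ReST (':Parameters:', ':Returns:') suitable for Sphinx.
print(str(get_doc_object(demo, what='function')))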
bsd-3-clause
MJuddBooth/pandas
pandas/tests/frame/test_rank.py
1
11364
# -*- coding: utf-8 -*- from datetime import datetime, timedelta from distutils.version import LooseVersion import numpy as np import pytest from pandas import DataFrame, Series from pandas.tests.frame.common import TestData import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal class TestRank(TestData): s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]) df = DataFrame({'A': s, 'B': s}) results = { 'average': np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]), 'min': np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]), 'max': np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]), 'first': np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]), 'dense': np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]), } @pytest.fixture(params=['average', 'min', 'max', 'first', 'dense']) def method(self, request): """ Fixture for trying all rank methods """ return request.param def test_rank(self): rankdata = pytest.importorskip('scipy.stats.rankdata') self.frame['A'][::2] = np.nan self.frame['B'][::3] = np.nan self.frame['C'][::4] = np.nan self.frame['D'][::5] = np.nan ranks0 = self.frame.rank() ranks1 = self.frame.rank(1) mask = np.isnan(self.frame.values) fvals = self.frame.fillna(np.inf).values exp0 = np.apply_along_axis(rankdata, 0, fvals) exp0[mask] = np.nan exp1 = np.apply_along_axis(rankdata, 1, fvals) exp1[mask] = np.nan tm.assert_almost_equal(ranks0.values, exp0) tm.assert_almost_equal(ranks1.values, exp1) # integers df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4))) result = df.rank() exp = df.astype(float).rank() tm.assert_frame_equal(result, exp) result = df.rank(1) exp = df.astype(float).rank(1) tm.assert_frame_equal(result, exp) def test_rank2(self): df = DataFrame([[1, 3, 2], [1, 2, 3]]) expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0 result = df.rank(1, pct=True) tm.assert_frame_equal(result, expected) df = DataFrame([[1, 3, 2], [1, 2, 3]]) expected = df.rank(0) / 2.0 result = df.rank(0, pct=True) tm.assert_frame_equal(result, expected) df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']]) expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]]) result = df.rank(1, numeric_only=False) tm.assert_frame_equal(result, expected) expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]]) result = df.rank(0, numeric_only=False) tm.assert_frame_equal(result, expected) df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']]) expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]]) result = df.rank(1, numeric_only=False) tm.assert_frame_equal(result, expected) expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]]) result = df.rank(0, numeric_only=False) tm.assert_frame_equal(result, expected) # f7u12, this does not work without extensive workaround data = [[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)]] df = DataFrame(data) # check the rank expected = DataFrame([[2., np.nan, 1.], [2., 3., 1.]]) result = df.rank(1, numeric_only=False, ascending=True) tm.assert_frame_equal(result, expected) expected = DataFrame([[1., np.nan, 2.], [2., 1., 3.]]) result = df.rank(1, numeric_only=False, ascending=False) tm.assert_frame_equal(result, expected) # mixed-type frames self.mixed_frame['datetime'] = datetime.now() self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1) result = self.mixed_frame.rank(1) expected = self.mixed_frame.rank(1, numeric_only=True) tm.assert_frame_equal(result, expected) df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]}) exp = 
DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]}) tm.assert_frame_equal(df.rank(), exp) def test_rank_na_option(self): rankdata = pytest.importorskip('scipy.stats.rankdata') self.frame['A'][::2] = np.nan self.frame['B'][::3] = np.nan self.frame['C'][::4] = np.nan self.frame['D'][::5] = np.nan # bottom ranks0 = self.frame.rank(na_option='bottom') ranks1 = self.frame.rank(1, na_option='bottom') fvals = self.frame.fillna(np.inf).values exp0 = np.apply_along_axis(rankdata, 0, fvals) exp1 = np.apply_along_axis(rankdata, 1, fvals) tm.assert_almost_equal(ranks0.values, exp0) tm.assert_almost_equal(ranks1.values, exp1) # top ranks0 = self.frame.rank(na_option='top') ranks1 = self.frame.rank(1, na_option='top') fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values fval1 = self.frame.T fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T fval1 = fval1.fillna(np.inf).values exp0 = np.apply_along_axis(rankdata, 0, fval0) exp1 = np.apply_along_axis(rankdata, 1, fval1) tm.assert_almost_equal(ranks0.values, exp0) tm.assert_almost_equal(ranks1.values, exp1) # descending # bottom ranks0 = self.frame.rank(na_option='top', ascending=False) ranks1 = self.frame.rank(1, na_option='top', ascending=False) fvals = self.frame.fillna(np.inf).values exp0 = np.apply_along_axis(rankdata, 0, -fvals) exp1 = np.apply_along_axis(rankdata, 1, -fvals) tm.assert_almost_equal(ranks0.values, exp0) tm.assert_almost_equal(ranks1.values, exp1) # descending # top ranks0 = self.frame.rank(na_option='bottom', ascending=False) ranks1 = self.frame.rank(1, na_option='bottom', ascending=False) fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values fval1 = self.frame.T fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T fval1 = fval1.fillna(np.inf).values exp0 = np.apply_along_axis(rankdata, 0, -fval0) exp1 = np.apply_along_axis(rankdata, 1, -fval1) tm.assert_numpy_array_equal(ranks0.values, exp0) tm.assert_numpy_array_equal(ranks1.values, exp1) # bad values throw error msg = "na_option must be one of 'keep', 'top', or 'bottom'" with pytest.raises(ValueError, match=msg): self.frame.rank(na_option='bad', ascending=False) # invalid type with pytest.raises(ValueError, match=msg): self.frame.rank(na_option=True, ascending=False) def test_rank_axis(self): # check if using axes' names gives the same result df = DataFrame([[2, 1], [4, 3]]) tm.assert_frame_equal(df.rank(axis=0), df.rank(axis='index')) tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns')) def test_rank_methods_frame(self): pytest.importorskip('scipy.stats.special') rankdata = pytest.importorskip('scipy.stats.rankdata') import scipy xs = np.random.randint(0, 21, (100, 26)) xs = (xs - 10.0) / 10.0 cols = [chr(ord('z') - i) for i in range(xs.shape[1])] for vals in [xs, xs + 1e6, xs * 1e-6]: df = DataFrame(vals, columns=cols) for ax in [0, 1]: for m in ['average', 'min', 'max', 'first', 'dense']: result = df.rank(axis=ax, method=m) sprank = np.apply_along_axis( rankdata, ax, vals, m if m != 'first' else 'ordinal') sprank = sprank.astype(np.float64) expected = DataFrame(sprank, columns=cols) if (LooseVersion(scipy.__version__) >= LooseVersion('0.17.0')): expected = expected.astype('float64') tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('dtype', ['O', 'f8', 'i8']) def test_rank_descending(self, method, dtype): if 'i' in dtype: df = self.df.dropna() else: df = self.df.astype(dtype) res = df.rank(ascending=False) expected = (df.max() - df).rank() assert_frame_equal(res, expected) if method == 'first' and dtype == 'O': return expected = 
(df.max() - df).rank(method=method) if dtype != 'O': res2 = df.rank(method=method, ascending=False, numeric_only=True) assert_frame_equal(res2, expected) res3 = df.rank(method=method, ascending=False, numeric_only=False) assert_frame_equal(res3, expected) @pytest.mark.parametrize('axis', [0, 1]) @pytest.mark.parametrize('dtype', [None, object]) def test_rank_2d_tie_methods(self, method, axis, dtype): df = self.df def _check2d(df, expected, method='average', axis=0): exp_df = DataFrame({'A': expected, 'B': expected}) if axis == 1: df = df.T exp_df = exp_df.T result = df.rank(method=method, axis=axis) assert_frame_equal(result, exp_df) disabled = {(object, 'first')} if (dtype, method) in disabled: return frame = df if dtype is None else df.astype(dtype) _check2d(frame, self.results[method], method=method, axis=axis) @pytest.mark.parametrize( "method,exp", [("dense", [[1., 1., 1.], [1., 0.5, 2. / 3], [1., 0.5, 1. / 3]]), ("min", [[1. / 3, 1., 1.], [1. / 3, 1. / 3, 2. / 3], [1. / 3, 1. / 3, 1. / 3]]), ("max", [[1., 1., 1.], [1., 2. / 3, 2. / 3], [1., 2. / 3, 1. / 3]]), ("average", [[2. / 3, 1., 1.], [2. / 3, 0.5, 2. / 3], [2. / 3, 0.5, 1. / 3]]), ("first", [[1. / 3, 1., 1.], [2. / 3, 1. / 3, 2. / 3], [3. / 3, 2. / 3, 1. / 3]])]) def test_rank_pct_true(self, method, exp): # see gh-15630. df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]]) result = df.rank(method=method, pct=True) expected = DataFrame(exp) tm.assert_frame_equal(result, expected) @pytest.mark.single @pytest.mark.high_memory def test_pct_max_many_rows(self): # GH 18271 df = DataFrame({'A': np.arange(2**24 + 1), 'B': np.arange(2**24 + 1, 0, -1)}) result = df.rank(pct=True).max() assert (result == 1).all()
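A small sketch of the DataFrame.rank behaviour these tests assert, added for illustration only (not part of the test module); the toy values are invented.

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': [1, 3, 2, np.nan], 'B': [4, 4, 1, 2]})
print(df.rank())                        # default method='average'; NaN stays NaN
print(df.rank(method='dense'))          # ties share a rank, with no gaps
print(df.rank(axis=1, pct=True))        # row-wise percentile ranks
print(df.rank(ascending=False, na_option='bottom'))  # descending ranks; NaNs pushed to the bottom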
bsd-3-clause
frank-tancf/scikit-learn
sklearn/linear_model/tests/test_logistic.py
24
39507
import numpy as np import scipy.sparse as sp from scipy import linalg, optimize, sparse from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_raise_message from sklearn.exceptions import ConvergenceWarning from sklearn.utils import compute_class_weight from sklearn.utils.fixes import sp_version from sklearn.linear_model.logistic import ( LogisticRegression, logistic_regression_path, LogisticRegressionCV, _logistic_loss_and_grad, _logistic_grad_hess, _multinomial_grad_hess, _logistic_loss, ) from sklearn.model_selection import StratifiedKFold from sklearn.datasets import load_iris, make_classification from sklearn.metrics import log_loss X = [[-1, 0], [0, 1], [1, 1]] X_sp = sp.csr_matrix(X) Y1 = [0, 1, 1] Y2 = [2, 1, 0] iris = load_iris() def check_predictions(clf, X, y): """Check that the model is able to fit the classification data""" n_samples = len(y) classes = np.unique(y) n_classes = classes.shape[0] predicted = clf.fit(X, y).predict(X) assert_array_equal(clf.classes_, classes) assert_equal(predicted.shape, (n_samples,)) assert_array_equal(predicted, y) probabilities = clf.predict_proba(X) assert_equal(probabilities.shape, (n_samples, n_classes)) assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) assert_array_equal(probabilities.argmax(axis=1), y) def test_predict_2_classes(): # Simple sanity check on a 2 classes dataset # Make sure it predicts the correct result on simple datasets. check_predictions(LogisticRegression(random_state=0), X, Y1) check_predictions(LogisticRegression(random_state=0), X_sp, Y1) check_predictions(LogisticRegression(C=100, random_state=0), X, Y1) check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1) check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1) check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X_sp, Y1) def test_error(): # Test for appropriate exception on errors msg = "Penalty term must be positive" assert_raise_message(ValueError, msg, LogisticRegression(C=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LogisticRegression(C="test").fit, X, Y1) for LR in [LogisticRegression, LogisticRegressionCV]: msg = "Tolerance for stopping criteria must be positive" assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1) msg = "Maximum number of iteration must be positive" assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1) def test_predict_3_classes(): check_predictions(LogisticRegression(C=10), X, Y2) check_predictions(LogisticRegression(C=10), X_sp, Y2) def test_predict_iris(): # Test logistic regression with the iris dataset n_samples, n_features = iris.data.shape target = iris.target_names[iris.target] # Test that both multinomial and OvR solvers handle # multiclass data correctly and give good accuracy # score (>0.95) for the training data. 
for clf in [LogisticRegression(C=len(iris.data)), LogisticRegression(C=len(iris.data), solver='lbfgs', multi_class='multinomial'), LogisticRegression(C=len(iris.data), solver='newton-cg', multi_class='multinomial'), LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2, multi_class='ovr', random_state=42)]: clf.fit(iris.data, target) assert_array_equal(np.unique(target), clf.classes_) pred = clf.predict(iris.data) assert_greater(np.mean(pred == target), .95) probabilities = clf.predict_proba(iris.data) assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) pred = iris.target_names[probabilities.argmax(axis=1)] assert_greater(np.mean(pred == target), .95) def test_multinomial_validation(): for solver in ['lbfgs', 'newton-cg', 'sag']: lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial') assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1]) def test_check_solver_option(): X, y = iris.data, iris.target for LR in [LogisticRegression, LogisticRegressionCV]: msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs" " and sag solvers, got wrong_name") lr = LR(solver="wrong_name") assert_raise_message(ValueError, msg, lr.fit, X, y) msg = "multi_class should be either multinomial or ovr, got wrong_name" lr = LR(solver='newton-cg', multi_class="wrong_name") assert_raise_message(ValueError, msg, lr.fit, X, y) # only 'liblinear' solver msg = "Solver liblinear does not support a multinomial backend." lr = LR(solver='liblinear', multi_class='multinomial') assert_raise_message(ValueError, msg, lr.fit, X, y) # all solvers except 'liblinear' for solver in ['newton-cg', 'lbfgs', 'sag']: msg = ("Solver %s supports only l2 penalties, got l1 penalty." % solver) lr = LR(solver=solver, penalty='l1') assert_raise_message(ValueError, msg, lr.fit, X, y) msg = ("Solver %s supports only dual=False, got dual=True" % solver) lr = LR(solver=solver, dual=True) assert_raise_message(ValueError, msg, lr.fit, X, y) def test_multinomial_binary(): # Test multinomial LR on a binary problem. target = (iris.target > 0).astype(np.intp) target = np.array(["setosa", "not-setosa"])[target] for solver in ['lbfgs', 'newton-cg', 'sag']: clf = LogisticRegression(solver=solver, multi_class='multinomial', random_state=42, max_iter=2000) clf.fit(iris.data, target) assert_equal(clf.coef_.shape, (1, iris.data.shape[1])) assert_equal(clf.intercept_.shape, (1,)) assert_array_equal(clf.predict(iris.data), target) mlr = LogisticRegression(solver=solver, multi_class='multinomial', random_state=42, fit_intercept=False) mlr.fit(iris.data, target) pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)] assert_greater(np.mean(pred == target), .9) def test_sparsify(): # Test sparsify and densify members. 
n_samples, n_features = iris.data.shape target = iris.target_names[iris.target] clf = LogisticRegression(random_state=0).fit(iris.data, target) pred_d_d = clf.decision_function(iris.data) clf.sparsify() assert_true(sp.issparse(clf.coef_)) pred_s_d = clf.decision_function(iris.data) sp_data = sp.coo_matrix(iris.data) pred_s_s = clf.decision_function(sp_data) clf.densify() pred_d_s = clf.decision_function(sp_data) assert_array_almost_equal(pred_d_d, pred_s_d) assert_array_almost_equal(pred_d_d, pred_s_s) assert_array_almost_equal(pred_d_d, pred_d_s) def test_inconsistent_input(): # Test that an exception is raised on inconsistent input rng = np.random.RandomState(0) X_ = rng.random_sample((5, 10)) y_ = np.ones(X_.shape[0]) y_[0] = 0 clf = LogisticRegression(random_state=0) # Wrong dimensions for training data y_wrong = y_[:-1] assert_raises(ValueError, clf.fit, X, y_wrong) # Wrong dimensions for test data assert_raises(ValueError, clf.fit(X_, y_).predict, rng.random_sample((3, 12))) def test_write_parameters(): # Test that we can write to coef_ and intercept_ clf = LogisticRegression(random_state=0) clf.fit(X, Y1) clf.coef_[:] = 0 clf.intercept_[:] = 0 assert_array_almost_equal(clf.decision_function(X), 0) @raises(ValueError) def test_nan(): # Test proper NaN handling. # Regression test for Issue #252: fit used to go into an infinite loop. Xnan = np.array(X, dtype=np.float64) Xnan[0, 1] = np.nan LogisticRegression(random_state=0).fit(Xnan, Y1) def test_consistency_path(): # Test that the path algorithm is consistent rng = np.random.RandomState(0) X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) y = [1] * 100 + [-1] * 100 Cs = np.logspace(0, 4, 10) f = ignore_warnings # can't test with fit_intercept=True since LIBLINEAR # penalizes the intercept for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'): coefs, Cs, _ = f(logistic_regression_path)( X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver, random_state=0) for i, C in enumerate(Cs): lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5, random_state=0) lr.fit(X, y) lr_coef = lr.coef_.ravel() assert_array_almost_equal(lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver) # test for fit_intercept=True for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'): Cs = [1e3] coefs, Cs, _ = f(logistic_regression_path)( X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver, intercept_scaling=10000., random_state=0) lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4, intercept_scaling=10000., random_state=0) lr.fit(X, y) lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_]) assert_array_almost_equal(lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver) def test_liblinear_dual_random_state(): # random_state is relevant for liblinear solver only if dual=True X, y = make_classification(n_samples=20) lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15) lr1.fit(X, y) lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15) lr2.fit(X, y) lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15) lr3.fit(X, y) # same result for same random state assert_array_almost_equal(lr1.coef_, lr2.coef_) # different results for different random states msg = "Arrays are not almost equal to 6 decimals" assert_raise_message(AssertionError, msg, assert_array_almost_equal, lr1.coef_, lr3.coef_) def test_logistic_loss_and_grad(): X_ref, y = make_classification(n_samples=20) n_features = X_ref.shape[1] X_sp = X_ref.copy() X_sp[X_sp < .1] = 0 
X_sp = sp.csr_matrix(X_sp) for X in (X_ref, X_sp): w = np.zeros(n_features) # First check that our derivation of the grad is correct loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.) approx_grad = optimize.approx_fprime( w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3 ) assert_array_almost_equal(grad, approx_grad, decimal=2) # Second check that our intercept implementation is good w = np.zeros(n_features + 1) loss_interp, grad_interp = _logistic_loss_and_grad( w, X, y, alpha=1. ) assert_array_almost_equal(loss, loss_interp) approx_grad = optimize.approx_fprime( w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3 ) assert_array_almost_equal(grad_interp, approx_grad, decimal=2) def test_logistic_grad_hess(): rng = np.random.RandomState(0) n_samples, n_features = 50, 5 X_ref = rng.randn(n_samples, n_features) y = np.sign(X_ref.dot(5 * rng.randn(n_features))) X_ref -= X_ref.mean() X_ref /= X_ref.std() X_sp = X_ref.copy() X_sp[X_sp < .1] = 0 X_sp = sp.csr_matrix(X_sp) for X in (X_ref, X_sp): w = .1 * np.ones(n_features) # First check that _logistic_grad_hess is consistent # with _logistic_loss_and_grad loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.) grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.) assert_array_almost_equal(grad, grad_2) # Now check our hessian along the second direction of the grad vector = np.zeros_like(grad) vector[1] = 1 hess_col = hess(vector) # Computation of the Hessian is particularly fragile to numerical # errors when doing simple finite differences. Here we compute the # grad along a path in the direction of the vector and then use a # least-square regression to estimate the slope e = 1e-3 d_x = np.linspace(-e, e, 30) d_grad = np.array([ _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1] for t in d_x ]) d_grad -= d_grad.mean(axis=0) approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel() assert_array_almost_equal(approx_hess_col, hess_col, decimal=3) # Second check that our intercept implementation is good w = np.zeros(n_features + 1) loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.) loss_interp_2 = _logistic_loss(w, X, y, alpha=1.) grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.) 
assert_array_almost_equal(loss_interp, loss_interp_2) assert_array_almost_equal(grad_interp, grad_interp_2) def test_logistic_cv(): # test for LogisticRegressionCV object n_samples, n_features = 50, 5 rng = np.random.RandomState(0) X_ref = rng.randn(n_samples, n_features) y = np.sign(X_ref.dot(5 * rng.randn(n_features))) X_ref -= X_ref.mean() X_ref /= X_ref.std() lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False, solver='liblinear') lr_cv.fit(X_ref, y) lr = LogisticRegression(C=1., fit_intercept=False) lr.fit(X_ref, y) assert_array_almost_equal(lr.coef_, lr_cv.coef_) assert_array_equal(lr_cv.coef_.shape, (1, n_features)) assert_array_equal(lr_cv.classes_, [-1, 1]) assert_equal(len(lr_cv.classes_), 2) coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values())) assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features)) assert_array_equal(lr_cv.Cs_.shape, (1, )) scores = np.asarray(list(lr_cv.scores_.values())) assert_array_equal(scores.shape, (1, 3, 1)) def test_logistic_cv_sparse(): X, y = make_classification(n_samples=50, n_features=5, random_state=0) X[X < 1.0] = 0.0 csr = sp.csr_matrix(X) clf = LogisticRegressionCV(fit_intercept=True) clf.fit(X, y) clfs = LogisticRegressionCV(fit_intercept=True) clfs.fit(csr, y) assert_array_almost_equal(clfs.coef_, clf.coef_) assert_array_almost_equal(clfs.intercept_, clf.intercept_) assert_equal(clfs.C_, clf.C_) def test_intercept_logistic_helper(): n_samples, n_features = 10, 5 X, y = make_classification(n_samples=n_samples, n_features=n_features, random_state=0) # Fit intercept case. alpha = 1. w = np.ones(n_features + 1) grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha) loss_interp = _logistic_loss(w, X, y, alpha) # Do not fit intercept. This can be considered equivalent to adding # a feature vector of ones, i.e column of one vectors. X_ = np.hstack((X, np.ones(10)[:, np.newaxis])) grad, hess = _logistic_grad_hess(w, X_, y, alpha) loss = _logistic_loss(w, X_, y, alpha) # In the fit_intercept=False case, the feature vector of ones is # penalized. This should be taken care of. assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss) # Check gradient. assert_array_almost_equal(grad_interp[:n_features], grad[:n_features]) assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1]) rng = np.random.RandomState(0) grad = rng.rand(n_features + 1) hess_interp = hess_interp(grad) hess = hess(grad) assert_array_almost_equal(hess_interp[:n_features], hess[:n_features]) assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1]) def test_ovr_multinomial_iris(): # Test that OvR and multinomial are correct using the iris dataset. 
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape

    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e., before the classes 0 and 1 are
    # conflated) are used for both clf and clf1
    n_cv = 2
    cv = StratifiedKFold(n_cv)
    precomputed_folds = list(cv.split(train, target))

    # Train clf on the original dataset where classes 0 and 1 are separated
    clf = LogisticRegressionCV(cv=precomputed_folds)
    clf.fit(train, target)

    # Conflate classes 0 and 1 and train clf1 on this modified dataset
    clf1 = LogisticRegressionCV(cv=precomputed_folds)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)

    # Ensure that what OvR learns for class2 is same regardless of whether
    # classes 0 and 1 are separated or not
    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)

    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, n_cv, 10))

    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg', 'sag']:
        max_iter = 100 if solver == 'sag' else 15
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=max_iter,
            random_state=42, tol=1e-2, cv=2)
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)

        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, n_cv, 10))


def test_logistic_regression_solvers():
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
    lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
    lib = LogisticRegression(fit_intercept=False)
    sag = LogisticRegression(solver='sag', fit_intercept=False,
                             random_state=42)
    ncg.fit(X, y)
    lbf.fit(X, y)
    sag.fit(X, y)
    lib.fit(X, y)
    assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
    assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
    assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
    assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
    assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
    assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)


def test_logistic_regression_solvers_multiclass():
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-6
    ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
    lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
    lib = LogisticRegression(fit_intercept=False, tol=tol)
    sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
                             max_iter=1000, random_state=42)
    ncg.fit(X, y)
    lbf.fit(X, y)
    sag.fit(X, y)
    lib.fit(X, y)
    assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
    assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
    assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
    assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
    assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
    assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)


def test_logistic_regressioncv_class_weights():
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)

    msg = ("In LogisticRegressionCV the liblinear solver cannot handle "
           "multiclass with class_weight of type dict. Use the lbfgs, "
           "newton-cg or sag solvers or set class_weight='balanced'")
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raise_message(ValueError, msg, clf_lib.fit, X, y)
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])

    # Test for class_weight=balanced
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='balanced')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='balanced')
    clf_lib.fit(X, y)
    clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
                                   class_weight='balanced', max_iter=2000)
    clf_sag.fit(X, y)
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)


def test_logistic_regression_sample_weights():
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)
    sample_weight = y + 1

    for LR in [LogisticRegression, LogisticRegressionCV]:

        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        for solver in ['lbfgs', 'liblinear']:
            clf_sw_none = LR(solver=solver, fit_intercept=False)
            clf_sw_none.fit(X, y)
            clf_sw_ones = LR(solver=solver, fit_intercept=False)
            clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
            assert_array_almost_equal(
                clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)

        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
        clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
        clf_sw_n.fit(X, y, sample_weight=sample_weight)
        clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10)
        # ignore convergence warning due to small dataset
        with ignore_warnings():
            clf_sw_sag.fit(X, y, sample_weight=sample_weight)
        clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False)
        clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)

        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        for solver in ['lbfgs', 'liblinear']:
            clf_cw_12 = LR(solver=solver, fit_intercept=False,
                           class_weight={0: 1, 1: 2})
            clf_cw_12.fit(X, y)
            clf_sw_12 = LR(solver=solver, fit_intercept=False)
            clf_sw_12.fit(X, y, sample_weight=sample_weight)
            assert_array_almost_equal(
                clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)

    # Test the above for l1 penalty and l2 penalty with dual=True,
    # since the patched liblinear code is different.
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False,
        class_weight={0: 1, 1: 2}, penalty="l1")
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l1")
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)

    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False,
        class_weight={0: 1, 1: 2}, penalty="l2", dual=True)
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l2", dual=True)
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)


def _compute_class_weight_dictionary(y):
    # helper for returning a dictionary instead of an array
    classes = np.unique(y)
    class_weight = compute_class_weight("balanced", classes, y)
    class_weight_dict = dict(zip(classes, class_weight))
    return class_weight_dict


def test_logistic_regression_class_weights():
    # Multinomial case: remove 90% of class 0
    X = iris.data[45:, :]
    y = iris.target[45:]
    solvers = ("lbfgs", "newton-cg")
    class_weight_dict = _compute_class_weight_dictionary(y)

    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)

    # Binary case: remove 90% of class 0 and 100% of class 2
    X = iris.data[45:100, :]
    y = iris.target[45:100]
    solvers = ("lbfgs", "newton-cg", "liblinear")
    class_weight_dict = _compute_class_weight_dictionary(y)

    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)


def test_multinomial_logistic_regression_with_classweight_auto():
    X, y = iris.data, iris.target
    model = LogisticRegression(multi_class='multinomial',
                               class_weight='auto', solver='lbfgs')
    # 'auto' is deprecated and will be removed in 0.19
    assert_warns_message(DeprecationWarning,
                         "class_weight='auto' heuristic is deprecated",
                         model.fit, X, y)


def test_logistic_regression_convergence_warnings():
    # Test that warnings are raised if model does not converge
    X, y = make_classification(n_samples=20, n_features=20)
    clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
    assert_equal(clf_lib.n_iter_, 2)


def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression

    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)

    # 'lbfgs' is used as a reference
    solver = 'lbfgs'
    ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
    ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
                               fit_intercept=False)
    ref_i.fit(X, y)
    ref_w.fit(X, y)
    assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
    assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))

    for solver in ['sag', 'newton-cg']:
        clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=1000, tol=1e-6)
        clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=1000, tol=1e-6,
                                   fit_intercept=False)
        clf_i.fit(X, y)
        clf_w.fit(X, y)
        assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
        assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))

        # Compare solutions between lbfgs and the other solvers
        assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
        assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
        assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)

    # Test that the path gives almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg', 'sag']:
        clf_path = LogisticRegressionCV(solver=solver, max_iter=2000,
                                        tol=1e-6, multi_class='multinomial',
                                        Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)


def test_multinomial_grad_hess():
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)

    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)


def test_liblinear_decision_function_zero():
    # Test negative prediction when decision_function values are zero.
    # Liblinear predicts the positive class when decision_function values
    # are zero. This is a test to verify that we do not do the same.
    # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
    # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
    X, y = make_classification(n_samples=5, n_features=5)
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, y)

    # Dummy data such that the decision function becomes zero.
    X = np.zeros((5, 5))
    assert_array_equal(clf.predict(X), np.zeros(5))


def test_liblinear_logregcv_sparse():
    # Test LogRegCV with solver='liblinear' works for sparse matrices
    X, y = make_classification(n_samples=10, n_features=5)
    clf = LogisticRegressionCV(solver='liblinear')
    clf.fit(sparse.csr_matrix(X), y)


def test_logreg_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        clf = LogisticRegression(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)


def test_logreg_intercept_scaling_zero():
    # Test that intercept_scaling is ignored when fit_intercept is False
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, Y1)
    assert_equal(clf.intercept_, 0.)


def test_logreg_cv_penalty():
    # Test that the correct penalty is passed to the final fit.
    X, y = make_classification(n_samples=50, n_features=20, random_state=0)
    lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
    lr_cv.fit(X, y)
    lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
    lr.fit(X, y)
    assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))


def test_logreg_predict_proba_multinomial():
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)

    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)

    # Predicted probabilities using the softmax function should give a
    # smaller loss than those using the logistic function.
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)


@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iterations is reached
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0

    solvers = ['newton-cg', 'liblinear', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for max_iter in range(1, 5):
        for solver in solvers:
            for multi_class in ['ovr', 'multinomial']:
                if solver == 'liblinear' and multi_class == 'multinomial':
                    continue
                lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                        multi_class=multi_class,
                                        random_state=0, solver=solver)
                lr.fit(X, y_bin)
                assert_equal(lr.n_iter_[0], max_iter)


def test_n_iter():
    # Test that self.n_iter_ has the correct format.
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0

    n_Cs = 4
    n_cv_fold = 2

    for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
        # OvR case
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))

        # multinomial case
        n_classes = 1
        if solver in ('liblinear', 'sag'):
            break

        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))


@ignore_warnings
def test_warm_start():
    # A 1-iteration second fit on same data should give almost same result
    # with warm starting, and quite different result without warm starting.
    # Warm starting does not work with liblinear solver.
    X, y = iris.data, iris.target

    solvers = ['newton-cg', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    clf.fit(X, y)
                    coef_1 = clf.coef_

                    clf.max_iter = 1
                    with ignore_warnings():
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    if warm_start:
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        assert_greater(cum_diff, 2.0, msg)
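A recurring claim in the tests above is that the multinomial (softmax) formulation of logistic regression should fit at least as well as the one-vs-rest (OvR) formulation, both in accuracy and in log-loss. The sketch below is an illustration of that comparison outside the test harness, not part of the test suite; it assumes a scikit-learn version contemporary with these tests (one where LogisticRegression still accepts the multi_class constructor parameter).

# Illustrative sketch only (not from the test suite above); assumes an older
# scikit-learn where LogisticRegression still takes `multi_class`.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss

X_demo, y_demo = load_iris(return_X_y=True)
clf_multi = LogisticRegression(multi_class='multinomial', solver='lbfgs', max_iter=1000)
clf_ovr = LogisticRegression(multi_class='ovr', solver='lbfgs', max_iter=1000)
clf_multi.fit(X_demo, y_demo)
clf_ovr.fit(X_demo, y_demo)

# The tests above assert comparisons of this kind: higher training accuracy
# and lower log-loss for the multinomial fit.
print('accuracy  multinomial=%.3f  ovr=%.3f'
      % (clf_multi.score(X_demo, y_demo), clf_ovr.score(X_demo, y_demo)))
print('log-loss  multinomial=%.3f  ovr=%.3f'
      % (log_loss(y_demo, clf_multi.predict_proba(X_demo)),
         log_loss(y_demo, clf_ovr.predict_proba(X_demo))))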
bsd-3-clause
sanghack81/SDCIT
experiments/draw_figures.py
1
18832
import collections

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import scipy.stats
import seaborn as sns
from os.path import exists

from experiments.exp_setup import SDCIT_RESULT_DIR, SDCIT_FIGURE_DIR
from sdcit.utils import AUPC

names_chsic_chaotic = ['independent', 'gamma', 'noise', 'trial', 'N', 'runtime', 'statistic', 'pvalue']
names_chsic_postnonlinear = ['independent', 'noise', 'trial', 'N', 'runtime', 'statistic', 'pvalue']
names_kcit_chaotic = ['independent', 'gamma', 'noise', 'trial', 'N', 'runtime', 'statistic', 'boot_p_value', 'appr_p_value']
names_kcit_postnonlinear = ['independent', 'noise', 'trial', 'N', 'runtime', 'statistic', 'boot_p_value', 'appr_p_value']
names_sdcit_chaotic = ['independent', 'gamma', 'trial', 'N', 'statistic', 'pvalue']
names_sdcit_postnonlinear = ['independent', 'noise', 'trial', 'N', 'statistic', 'pvalue']
names_kcipt_chaotic = ['independent', 'gamma', 'trial', 'N', 'statistic', 'pvalue', 'B']
names_kcipt_postnonlinear = ['independent', 'noise', 'trial', 'N', 'statistic', 'pvalue', 'B']

names = {('CHSIC', 'chaotic'): names_chsic_chaotic,
         ('CHSIC', 'postnonlinear'): names_chsic_postnonlinear,
         ('KCIT', 'chaotic'): names_kcit_chaotic,
         ('KCIT', 'postnonlinear'): names_kcit_postnonlinear,
         ('KCIT2', 'chaotic'): names_kcit_chaotic,
         ('KCIT2', 'postnonlinear'): names_kcit_postnonlinear,
         ('SDCIT', 'chaotic'): names_sdcit_chaotic,
         ('SDCIT', 'postnonlinear'): names_sdcit_postnonlinear,
         ('KCIPT', 'chaotic'): names_kcipt_chaotic,
         ('KCIPT', 'postnonlinear'): names_kcipt_postnonlinear,
         }

pvalue_column = collections.defaultdict(lambda: 'pvalue')
pvalue_column['KCIT'] = 'boot_p_value'
pvalue_column['KCIT2'] = 'boot_p_value'

color_palettes = sns.color_palette('Paired', 10)
method_color_codes = {'KCIT': 3, 'SDCIT': 5, 'KCIPT': 1, 'CHSIC': 9, 'KCIT2': 2}

markers = collections.defaultdict(lambda: 'o')
markers.update({'KCIT': 'o', 'SDCIT': 's', 'KCIPT': '*', 'CHSIC': '^', 'KCIT2': 'o'})

all_algos = ['KCIT', 'SDCIT', 'KCIPT', 'CHSIC', 'KCIT2']


def algo_name(org_name):
    map = {'KCIT2': 'KCIT', 'KCIT': 'KCIT (org.)'}
    if org_name in map:
        return map[org_name]
    else:
        return org_name


def draw_aupc_chaotic():
    data = 'chaotic'
    aupc_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for group_key, group_df in df.groupby(by=['gamma', 'independent', 'N']):
            group_key = (int(group_key[0] * 10) / 10, *group_key[1:])
            if group_key[1] == 0:
                aupc_data.append([algo, *group_key, AUPC(group_df[pvalue_column[algo]])])

    print(draw_aupc_chaotic.__name__)
    [print(xx) for xx in aupc_data]

    aupc_data = np.array(aupc_data)
    aupc_df = pd.DataFrame({'algorithm': aupc_data[:, 0],
                            'gamma': aupc_data[:, 1],
                            'independent': aupc_data[:, 2],
                            'N': aupc_data[:, 3],
                            'AUPC': aupc_data[:, 4]})
    aupc_df['gamma'] = aupc_df['gamma'].astype(float)
    aupc_df['independent'] = aupc_df['independent'].astype(int)
    aupc_df['N'] = aupc_df['N'].map(int)
    aupc_df['AUPC'] = aupc_df['AUPC'].astype(float)
    aupc_df = aupc_df[aupc_df['independent'] == 0]
    aupc_df["algo-N"] = aupc_df["algorithm"].map(str) + aupc_df["N"].map(lambda xxx: ' (' + str(xxx) + ')')

    sns_setting()
    for k, gdf in aupc_df.groupby(['algorithm', 'N']):
        print('chaotic', k, gdf['AUPC'])
        if k[1] == 400:
            plt.plot(gdf['gamma'], gdf['AUPC'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))
        else:
            plt.plot(gdf['gamma'], gdf['AUPC'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label='_nolegend_')

    plt.axes().set_xlabel(r'$\gamma$')
    plt.axes().set_ylabel('Area Under Power Curve')
    plt.axes().set_ylim([0.45, 1.05])
    handles, labels = plt.axes().get_legend_handles_labels()
    # plt.axes().legend(handles[::-1], labels[::-1])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/{}_aupc.pdf'.format(data), transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


def draw_calib_chaotic():
    data = 'chaotic'
    calib_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for k, gdf in df.groupby(by=['independent', 'gamma', 'N']):
            if float(k[0]) == 1:
                D, _ = scipy.stats.kstest(gdf[pvalue_column[algo]], 'uniform')
                calib_data.append([algo, float(k[1]), int(k[2]), D])

    print(draw_calib_chaotic.__name__)
    [print(xx) for xx in calib_data]

    df = pd.DataFrame(calib_data, columns=['algo', 'gamma', 'N', 'D'])
    df['gamma'] = df['gamma'].astype(float)
    df['N'] = df['N'].map(int)
    df['D'] = df['D'].astype(float)

    sns_setting()
    for k, gdf in df.groupby(['algo', 'N']):
        if k[1] == 400:
            plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))
        else:
            plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label='_nolegend_')

    handles, labels = plt.axes().get_legend_handles_labels()
    plt.axes().legend(handles[::-1], labels[::-1], ncol=2)
    plt.axes().set_xlabel(r'$\gamma$')
    plt.axes().set_ylabel('KS test statistic')
    plt.axes().set_ylim([0.0, 0.5])
    plt.axes().invert_yaxis()
    plt.axes().set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    handles, labels = plt.axes().get_legend_handles_labels()
    # plt.axes().legend(handles[::-1], labels[::-1])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/chaotic_calib.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


def draw_type_I_error_chaotic():
    data = 'chaotic'
    calib_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for k, gdf in df.groupby(by=['independent', 'gamma', 'N']):
            if float(k[0]) == 1:
                calib_data.append([algo, float(k[1]), int(k[2]), np.mean(gdf[pvalue_column[algo]] <= 0.05)])

    print(draw_type_I_error_chaotic.__name__)
    [print(xx) for xx in calib_data]

    df = pd.DataFrame(calib_data, columns=['algo', 'gamma', 'N', 'D'])
    df['gamma'] = df['gamma'].astype(float)
    df['N'] = df['N'].map(int)
    df['D'] = df['D'].astype(float)

    sns_setting()
    for k, gdf in df.groupby(['algo', 'N']):
        if k[1] == 400:
            plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))
        else:
            plt.plot(gdf['gamma'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label='_nolegend_')

    plt.axes().set_xlabel(r'$\gamma$')
    plt.axes().set_xticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    plt.axes().set_ylabel('Type I error')
    plt.axes().set_ylim([0.0, 0.2])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/chaotic_type_I.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


def draw_aupc_postnonlinear():
    data = 'postnonlinear'
    aupc_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for group_key, group_df in df.groupby(by=['noise', 'independent', 'N']):
            group_key = (int(group_key[0] * 10) / 10, int(group_key[1]), int(group_key[2]))
            aupc_data.append([algo, *group_key, AUPC(group_df[pvalue_column[algo]])])

    print(draw_aupc_postnonlinear.__name__)
    [print(xx) for xx in aupc_data]

    aupc_data = np.array(aupc_data)
    aupc_df = pd.DataFrame({'algorithm': [str(v) for v in aupc_data[:, 0]],
                            'noise': [int(float(v)) for v in aupc_data[:, 1]],
                            'independent': [int(v) for v in aupc_data[:, 2]],
                            'N': [int(v) for v in aupc_data[:, 3]],
                            'AUPC': [float(v) for v in aupc_data[:, 4]]})
    aupc_df['dimension'] = (aupc_df['noise'] + 1).astype(int)
    aupc_df = aupc_df[aupc_df['independent'] == 0]
    aupc_df["algo-N"] = aupc_df["algorithm"].map(str) + aupc_df["N"].map(lambda xxx: ' (' + str(xxx) + ')')

    sns_setting()
    for k, gdf in aupc_df.groupby(['algorithm', 'N']):
        gdf = gdf[gdf['dimension'] <= 5]
        if k[1] == 400:
            plt.plot(gdf['dimension'], gdf['AUPC'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))
        else:
            plt.plot(gdf['dimension'], gdf['AUPC'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label='_nolegend_')

    plt.axes().set_xlabel('dimension')
    plt.axes().set_ylabel('Area Under Power Curve')
    plt.axes().set_ylim([0.45, 1.05])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_aupc.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


def draw_aupc_postnonlinear_highdim():
    data = 'postnonlinear'
    aupc_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for group_key, group_df in df.groupby(by=['noise', 'independent', 'N']):
            group_key = (int(group_key[0] * 10) / 10, int(group_key[1]), int(group_key[2]))
            aupc_data.append([algo, *group_key, AUPC(group_df[pvalue_column[algo]])])

    print(draw_aupc_postnonlinear_highdim.__name__)
    [print(xx) for xx in aupc_data]

    aupc_data = np.array(aupc_data)
    aupc_df = pd.DataFrame({'algorithm': [str(v) for v in aupc_data[:, 0]],
                            'noise': [int(float(v)) for v in aupc_data[:, 1]],
                            'independent': [int(v) for v in aupc_data[:, 2]],
                            'N': [int(v) for v in aupc_data[:, 3]],
                            'AUPC': [float(v) for v in aupc_data[:, 4]]})
    aupc_df['dimension'] = (aupc_df['noise'] + 1).astype(int)
    aupc_df = aupc_df[aupc_df['independent'] == 0]
    aupc_df["algo-N"] = aupc_df["algorithm"].map(str) + aupc_df["N"].map(lambda xxx: ' (' + str(xxx) + ')')

    sns_setting()
    for k, gdf in aupc_df.groupby(['algorithm', 'N']):
        if k[1] == 400:
            plt.plot([int(v) for v in gdf['dimension']], gdf['AUPC'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))

    plt.axes().set_xlabel('dimension')
    plt.axes().set_ylabel('Area Under Power Curve')
    plt.axes().set_ylim([0.95, 1.01])
    plt.axes().set_xscale('log')
    plt.xticks([1, 5, 10, 20, 50], [1, 5, 10, 20, 50])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_aupc_highdim.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


def draw_calib_postnonlinear():
    data = 'postnonlinear'
    calib_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for k, gdf in df.groupby(by=['independent', 'noise', 'N']):
            if float(k[0]) == 1:
                D, _ = scipy.stats.kstest(gdf[pvalue_column[algo]], 'uniform')
                calib_data.append([algo, float(k[1]), int(k[2]), D])

    print(draw_calib_postnonlinear.__name__)
    [print(xx) for xx in calib_data]

    df = pd.DataFrame(calib_data, columns=['algo', 'noise', 'N', 'D'])
    df['noise'] = df['noise'].map(int)
    df['dimension'] = (df['noise'] + 1).astype(int)
    df['N'] = df['N'].map(int)
    df['D'] = df['D'].astype(float)

    sns_setting()
    for k, gdf in df.groupby(['algo', 'N']):
        gdf = gdf[gdf['dimension'] <= 5]
        if k[1] == 400:
            plt.plot([int(v) for v in gdf['dimension']], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))
        else:
            plt.plot([int(v) for v in gdf['dimension']], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label='_nolegend_')

    plt.axes().set_xlabel('dimension')
    plt.axes().set_ylabel('KS test statistic')
    plt.axes().set_ylim([0.0, 0.5])
    plt.axes().invert_yaxis()
    plt.axes().set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_calib.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


def sns_setting():
    paper_rc = {'lines.linewidth': 1, 'lines.markersize': 2}
    sns.set_context("paper", rc=paper_rc)
    sns.set(style='white', font_scale=1.4)
    plt.figure(figsize=[4, 3])
    plt.rc('text', usetex=True)
    plt.rc('text.latex', preamble=r'\usepackage{cmbright}')


def draw_calib_postnonlinear_highdim():
    data = 'postnonlinear'
    calib_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for k, gdf in df.groupby(by=['independent', 'noise', 'N']):
            if float(k[0]) == 1 and k[2] == 400:
                dd, _ = scipy.stats.kstest(gdf[pvalue_column[algo]], 'uniform')
                calib_data.append([algo, float(k[1]), int(k[2]), dd])

    print(draw_calib_postnonlinear_highdim.__name__)
    [print(xx) for xx in calib_data]

    df = pd.DataFrame(calib_data, columns=['algo', 'noise', 'N', 'D'])
    df['noise'] = df['noise'].map(int)
    df['dimension'] = (df['noise'] + 1).astype(int)
    df['N'] = df['N'].map(int)
    df['D'] = df['D'].astype(float)

    sns_setting()
    for k, gdf in df.groupby(['algo', 'N']):
        print('postnonlinear', k, gdf['D'])
        if k[1] == 400:
            plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))
        else:
            plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label='_nolegend_')

    plt.axes().set_xlabel('dimension')
    plt.axes().set_ylabel('KS test statistic')
    plt.axes().set_xscale('log')
    plt.axes().set_ylim([0.0, 0.5])
    plt.axes().invert_yaxis()
    plt.xticks([1, 5, 10, 20, 50], [1, 5, 10, 20, 50])
    plt.axes().set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_calib_highdim.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


def draw_type_I_postnonlinear_highdim():
    data = 'postnonlinear'
    calib_data = []
    for algo in all_algos:
        df = pd.read_csv(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv', names=names[(algo, data)])
        for k, gdf in df.groupby(by=['independent', 'noise', 'N']):
            if float(k[0]) == 1 and k[2] == 400:
                dd = np.mean(gdf[pvalue_column[algo]] <= 0.05)
                calib_data.append([algo, float(k[1]), int(k[2]), dd])

    print(draw_type_I_postnonlinear_highdim.__name__)
    [print(xx) for xx in calib_data]

    df = pd.DataFrame(calib_data, columns=['algo', 'noise', 'N', 'D'])
    df['noise'] = df['noise'].map(int)
    df['dimension'] = (df['noise'] + 1).astype(int)
    df['N'] = df['N'].map(int)
    df['D'] = df['D'].astype(float)

    sns_setting()
    for k, gdf in df.groupby(['algo', 'N']):
        if k[1] == 400:
            plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label=algo_name(str(k[0])))
        else:
            plt.plot(gdf['dimension'], gdf['D'], markers[(k[0])],
                     c=color_palettes[method_color_codes[k[0]]] if k[1] == 400 else color_palettes[-0 + method_color_codes[k[0]]],
                     ls='-' if k[1] == 400 else ':', label='_nolegend_')

    plt.axes().set_xlabel('dimension')
    plt.axes().set_xscale('log')
    plt.xticks([1, 5, 10, 20, 50], [1, 5, 10, 20, 50])
    plt.axes().set_ylim([0.0, 0.2])
    handles, labels = plt.axes().get_legend_handles_labels()
    plt.axes().legend(handles[::-1], labels[::-1])
    sns.despine()
    plt.savefig(SDCIT_FIGURE_DIR + '/postnonlinear_type_I_highdim.pdf', transparent=True, bbox_inches='tight', pad_inches=0.02)
    plt.close()


if __name__ == '__main__':
    for data in ['chaotic', 'postnonlinear']:
        for algo in all_algos:
            assert exists(SDCIT_RESULT_DIR + '/' + algo.lower() + '_' + data + '.csv'), 'run tests first -- missing {}'.format(algo.lower() + '_' + data + '.csv')

    if True:
        # chaotic series
        draw_aupc_chaotic()
        draw_calib_chaotic()

        # postnonlinear-noise
        draw_aupc_postnonlinear()
        draw_calib_postnonlinear()
        draw_aupc_postnonlinear_highdim()
        draw_calib_postnonlinear_highdim()

        # type I for both
        draw_type_I_error_chaotic()
        draw_type_I_postnonlinear_highdim()
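Every plotting function above reduces a column of p-values to a single number with sdcit.utils.AUPC (area under the power curve) before plotting it against gamma or dimension. The sketch below is a loose stand-in for that quantity, not the library's implementation: it assumes the common definition of AUPC as the area under the empirical CDF of the p-values (the rejection rate integrated over all significance levels), which sits near 0.5 for well-calibrated p-values under the null and approaches 1 as a test gains power. The helper name aupc_sketch is hypothetical.

# Hedged sketch only -- aupc_sketch is a hypothetical helper, not sdcit.utils.AUPC.
import numpy as np

def aupc_sketch(p_values):
    # Area under the empirical CDF of the p-values, which equals 1 - mean(p).
    p = np.asarray(p_values, dtype=float)
    return 1.0 - p.mean()

rng = np.random.RandomState(0)
print(aupc_sketch(rng.uniform(size=1000)))         # ~0.5: uniform p-values (null holds)
print(aupc_sketch(rng.beta(0.1, 1.0, size=1000)))  # ~1.0: p-values piled near zero (high power)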
mit