{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'PDF TO Markdown' && linkText !== 'PDF TO Markdown' ) { link.textContent = 'PDF TO Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== 'Voice Cloning' ) { link.textContent = 'Voice Cloning'; link.href = 'https://vibevoice.info/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || 
linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'PDF TO Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, 
code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { 
console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\n\nA: I strongly recommend against storing binary data in a relational database. Relational databases are designed to work with fixed-size data; that's where their performance strength is: remember Joel's old article on why databases are so fast? because it takes exactly 1 pointer increment to move from a record to another record. If you add BLOB data of undefined and vastly varying size, you'll screw up performance. \nInstead, store files in the file system, and store file names in your database. 
\n\nA: While you haven't said what you're storing, and you may have a great reason for doing so, often the answer is 'as a filesystem reference' and the actual data is on the filesystem somewhere.\nhttp://www.onlamp.com/pub/a/onlamp/2002/07/11/MySQLtips.html\n\nA: It depends on the data you wish to store. The above example uses the LONGBLOB data type, but you should be aware that there are other binary data types:\nTINYBLOB/BLOB/MEDIUMBLOB/LONGBLOB\nVARBINARY\nBINARY\nEach has its use cases. If it is a known (short) length (e.g. packed data), BINARY or VARBINARY will work most of the time. They have the added benefit of being able to index on them.\n\nA: While it shouldn't be necessary, you could try base64 encoding data in and decoding it out. That means the db will just have ascii characters. It will take a bit more space and time, but any issue to do with the binary data will be eliminated.\n\nA: The answer by phpguy is correct but I think there is a lot of confusion in the additional details there.\nThe basic answer is in a BLOB data type / attribute domain. BLOB is short for Binary Large Object and that column data type is specific for handling binary data.\nSee the relevant manual page for MySQL.\n\nA: If the - not recommended - BLOB field exists, you can save data this way:\nmysql_query(\"UPDATE table SET field=X'\".bin2hex($bin_data).\"' WHERE id=$id\");\n\nIdea taken from here.\n\nA: When I need to store binary data I always use VARBINARY format as introduced by d0nut in one of the previous answers.\nYou can find documentation at MySQL website under documented topic: 12.4.2 The BINARY and VARBINARY Types.\nIf you are asking what are advantages, please read the question: why-varbinary-instead-of-varchar.\n\nA: The question also arises how to get the data into the BLOB. You can put the data in an INSERT statement, as the PHP example shows (although you should use mysql_real_escape_string instead of addslashes). 
If the file exists on the database server, you can also use MySQL's LOAD_FILE\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/17\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"198\"\n}"}}},{"rowIdx":8,"cells":{"text":{"kind":"string","value":"Q: What is the fastest way to get the value of π? I'm looking for the fastest way to obtain the value of π, as a personal challenge. More specifically, I'm using ways that don't involve using #define constants like M_PI, or hard-coding the number in.\nThe program below tests the various ways I know of. The inline assembly version is, in theory, the fastest option, though clearly not portable. I've included it as a baseline to compare against the other versions. In my tests, with built-ins, the 4 * atan(1) version is fastest on GCC 4.2, because it auto-folds the atan(1) into a constant. With -fno-builtin specified, the atan2(0, -1) version is fastest.\nHere's the main testing program (pitimes.c):\n#include \n#include \n#include \n\n#define ITERS 10000000\n#define TESTWITH(x) { \\\n diff = 0.0; \\\n time1 = clock(); \\\n for (i = 0; i < ITERS; ++i) \\\n diff += (x) - M_PI; \\\n time2 = clock(); \\\n printf(\"%s\\t=> %e, time => %f\\n\", #x, diff, diffclock(time2, time1)); \\\n}\n\nstatic inline double\ndiffclock(clock_t time1, clock_t time0)\n{\n return (double) (time1 - time0) / CLOCKS_PER_SEC;\n}\n\nint\nmain()\n{\n int i;\n clock_t time1, time2;\n double diff;\n\n /* Warmup. The atan2 case catches GCC's atan folding (which would\n * optimise the ``4 * atan(1) - M_PI'' to a no-op), if -fno-builtin\n * is not used. */\n TESTWITH(4 * atan(1))\n TESTWITH(4 * atan2(1, 1))\n\n#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64__))\n extern double fldpi();\n TESTWITH(fldpi())\n#endif\n\n /* Actual tests start here. 
*/\n TESTWITH(atan2(0, -1))\n TESTWITH(acos(-1))\n TESTWITH(2 * asin(1))\n TESTWITH(4 * atan2(1, 1))\n TESTWITH(4 * atan(1))\n\n return 0;\n}\n\nAnd the inline assembly stuff (fldpi.c) that will only work for x86 and x64 systems:\ndouble\nfldpi()\n{\n double pi;\n asm(\"fldpi\" : \"=t\" (pi));\n return pi;\n}\n\nAnd a build script that builds all the configurations I'm testing (build.sh):\n#!/bin/sh\ngcc -O3 -Wall -c -m32 -o fldpi-32.o fldpi.c\ngcc -O3 -Wall -c -m64 -o fldpi-64.o fldpi.c\n\ngcc -O3 -Wall -ffast-math -m32 -o pitimes1-32 pitimes.c fldpi-32.o\ngcc -O3 -Wall -m32 -o pitimes2-32 pitimes.c fldpi-32.o -lm\ngcc -O3 -Wall -fno-builtin -m32 -o pitimes3-32 pitimes.c fldpi-32.o -lm\ngcc -O3 -Wall -ffast-math -m64 -o pitimes1-64 pitimes.c fldpi-64.o -lm\ngcc -O3 -Wall -m64 -o pitimes2-64 pitimes.c fldpi-64.o -lm\ngcc -O3 -Wall -fno-builtin -m64 -o pitimes3-64 pitimes.c fldpi-64.o -lm\n\nApart from testing between various compiler flags (I've compared 32-bit against 64-bit too because the optimizations are different), I've also tried switching the order of the tests around. But still, the atan2(0, -1) version still comes out on top every time.\n\nA: Here's a general description of a technique for calculating pi that I learnt in high school.\nI only share this because I think it is simple enough that anyone can remember it, indefinitely, plus it teaches you the concept of \"Monte-Carlo\" methods -- which are statistical methods of arriving at answers that don't immediately appear to be deducible through random processes.\nDraw a square, and inscribe a quadrant (one quarter of a semi-circle) inside that square (a quadrant with radius equal to the side of the square, so it fills as much of the square as possible)\nNow throw a dart at the square, and record where it lands -- that is, choose a random point anywhere inside the square. Of course, it landed inside the square, but is it inside the semi-circle? 
Record this fact.\nRepeat this process many times -- and you will find there is a ratio of the number of points inside the semi-circle versus the total number thrown, call this ratio x.\nSince the area of the square is r times r, you can deduce that the area of the semi circle is x times r times r (that is, x times r squared). Hence x times 4 will give you pi. \nThis is not a quick method to use. But it's a nice example of a Monte Carlo method. And if you look around, you may find that many problems otherwise outside your computational skills can be solved by such methods.\n\nA: In the interests of completeness, a C++ template version, which, for an optimised build, will compute an approximation of PI at compile time, and will inline to a single value.\n#include \n\ntemplate\nstruct sign\n{\n enum {value = (I % 2) == 0 ? 1 : -1};\n};\n\ntemplate\nstruct pi_calc\n{\n inline static double value ()\n {\n return (pi_calc::value () + pi_calc::value ()) / 2.0;\n }\n};\n\ntemplate\nstruct pi_calc<0, J>\n{\n inline static double value ()\n {\n return (sign::value * 4.0) / (2.0 * J + 1.0) + pi_calc<0, J-1>::value ();\n }\n};\n\n\ntemplate<>\nstruct pi_calc<0, 0>\n{\n inline static double value ()\n {\n return 4.0;\n }\n};\n\ntemplate\nstruct pi\n{\n inline static double value ()\n {\n return pi_calc::value ();\n }\n};\n\nint main ()\n{\n std::cout.precision (12);\n\n const double pi_value = pi<10>::value ();\n\n std::cout << \"pi ~ \" << pi_value << std::endl;\n\n return 0;\n}\n\nNote for I > 10, optimised builds can be slow, likewise for non-optimised runs. 
For 12 iterations I believe there are around 80k calls to value() (in the absence of memoisation).\n\nA: There's actually a whole book dedicated (amongst other things) to fast methods for the computation of \\pi: 'Pi and the AGM', by Jonathan and Peter Borwein (available on Amazon).\nI studied the AGM and related algorithms quite a bit: it's quite interesting (though sometimes non-trivial).\nNote that to implement most modern algorithms to compute \\pi, you will need a multiprecision arithmetic library (GMP is quite a good choice, though it's been a while since I last used it).\nThe time-complexity of the best algorithms is in O(M(n)log(n)), where M(n) is the time-complexity for the multiplication of two n-bit integers (M(n)=O(n log(n) log(log(n))) using FFT-based algorithms, which are usually needed when computing digits of \\pi, and such an algorithm is implemented in GMP).\nNote that even though the mathematics behind the algorithms might not be trivial, the algorithms themselves are usually a few lines of pseudo-code, and their implementation is usually very straightforward (if you chose not to write your own multiprecision arithmetic :-) ). \n\nA: The following answers precisely how to do this in the fastest possible way -- with the least computing effort. Even if you don't like the answer, you have to admit that it is indeed the fastest way to get the value of PI.\nThe FASTEST way to get the value of Pi is:\n\n*\n\n*chose your favourite programming language\n\n*load its Math library\n\n*and find that Pi is already defined there -- ready for use!\n\nIn case you don't have a Math library at hand..\nThe SECOND FASTEST way (more universal solution) is:\nlook up Pi on the Internet, e.g. here:\nhttp://www.eveandersson.com/pi/digits/1000000 (1 million digits .. what's your floating point precision? 
)\nor here:\nhttp://3.141592653589793238462643383279502884197169399375105820974944592.com/\nor here:\nhttp://en.wikipedia.org/wiki/Pi\nIt's really fast to find the digits you need for whatever precision arithmetic you would like to use, and by defining a constant, you can make sure that you don't waste precious CPU time.\nNot only is this a partly humorous answer, but in reality, if anybody would go ahead and compute the value of Pi in a real application .. that would be a pretty big waste of CPU time, wouldn't it? At least I don't see a real application for trying to re-compute this.\nAlso consider that NASA only uses 15 digits of Pi for calculating interplanetary travel:\n\n*\n\n*TL;DR: https://twitter.com/Rainmaker1973/status/1463477499434835968\n\n*JPL Explanation: https://www.jpl.nasa.gov/edu/news/2016/3/16/how-many-decimals-of-pi-do-we-really-need/\nDear Moderator: please note that the OP asked: \"Fastest Way to get the value of PI\"\n\nA: The BBP formula allows you to compute the nth digit - in base 2 (or 16) - without having to even bother with the previous n-1 digits first :)\n\nA: Instead of defining pi as a constant, I always use acos(-1).\n\nA: This is a \"classic\" method, very easy to implement.\nThis implementation in python (not the fastest language) does it:\nfrom math import pi\nfrom time import time\n\n\nprecision = 10**6 # higher value -> higher precision\n # lower value -> higher speed\n\nt = time()\n\ncalc = 0\nfor k in xrange(0, precision):\n calc += ((-1)**k) / (2*k+1.)\ncalc *= 4. 
# this is just a little optimization\n\nt = time()-t\n\nprint \"Calculated: %.40f\" % calc\nprint \"Constant pi: %.40f\" % pi\nprint \"Difference: %.40f\" % abs(calc-pi)\nprint \"Time elapsed: %s\" % repr(t)\n\nYou can find more information here.\nAnyway, the fastest way to get a precise as-much-as-you-want value of pi in python is:\nfrom gmpy import pi\nprint pi(3000) # the rule is the same as \n # the precision on the previous code\n\nHere is the piece of source for the gmpy pi method, I don't think the code is as useful as the comment in this case:\nstatic char doc_pi[]=\"\\\npi(n): returns pi with n bits of precision in an mpf object\\n\\\n\";\n\n/* This function was originally from netlib, package bmp, by\n * Richard P. Brent. Paulo Cesar Pereira de Andrade converted\n * it to C and used it in his LISP interpreter.\n *\n * Original comments:\n * \n * sets mp pi = 3.14159... to the available precision.\n * uses the gauss-legendre algorithm.\n * this method requires time o(ln(t)m(t)), so it is slower\n * than mppi if m(t) = o(t**2), but would be faster for\n * large t if a faster multiplication algorithm were used\n * (see comments in mpmul).\n * for a description of the method, see - multiple-precision\n * zero-finding and the complexity of elementary function\n * evaluation (by r. p. brent), in analytic computational\n * complexity (edited by j. f. 
traub), academic press, 1976, 151-176.\n * rounding options not implemented, no guard digits used.\n*/\nstatic PyObject *\nPygmpy_pi(PyObject *self, PyObject *args)\n{\n PympfObject *pi;\n int precision;\n mpf_t r_i2, r_i3, r_i4;\n mpf_t ix;\n\n ONE_ARG(\"pi\", \"i\", &precision);\n if(!(pi = Pympf_new(precision))) {\n return NULL;\n }\n\n mpf_set_si(pi->f, 1);\n\n mpf_init(ix);\n mpf_set_ui(ix, 1);\n\n mpf_init2(r_i2, precision);\n\n mpf_init2(r_i3, precision);\n mpf_set_d(r_i3, 0.25);\n\n mpf_init2(r_i4, precision);\n mpf_set_d(r_i4, 0.5);\n mpf_sqrt(r_i4, r_i4);\n\n for (;;) {\n mpf_set(r_i2, pi->f);\n mpf_add(pi->f, pi->f, r_i4);\n mpf_div_ui(pi->f, pi->f, 2);\n mpf_mul(r_i4, r_i2, r_i4);\n mpf_sub(r_i2, pi->f, r_i2);\n mpf_mul(r_i2, r_i2, r_i2);\n mpf_mul(r_i2, r_i2, ix);\n mpf_sub(r_i3, r_i3, r_i2);\n mpf_sqrt(r_i4, r_i4);\n mpf_mul_ui(ix, ix, 2);\n /* Check for convergence */\n if (!(mpf_cmp_si(r_i2, 0) && \n mpf_get_prec(r_i2) >= (unsigned)precision)) {\n mpf_mul(pi->f, pi->f, r_i4);\n mpf_div(pi->f, pi->f, r_i3);\n break;\n }\n }\n\n mpf_clear(ix);\n mpf_clear(r_i2);\n mpf_clear(r_i3);\n mpf_clear(r_i4);\n\n return (PyObject*)pi;\n}\n\n\nEDIT: I had some problems with cut and paste and indentation, you can find the source here.\n\nA: The Monte Carlo method, as mentioned, applies some great concepts but it is, clearly, not the fastest, not by a long shot, not by any reasonable measure. Also, it all depends on what kind of accuracy you are looking for. The fastest π I know of is the one with the digits hard coded. Looking at Pi and Pi[PDF], there are a lot of formulae.\nHere is a method that converges quickly — about 14 digits per iteration. PiFast, the current fastest application, uses this formula with the FFT. I'll just write the formula, since the code is straightforward. This formula was almost found by Ramanujan and discovered by Chudnovsky. It is actually how he calculated several billion digits of the number — so it isn't a method to disregard. 
The formula will overflow quickly and, since we are dividing factorials, it would be advantageous then to delay such calculations to remove terms.\n\n\nwhere,\n\nBelow is the Brent–Salamin algorithm. Wikipedia mentions that when a and b are \"close enough\" then (a + b)² / 4t will be an approximation of π. I'm not sure what \"close enough\" means, but from my tests, one iteration got 2 digits, two got 7, and three had 15, of course this is with doubles, so it might have an error based on its representation and the true calculation could be more accurate.\nlet pi_2 iters =\n let rec loop_ a b t p i =\n if i = 0 then a,b,t,p\n else\n let a_n = (a +. b) /. 2.0 \n and b_n = sqrt (a*.b)\n and p_n = 2.0 *. p in\n let t_n = t -. (p *. (a -. a_n) *. (a -. a_n)) in\n loop_ a_n b_n t_n p_n (i - 1)\n in \n let a,b,t,p = loop_ (1.0) (1.0 /. (sqrt 2.0)) (1.0/.4.0) (1.0) iters in\n (a +. b) *. (a +. b) /. (4.0 *. t)\n\nLastly, how about some pi golf (800 digits)? 160 characters!\nint a=10000,b,c=2800,d,e,f[2801],g;main(){for(;b-c;)f[b++]=a/5;for(;d=0,g=c*2;c-=14,printf(\"%.4d\",e+d/a),e=d%a)for(b=c;d+=f[b]*a,f[b]=d%--g,d/=g--,--b;d*=b);}\n\n\nA: If by fastest you mean fastest to type in the code, here's the golfscript solution:\n;''6666,-2%{2+.2/@*\\/10.3??2*+}*`1000<~\\;\n\n\nA: Basically the C version of paperclip optimizer's answer, and much more simpilified:\n#include \n#include \n\ndouble calc_PI(int K) {\n static const int A = 545140134;\n static const int B = 13591409;\n static const int D = 640320;\n const double ID3 = 1.0 / ((double) D * (double) D * (double) D);\n double sum = 0.0;\n double b = sqrt(ID3);\n long long int p = 1;\n long long int a = B;\n sum += (double) p * (double) a * b;\n for (int k = 1; k < K; ++k) {\n a += A;\n b *= ID3;\n p *= (6 * k) * (6 * k - 1) * (6 * k - 2) * (6 * k - 3) * (6 * k - 4) * (6 * k - 5);\n p /= (3 * k) * (3 * k - 1) * (3 * k - 2) * k * k * k;\n p = -p;\n sum += (double) p * (double) a * b;\n }\n return 1.0 / (12 * sum);\n}\n\nint 
main() {\n for (int k = 1; k <= 5; ++k) {\n printf(\"k = %i, PI = %.16f\\n\", k, calc_PI(k));\n }\n}\n\nBut for more simplification, this algorithm takes Chudnovsky's formula, which I can fully simplify if you don't really understand the code.\nSummary: We will get a number from 1 to 5 and add it in to a function we will use to get PI. Then 3 numbers are given to you: 545140134 (A), 13591409 (B), 640320 (D). Then we will use D as a double multiplying itself 3 times into another double (ID3). We will then take the square root of ID3 into another double (b) and assign 2 numbers: 1 (p), the value of B (a). Take note that C is case-insensitive. Then a double (sum) will be created by multiplying the value's of p, a and b, all in doubles. Then a loop up until the number given for the function will start and add up A's value to a, b's value gets multiplied by ID3, p's value will be multiplied by multiple values that I hope you can understand and also gets divided by multiple values as well. The sum will add up by p, a and b once again and the loop will repeat until the value of the loop's number is greater or equal to 5. Later, the sum is multiplied by 12 and returned by the function giving us the result of PI.\nOkay, that was long, but I guess you will understand it...\n\nA: Pi is exactly 3! [Prof. 
Frink (Simpsons)]\nJoke, but here's one in C# (.NET-Framework required).\nusing System;\nusing System.Text;\n\nclass Program {\n static void Main(string[] args) {\n int Digits = 100;\n\n BigNumber x = new BigNumber(Digits);\n BigNumber y = new BigNumber(Digits);\n x.ArcTan(16, 5);\n y.ArcTan(4, 239);\n x.Subtract(y);\n string pi = x.ToString();\n Console.WriteLine(pi);\n }\n}\n\npublic class BigNumber {\n private UInt32[] number;\n private int size;\n private int maxDigits;\n\n public BigNumber(int maxDigits) {\n this.maxDigits = maxDigits;\n this.size = (int)Math.Ceiling((float)maxDigits * 0.104) + 2;\n number = new UInt32[size];\n }\n public BigNumber(int maxDigits, UInt32 intPart)\n : this(maxDigits) {\n number[0] = intPart;\n for (int i = 1; i < size; i++) {\n number[i] = 0;\n }\n }\n private void VerifySameSize(BigNumber value) {\n if (Object.ReferenceEquals(this, value))\n throw new Exception(\"BigNumbers cannot operate on themselves\");\n if (value.size != this.size)\n throw new Exception(\"BigNumbers must have the same size\");\n }\n\n public void Add(BigNumber value) {\n VerifySameSize(value);\n\n int index = size - 1;\n while (index >= 0 && value.number[index] == 0)\n index--;\n\n UInt32 carry = 0;\n while (index >= 0) {\n UInt64 result = (UInt64)number[index] +\n value.number[index] + carry;\n number[index] = (UInt32)result;\n if (result >= 0x100000000U)\n carry = 1;\n else\n carry = 0;\n index--;\n }\n }\n public void Subtract(BigNumber value) {\n VerifySameSize(value);\n\n int index = size - 1;\n while (index >= 0 && value.number[index] == 0)\n index--;\n\n UInt32 borrow = 0;\n while (index >= 0) {\n UInt64 result = 0x100000000U + (UInt64)number[index] -\n value.number[index] - borrow;\n number[index] = (UInt32)result;\n if (result >= 0x100000000U)\n borrow = 0;\n else\n borrow = 1;\n index--;\n }\n }\n public void Multiply(UInt32 value) {\n int index = size - 1;\n while (index >= 0 && number[index] == 0)\n index--;\n\n UInt32 carry = 0;\n while (index 
>= 0) {\n UInt64 result = (UInt64)number[index] * value + carry;\n number[index] = (UInt32)result;\n carry = (UInt32)(result >> 32);\n index--;\n }\n }\n public void Divide(UInt32 value) {\n int index = 0;\n while (index < size && number[index] == 0)\n index++;\n\n UInt32 carry = 0;\n while (index < size) {\n UInt64 result = number[index] + ((UInt64)carry << 32);\n number[index] = (UInt32)(result / (UInt64)value);\n carry = (UInt32)(result % (UInt64)value);\n index++;\n }\n }\n public void Assign(BigNumber value) {\n VerifySameSize(value);\n for (int i = 0; i < size; i++) {\n number[i] = value.number[i];\n }\n }\n\n public override string ToString() {\n BigNumber temp = new BigNumber(maxDigits);\n temp.Assign(this);\n\n StringBuilder sb = new StringBuilder();\n sb.Append(temp.number[0]);\n sb.Append(System.Globalization.CultureInfo.CurrentCulture.NumberFormat.CurrencyDecimalSeparator);\n\n int digitCount = 0;\n while (digitCount < maxDigits) {\n temp.number[0] = 0;\n temp.Multiply(100000);\n sb.AppendFormat(\"{0:D5}\", temp.number[0]);\n digitCount += 5;\n }\n\n return sb.ToString();\n }\n public bool IsZero() {\n foreach (UInt32 item in number) {\n if (item != 0)\n return false;\n }\n return true;\n }\n\n public void ArcTan(UInt32 multiplicand, UInt32 reciprocal) {\n BigNumber X = new BigNumber(maxDigits, multiplicand);\n X.Divide(reciprocal);\n reciprocal *= reciprocal;\n\n this.Assign(X);\n\n BigNumber term = new BigNumber(maxDigits);\n UInt32 divisor = 1;\n bool subtractTerm = true;\n while (true) {\n X.Divide(reciprocal);\n term.Assign(X);\n divisor += 2;\n term.Divide(divisor);\n if (term.IsZero())\n break;\n\n if (subtractTerm)\n this.Subtract(term);\n else\n this.Add(term);\n subtractTerm = !subtractTerm;\n }\n }\n}\n\n\nA: If you are willing to use an approximation, 355 / 113 is good for 6 decimal digits, and has the added advantage of being usable with integer expressions. 
That's not as important these days, as \"floating point math co-processor\" ceased to have any meaning, but it was quite important once.\n\nA: Use the Machin-like formula \n176 * arctan (1/57) + 28 * arctan (1/239) - 48 * arctan (1/682) + 96 * arctan(1/12943) \n\n[; \\left( 176 \\arctan \\frac{1}{57} + 28 \\arctan \\frac{1}{239} - 48 \\arctan \\frac{1}{682} + 96 \\arctan \\frac{1}{12943}\\right) ;], for you TeX the World people.\n\nImplemented in Scheme, for instance: \n(+ (- (+ (* 176 (atan (/ 1 57))) (* 28 (atan (/ 1 239)))) (* 48 (atan (/ 1 682)))) (* 96 (atan (/ 1 12943))))\n\nA: With doubles:\n4.0 * (4.0 * Math.Atan(0.2) - Math.Atan(1.0 / 239.0))\n\nThis will be accurate up to 14 decimal places, enough to fill a double (the inaccuracy is probably because the rest of the decimals in the arc tangents are truncated).\nAlso Seth, it's 3.141592653589793238463, not 64.\n\nA: Calculate PI at compile-time with D.\n( Copied from DSource.org )\n/** Calculate pi at compile time\n *\n * Compile with dmd -c pi.d\n */\nmodule calcpi;\n\nimport meta.math;\nimport meta.conv;\n\n/** real evaluateSeries!(real x, real metafunction!(real y, int n) term)\n *\n * Evaluate a power series at compile time.\n *\n * Given a metafunction of the form\n * real term!(real y, int n),\n * which gives the nth term of a convergent series at the point y\n * (where the first term is n==1), and a real number x,\n * this metafunction calculates the infinite sum at the point x\n * by adding terms until the sum doesn't change any more.\n */\ntemplate evaluateSeries(real x, alias term, int n=1, real sumsofar=0.0)\n{\n static if (n>1 && sumsofar == sumsofar + term!(x, n+1)) {\n const real evaluateSeries = sumsofar;\n } else {\n const real evaluateSeries = evaluateSeries!(x, term, n+1, sumsofar + term!(x, n));\n }\n}\n\n/*** Calculate atan(x) at compile time.\n *\n * Uses the Maclaurin formula\n * atan(z) = z - z^3/3 + Z^5/5 - Z^7/7 + ...\n */\ntemplate atan(real z)\n{\n const real atan = 
evaluateSeries!(z, atanTerm);\n}\n\ntemplate atanTerm(real x, int n)\n{\n const real atanTerm = (n & 1 ? 1 : -1) * pow!(x, 2*n-1)/(2*n-1);\n}\n\n/// Machin's formula for pi\n/// pi/4 = 4 atan(1/5) - atan(1/239).\npragma(msg, \"PI = \" ~ fcvt!(4.0 * (4*atan!(1/5.0) - atan!(1/239.0))) );\n\n\nA: This version (in Delphi) is nothing special, but it is at least faster than the version Nick Hodge posted on his blog :). On my machine, it takes about 16 seconds to do a billion iterations, giving a value of 3.1415926525879 (the accurate part is in bold).\nprogram calcpi;\n\n{$APPTYPE CONSOLE}\n\nuses\n SysUtils;\n\nvar\n start, finish: TDateTime;\n\nfunction CalculatePi(iterations: integer): double;\nvar\n numerator, denominator, i: integer;\n sum: double;\nbegin\n {\n PI may be approximated with this formula:\n 4 * (1 - 1/3 + 1/5 - 1/7 + 1/9 - 1/11 .......)\n //}\n numerator := 1;\n denominator := 1;\n sum := 0;\n for i := 1 to iterations do begin\n sum := sum + (numerator/denominator);\n denominator := denominator + 2;\n numerator := -numerator;\n end;\n Result := 4 * sum;\nend;\n\nbegin\n try\n start := Now;\n WriteLn(FloatToStr(CalculatePi(StrToInt(ParamStr(1)))));\n finish := Now;\n WriteLn('Seconds:' + FormatDateTime('hh:mm:ss.zz',finish-start));\n except\n on E:Exception do\n Writeln(E.Classname, ': ', E.Message);\n end;\nend.\n\n\nA: Back in the old days, with small word sizes and slow or non-existent floating-point operations, we used to do stuff like this:\n/* Return approximation of n * PI; n is integer */\n#define pi_times(n) (((n) * 22) / 7)\n\nFor applications that don't require a lot of precision (video games, for example), this is very fast and is accurate enough.\n\nA: If you want to compute an approximation of the value of π (for some reason), you should try a binary extraction algorithm. Bellard's improvement of BBP gives does PI in O(N^2). 
\n\nIf you want to obtain an approximation of the value of π to do calculations, then: \nPI = 3.141592654\n\nGranted, that's only an approximation, and not entirely accurate. It's off by a little more than 0.00000000004102. (four ten-trillionths, about 4/10,000,000,000).\n\nIf you want to do math with π, then get yourself a pencil and paper or a computer algebra package, and use π's exact value, π. \nIf you really want a formula, this one is fun: \nπ = -i ln(-1)\n\nA: I really like this program, because it approximates π by looking at its own area.\nIOCCC 1988 : westley.c \n\n#define _ -F<00||--F-OO--;\nint F=00,OO=00;main(){F_OO();printf(\"%1.3f\\n\",4.*-F/OO/OO);}F_OO()\n{\n _-_-_-_\n _-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_-_-_-_-_\n _-_-_-_-_-_-_-_\n _-_-_-_\n}\n\n\n\nA: Calculating π from circle area :-)\n\n\n\r\n
\r\n
\r\n\r\n\n\n\n\nA: The Chudnovsky algorithm is pretty fast if you don't mind performing a square root and a couple inverses. It converges to double precision in just 2 iterations.\n/*\n Chudnovsky algorithm for computing PI\n*/\n\n#include \n#include \nusing namespace std;\n\ndouble calc_PI(int K=2) {\n\n static const int A = 545140134;\n static const int B = 13591409;\n static const int D = 640320;\n\n const double ID3 = 1./ (double(D)*double(D)*double(D));\n\n double sum = 0.;\n double b = sqrt(ID3);\n long long int p = 1;\n long long int a = B;\n\n sum += double(p) * double(a)* b;\n\n // 2 iterations enough for double convergence\n for (int k=1; k 14.181647462725476\n DIGITS_PER_TERM = 14.181647462725476\n MMILL = mpz(1000000)\n\n def __init__(self,ndigits):\n \"\"\" Initialization\n :param int ndigits: digits of PI computation\n \"\"\"\n self.ndigits = ndigits\n self.n = mpz(self.ndigits // self.DIGITS_PER_TERM + 1)\n self.prec = mpz((self.ndigits + 1) * LOG2_10)\n self.one_sq = pow(mpz(10),mpz(2 * ndigits))\n self.sqrt_c = isqrt(self.E * self.one_sq)\n self.iters = mpz(0)\n self.start_time = 0\n\n def compute(self):\n \"\"\" Computation \"\"\"\n try:\n self.start_time = time.time()\n logging.debug(\"Starting {} formula to {:,} decimal places\"\n .format(name,ndigits) )\n __, q, t = self.__bs(mpz(0), self.n) # p is just for recursion\n pi = (q * self.D * self.sqrt_c) // t\n logging.debug('{} calulation Done! 
{:,} iterations and {:.2f} seconds.'\n .format( name, int(self.iters),time.time() - self.start_time))\n get_context().precision= int((self.ndigits+10) * LOG2_10)\n pi_s = pi.digits() # digits() gmpy2 creates a string \n pi_o = pi_s[:1] + \".\" + pi_s[1:]\n return pi_o,int(self.iters),time.time() - self.start_time\n except Exception as e:\n print (e.message, e.args)\n raise\n\n def __bs(self, a, b):\n \"\"\" PQT computation by BSA(= Binary Splitting Algorithm)\n :param int a: positive integer\n :param int b: positive integer\n :return list [int p_ab, int q_ab, int t_ab]\n \"\"\"\n try:\n self.iters += mpz(1)\n if self.iters % self.MMILL == mpz(0):\n logging.debug('Chudnovsky ... {:,} iterations and {:.2f} seconds.'\n .format( int(self.iters),time.time() - self.start_time))\n if a + mpz(1) == b:\n if a == mpz(0):\n p_ab = q_ab = mpz(1)\n else:\n p_ab = mpz((mpz(6) * a - mpz(5)) * (mpz(2) * a - mpz(1)) * (mpz(6) * a - mpz(1)))\n q_ab = pow(a,mpz(3)) * self.C3_24\n t_ab = p_ab * (self.A + self.B * a)\n if a & 1:\n t_ab *= mpz(-1)\n else:\n m = (a + b) // mpz(2)\n p_am, q_am, t_am = self.__bs(a, m)\n p_mb, q_mb, t_mb = self.__bs(m, b)\n p_ab = p_am * p_mb\n q_ab = q_am * q_mb\n t_ab = q_mb * t_am + p_am * t_mb\n return [p_ab, q_ab, t_ab]\n except Exception as e:\n print (e.message, e.args)\n raise\n\nHere is the output of 1,000,000,000 digits less than 45 minutes:\npython pi-pourri.py -v -d 1,000,000,000 -a 10 \n\n[INFO] 2022-10-03 09:22:51,860 : MainProcess Computing π to 1,000,000,000 digits.\n[DEBUG] 2022-10-03 09:25:00,543 compute: MainProcess Starting Chudnovsky brothers 1988 \n π = (Q(0, N) / 12T(0, N) + 12AQ(0, N))**(C**(3/2))\n formula to 1,000,000,000 decimal places\n[DEBUG] 2022-10-03 09:25:04,995 __bs: MainProcess Chudnovsky ... 1,000,000 iterations and 4.45 seconds.\n[DEBUG] 2022-10-03 09:25:10,836 __bs: MainProcess Chudnovsky ... 2,000,000 iterations and 10.29 seconds.\n[DEBUG] 2022-10-03 09:25:18,227 __bs: MainProcess Chudnovsky ... 
3,000,000 iterations and 17.68 seconds.\n[DEBUG] 2022-10-03 09:25:24,512 __bs: MainProcess Chudnovsky ... 4,000,000 iterations and 23.97 seconds.\n[DEBUG] 2022-10-03 09:25:35,670 __bs: MainProcess Chudnovsky ... 5,000,000 iterations and 35.13 seconds.\n[DEBUG] 2022-10-03 09:25:41,376 __bs: MainProcess Chudnovsky ... 6,000,000 iterations and 40.83 seconds.\n[DEBUG] 2022-10-03 09:25:49,238 __bs: MainProcess Chudnovsky ... 7,000,000 iterations and 48.69 seconds.\n[DEBUG] 2022-10-03 09:25:55,646 __bs: MainProcess Chudnovsky ... 8,000,000 iterations and 55.10 seconds.\n[DEBUG] 2022-10-03 09:26:15,043 __bs: MainProcess Chudnovsky ... 9,000,000 iterations and 74.50 seconds.\n[DEBUG] 2022-10-03 09:26:21,437 __bs: MainProcess Chudnovsky ... 10,000,000 iterations and 80.89 seconds.\n[DEBUG] 2022-10-03 09:26:26,587 __bs: MainProcess Chudnovsky ... 11,000,000 iterations and 86.04 seconds.\n[DEBUG] 2022-10-03 09:26:34,777 __bs: MainProcess Chudnovsky ... 12,000,000 iterations and 94.23 seconds.\n[DEBUG] 2022-10-03 09:26:41,231 __bs: MainProcess Chudnovsky ... 13,000,000 iterations and 100.69 seconds.\n[DEBUG] 2022-10-03 09:26:52,972 __bs: MainProcess Chudnovsky ... 14,000,000 iterations and 112.43 seconds.\n[DEBUG] 2022-10-03 09:26:59,517 __bs: MainProcess Chudnovsky ... 15,000,000 iterations and 118.97 seconds.\n[DEBUG] 2022-10-03 09:27:07,932 __bs: MainProcess Chudnovsky ... 16,000,000 iterations and 127.39 seconds.\n[DEBUG] 2022-10-03 09:27:14,036 __bs: MainProcess Chudnovsky ... 17,000,000 iterations and 133.49 seconds.\n[DEBUG] 2022-10-03 09:27:51,629 __bs: MainProcess Chudnovsky ... 18,000,000 iterations and 171.09 seconds.\n[DEBUG] 2022-10-03 09:27:58,176 __bs: MainProcess Chudnovsky ... 19,000,000 iterations and 177.63 seconds.\n[DEBUG] 2022-10-03 09:28:06,704 __bs: MainProcess Chudnovsky ... 20,000,000 iterations and 186.16 seconds.\n[DEBUG] 2022-10-03 09:28:13,376 __bs: MainProcess Chudnovsky ... 
21,000,000 iterations and 192.83 seconds.\n[DEBUG] 2022-10-03 09:28:18,737 __bs: MainProcess Chudnovsky ... 22,000,000 iterations and 198.19 seconds.\n[DEBUG] 2022-10-03 09:28:31,095 __bs: MainProcess Chudnovsky ... 23,000,000 iterations and 210.55 seconds.\n[DEBUG] 2022-10-03 09:28:37,789 __bs: MainProcess Chudnovsky ... 24,000,000 iterations and 217.25 seconds.\n[DEBUG] 2022-10-03 09:28:46,171 __bs: MainProcess Chudnovsky ... 25,000,000 iterations and 225.63 seconds.\n[DEBUG] 2022-10-03 09:28:52,933 __bs: MainProcess Chudnovsky ... 26,000,000 iterations and 232.39 seconds.\n[DEBUG] 2022-10-03 09:29:13,524 __bs: MainProcess Chudnovsky ... 27,000,000 iterations and 252.98 seconds.\n[DEBUG] 2022-10-03 09:29:19,676 __bs: MainProcess Chudnovsky ... 28,000,000 iterations and 259.13 seconds.\n[DEBUG] 2022-10-03 09:29:28,196 __bs: MainProcess Chudnovsky ... 29,000,000 iterations and 267.65 seconds.\n[DEBUG] 2022-10-03 09:29:34,720 __bs: MainProcess Chudnovsky ... 30,000,000 iterations and 274.18 seconds.\n[DEBUG] 2022-10-03 09:29:47,075 __bs: MainProcess Chudnovsky ... 31,000,000 iterations and 286.53 seconds.\n[DEBUG] 2022-10-03 09:29:53,746 __bs: MainProcess Chudnovsky ... 32,000,000 iterations and 293.20 seconds.\n[DEBUG] 2022-10-03 09:29:59,099 __bs: MainProcess Chudnovsky ... 33,000,000 iterations and 298.56 seconds.\n[DEBUG] 2022-10-03 09:30:07,511 __bs: MainProcess Chudnovsky ... 34,000,000 iterations and 306.97 seconds.\n[DEBUG] 2022-10-03 09:30:14,279 __bs: MainProcess Chudnovsky ... 35,000,000 iterations and 313.74 seconds.\n[DEBUG] 2022-10-03 09:31:31,710 __bs: MainProcess Chudnovsky ... 36,000,000 iterations and 391.17 seconds.\n[DEBUG] 2022-10-03 09:31:38,454 __bs: MainProcess Chudnovsky ... 37,000,000 iterations and 397.91 seconds.\n[DEBUG] 2022-10-03 09:31:46,437 __bs: MainProcess Chudnovsky ... 38,000,000 iterations and 405.89 seconds.\n[DEBUG] 2022-10-03 09:31:53,285 __bs: MainProcess Chudnovsky ... 
39,000,000 iterations and 412.74 seconds.\n[DEBUG] 2022-10-03 09:32:05,602 __bs: MainProcess Chudnovsky ... 40,000,000 iterations and 425.06 seconds.\n[DEBUG] 2022-10-03 09:32:12,220 __bs: MainProcess Chudnovsky ... 41,000,000 iterations and 431.68 seconds.\n[DEBUG] 2022-10-03 09:32:20,708 __bs: MainProcess Chudnovsky ... 42,000,000 iterations and 440.17 seconds.\n[DEBUG] 2022-10-03 09:32:27,552 __bs: MainProcess Chudnovsky ... 43,000,000 iterations and 447.01 seconds.\n[DEBUG] 2022-10-03 09:32:32,986 __bs: MainProcess Chudnovsky ... 44,000,000 iterations and 452.44 seconds.\n[DEBUG] 2022-10-03 09:32:53,904 __bs: MainProcess Chudnovsky ... 45,000,000 iterations and 473.36 seconds.\n[DEBUG] 2022-10-03 09:33:00,832 __bs: MainProcess Chudnovsky ... 46,000,000 iterations and 480.29 seconds.\n[DEBUG] 2022-10-03 09:33:09,198 __bs: MainProcess Chudnovsky ... 47,000,000 iterations and 488.66 seconds.\n[DEBUG] 2022-10-03 09:33:16,000 __bs: MainProcess Chudnovsky ... 48,000,000 iterations and 495.46 seconds.\n[DEBUG] 2022-10-03 09:33:27,921 __bs: MainProcess Chudnovsky ... 49,000,000 iterations and 507.38 seconds.\n[DEBUG] 2022-10-03 09:33:34,778 __bs: MainProcess Chudnovsky ... 50,000,000 iterations and 514.24 seconds.\n[DEBUG] 2022-10-03 09:33:43,298 __bs: MainProcess Chudnovsky ... 51,000,000 iterations and 522.76 seconds.\n[DEBUG] 2022-10-03 09:33:49,959 __bs: MainProcess Chudnovsky ... 52,000,000 iterations and 529.42 seconds.\n[DEBUG] 2022-10-03 09:34:29,294 __bs: MainProcess Chudnovsky ... 53,000,000 iterations and 568.75 seconds.\n[DEBUG] 2022-10-03 09:34:36,176 __bs: MainProcess Chudnovsky ... 54,000,000 iterations and 575.63 seconds.\n[DEBUG] 2022-10-03 09:34:41,576 __bs: MainProcess Chudnovsky ... 55,000,000 iterations and 581.03 seconds.\n[DEBUG] 2022-10-03 09:34:50,161 __bs: MainProcess Chudnovsky ... 56,000,000 iterations and 589.62 seconds.\n[DEBUG] 2022-10-03 09:34:56,811 __bs: MainProcess Chudnovsky ... 
57,000,000 iterations and 596.27 seconds.\n[DEBUG] 2022-10-03 09:35:09,382 __bs: MainProcess Chudnovsky ... 58,000,000 iterations and 608.84 seconds.\n[DEBUG] 2022-10-03 09:35:16,206 __bs: MainProcess Chudnovsky ... 59,000,000 iterations and 615.66 seconds.\n[DEBUG] 2022-10-03 09:35:24,295 __bs: MainProcess Chudnovsky ... 60,000,000 iterations and 623.75 seconds.\n[DEBUG] 2022-10-03 09:35:31,095 __bs: MainProcess Chudnovsky ... 61,000,000 iterations and 630.55 seconds.\n[DEBUG] 2022-10-03 09:35:52,139 __bs: MainProcess Chudnovsky ... 62,000,000 iterations and 651.60 seconds.\n[DEBUG] 2022-10-03 09:35:58,781 __bs: MainProcess Chudnovsky ... 63,000,000 iterations and 658.24 seconds.\n[DEBUG] 2022-10-03 09:36:07,399 __bs: MainProcess Chudnovsky ... 64,000,000 iterations and 666.86 seconds.\n[DEBUG] 2022-10-03 09:36:12,847 __bs: MainProcess Chudnovsky ... 65,000,000 iterations and 672.30 seconds.\n[DEBUG] 2022-10-03 09:36:19,763 __bs: MainProcess Chudnovsky ... 66,000,000 iterations and 679.22 seconds.\n[DEBUG] 2022-10-03 09:36:32,351 __bs: MainProcess Chudnovsky ... 67,000,000 iterations and 691.81 seconds.\n[DEBUG] 2022-10-03 09:36:39,078 __bs: MainProcess Chudnovsky ... 68,000,000 iterations and 698.53 seconds.\n[DEBUG] 2022-10-03 09:36:47,830 __bs: MainProcess Chudnovsky ... 69,000,000 iterations and 707.29 seconds.\n[DEBUG] 2022-10-03 09:36:54,701 __bs: MainProcess Chudnovsky ... 70,000,000 iterations and 714.16 seconds.\n[DEBUG] 2022-10-03 09:39:39,357 __bs: MainProcess Chudnovsky ... 71,000,000 iterations and 878.81 seconds.\n[DEBUG] 2022-10-03 09:39:46,199 __bs: MainProcess Chudnovsky ... 72,000,000 iterations and 885.66 seconds.\n[DEBUG] 2022-10-03 09:39:54,956 __bs: MainProcess Chudnovsky ... 73,000,000 iterations and 894.41 seconds.\n[DEBUG] 2022-10-03 09:40:01,639 __bs: MainProcess Chudnovsky ... 74,000,000 iterations and 901.10 seconds.\n[DEBUG] 2022-10-03 09:40:14,219 __bs: MainProcess Chudnovsky ... 
75,000,000 iterations and 913.68 seconds.\n[DEBUG] 2022-10-03 09:40:19,680 __bs: MainProcess Chudnovsky ... 76,000,000 iterations and 919.14 seconds.\n[DEBUG] 2022-10-03 09:40:26,625 __bs: MainProcess Chudnovsky ... 77,000,000 iterations and 926.08 seconds.\n[DEBUG] 2022-10-03 09:40:35,212 __bs: MainProcess Chudnovsky ... 78,000,000 iterations and 934.67 seconds.\n[DEBUG] 2022-10-03 09:40:41,914 __bs: MainProcess Chudnovsky ... 79,000,000 iterations and 941.37 seconds.\n[DEBUG] 2022-10-03 09:41:03,218 __bs: MainProcess Chudnovsky ... 80,000,000 iterations and 962.68 seconds.\n[DEBUG] 2022-10-03 09:41:10,213 __bs: MainProcess Chudnovsky ... 81,000,000 iterations and 969.67 seconds.\n[DEBUG] 2022-10-03 09:41:18,344 __bs: MainProcess Chudnovsky ... 82,000,000 iterations and 977.80 seconds.\n[DEBUG] 2022-10-03 09:41:25,261 __bs: MainProcess Chudnovsky ... 83,000,000 iterations and 984.72 seconds.\n[DEBUG] 2022-10-03 09:41:37,663 __bs: MainProcess Chudnovsky ... 84,000,000 iterations and 997.12 seconds.\n[DEBUG] 2022-10-03 09:41:44,680 __bs: MainProcess Chudnovsky ... 85,000,000 iterations and 1004.14 seconds.\n[DEBUG] 2022-10-03 09:41:53,411 __bs: MainProcess Chudnovsky ... 86,000,000 iterations and 1012.87 seconds.\n[DEBUG] 2022-10-03 09:41:58,926 __bs: MainProcess Chudnovsky ... 87,000,000 iterations and 1018.38 seconds.\n[DEBUG] 2022-10-03 09:42:05,858 __bs: MainProcess Chudnovsky ... 88,000,000 iterations and 1025.32 seconds.\n[DEBUG] 2022-10-03 09:42:46,163 __bs: MainProcess Chudnovsky ... 89,000,000 iterations and 1065.62 seconds.\n[DEBUG] 2022-10-03 09:42:53,054 __bs: MainProcess Chudnovsky ... 90,000,000 iterations and 1072.51 seconds.\n[DEBUG] 2022-10-03 09:43:02,030 __bs: MainProcess Chudnovsky ... 91,000,000 iterations and 1081.49 seconds.\n[DEBUG] 2022-10-03 09:43:09,192 __bs: MainProcess Chudnovsky ... 92,000,000 iterations and 1088.65 seconds.\n[DEBUG] 2022-10-03 09:43:21,533 __bs: MainProcess Chudnovsky ... 
93,000,000 iterations and 1100.99 seconds.\n[DEBUG] 2022-10-03 09:43:28,643 __bs: MainProcess Chudnovsky ... 94,000,000 iterations and 1108.10 seconds.\n[DEBUG] 2022-10-03 09:43:37,372 __bs: MainProcess Chudnovsky ... 95,000,000 iterations and 1116.83 seconds.\n[DEBUG] 2022-10-03 09:43:44,558 __bs: MainProcess Chudnovsky ... 96,000,000 iterations and 1124.02 seconds.\n[DEBUG] 2022-10-03 09:44:06,555 __bs: MainProcess Chudnovsky ... 97,000,000 iterations and 1146.01 seconds.\n[DEBUG] 2022-10-03 09:44:12,220 __bs: MainProcess Chudnovsky ... 98,000,000 iterations and 1151.68 seconds.\n[DEBUG] 2022-10-03 09:44:19,278 __bs: MainProcess Chudnovsky ... 99,000,000 iterations and 1158.74 seconds.\n[DEBUG] 2022-10-03 09:44:28,323 __bs: MainProcess Chudnovsky ... 100,000,000 iterations and 1167.78 seconds.\n[DEBUG] 2022-10-03 09:44:35,211 __bs: MainProcess Chudnovsky ... 101,000,000 iterations and 1174.67 seconds.\n[DEBUG] 2022-10-03 09:44:48,331 __bs: MainProcess Chudnovsky ... 102,000,000 iterations and 1187.79 seconds.\n[DEBUG] 2022-10-03 09:44:54,835 __bs: MainProcess Chudnovsky ... 103,000,000 iterations and 1194.29 seconds.\n[DEBUG] 2022-10-03 09:45:03,869 __bs: MainProcess Chudnovsky ... 104,000,000 iterations and 1203.33 seconds.\n[DEBUG] 2022-10-03 09:45:10,967 __bs: MainProcess Chudnovsky ... 105,000,000 iterations and 1210.42 seconds.\n[DEBUG] 2022-10-03 09:46:32,760 __bs: MainProcess Chudnovsky ... 106,000,000 iterations and 1292.22 seconds.\n[DEBUG] 2022-10-03 09:46:39,872 __bs: MainProcess Chudnovsky ... 107,000,000 iterations and 1299.33 seconds.\n[DEBUG] 2022-10-03 09:46:48,948 __bs: MainProcess Chudnovsky ... 108,000,000 iterations and 1308.41 seconds.\n[DEBUG] 2022-10-03 09:46:54,611 __bs: MainProcess Chudnovsky ... 109,000,000 iterations and 1314.07 seconds.\n[DEBUG] 2022-10-03 09:47:01,727 __bs: MainProcess Chudnovsky ... 110,000,000 iterations and 1321.18 seconds.\n[DEBUG] 2022-10-03 09:47:14,525 __bs: MainProcess Chudnovsky ... 
111,000,000 iterations and 1333.98 seconds.\n[DEBUG] 2022-10-03 09:47:21,682 __bs: MainProcess Chudnovsky ... 112,000,000 iterations and 1341.14 seconds.\n[DEBUG] 2022-10-03 09:47:30,610 __bs: MainProcess Chudnovsky ... 113,000,000 iterations and 1350.07 seconds.\n[DEBUG] 2022-10-03 09:47:37,176 __bs: MainProcess Chudnovsky ... 114,000,000 iterations and 1356.63 seconds.\n[DEBUG] 2022-10-03 09:47:59,642 __bs: MainProcess Chudnovsky ... 115,000,000 iterations and 1379.10 seconds.\n[DEBUG] 2022-10-03 09:48:06,702 __bs: MainProcess Chudnovsky ... 116,000,000 iterations and 1386.16 seconds.\n[DEBUG] 2022-10-03 09:48:15,483 __bs: MainProcess Chudnovsky ... 117,000,000 iterations and 1394.94 seconds.\n[DEBUG] 2022-10-03 09:48:22,537 __bs: MainProcess Chudnovsky ... 118,000,000 iterations and 1401.99 seconds.\n[DEBUG] 2022-10-03 09:48:35,714 __bs: MainProcess Chudnovsky ... 119,000,000 iterations and 1415.17 seconds.\n[DEBUG] 2022-10-03 09:48:41,321 __bs: MainProcess Chudnovsky ... 120,000,000 iterations and 1420.78 seconds.\n[DEBUG] 2022-10-03 09:48:48,408 __bs: MainProcess Chudnovsky ... 121,000,000 iterations and 1427.87 seconds.\n[DEBUG] 2022-10-03 09:48:57,138 __bs: MainProcess Chudnovsky ... 122,000,000 iterations and 1436.60 seconds.\n[DEBUG] 2022-10-03 09:49:04,328 __bs: MainProcess Chudnovsky ... 123,000,000 iterations and 1443.79 seconds.\n[DEBUG] 2022-10-03 09:49:46,274 __bs: MainProcess Chudnovsky ... 124,000,000 iterations and 1485.73 seconds.\n[DEBUG] 2022-10-03 09:49:52,833 __bs: MainProcess Chudnovsky ... 125,000,000 iterations and 1492.29 seconds.\n[DEBUG] 2022-10-03 09:50:01,786 __bs: MainProcess Chudnovsky ... 126,000,000 iterations and 1501.24 seconds.\n[DEBUG] 2022-10-03 09:50:08,975 __bs: MainProcess Chudnovsky ... 127,000,000 iterations and 1508.43 seconds.\n[DEBUG] 2022-10-03 09:50:21,850 __bs: MainProcess Chudnovsky ... 128,000,000 iterations and 1521.31 seconds.\n[DEBUG] 2022-10-03 09:50:28,962 __bs: MainProcess Chudnovsky ... 
129,000,000 iterations and 1528.42 seconds.\n[DEBUG] 2022-10-03 09:50:34,594 __bs: MainProcess Chudnovsky ... 130,000,000 iterations and 1534.05 seconds.\n[DEBUG] 2022-10-03 09:50:43,647 __bs: MainProcess Chudnovsky ... 131,000,000 iterations and 1543.10 seconds.\n[DEBUG] 2022-10-03 09:50:50,724 __bs: MainProcess Chudnovsky ... 132,000,000 iterations and 1550.18 seconds.\n[DEBUG] 2022-10-03 09:51:12,742 __bs: MainProcess Chudnovsky ... 133,000,000 iterations and 1572.20 seconds.\n[DEBUG] 2022-10-03 09:51:19,799 __bs: MainProcess Chudnovsky ... 134,000,000 iterations and 1579.26 seconds.\n[DEBUG] 2022-10-03 09:51:28,824 __bs: MainProcess Chudnovsky ... 135,000,000 iterations and 1588.28 seconds.\n[DEBUG] 2022-10-03 09:51:35,324 __bs: MainProcess Chudnovsky ... 136,000,000 iterations and 1594.78 seconds.\n[DEBUG] 2022-10-03 09:51:48,419 __bs: MainProcess Chudnovsky ... 137,000,000 iterations and 1607.88 seconds.\n[DEBUG] 2022-10-03 09:51:55,634 __bs: MainProcess Chudnovsky ... 138,000,000 iterations and 1615.09 seconds.\n[DEBUG] 2022-10-03 09:52:04,435 __bs: MainProcess Chudnovsky ... 139,000,000 iterations and 1623.89 seconds.\n[DEBUG] 2022-10-03 09:52:11,583 __bs: MainProcess Chudnovsky ... 140,000,000 iterations and 1631.04 seconds.\n[DEBUG] 2022-10-03 09:52:17,222 __bs: MainProcess Chudnovsky ... 141,000,000 iterations and 1636.68 seconds.\n[DEBUG] 2022-10-03 10:02:43,939 compute: MainProcess Chudnovsky brothers 1988 \n π = (Q(0, N) / 12T(0, N) + 12AQ(0, N))**(C**(3/2))\n calulation Done! 
141,027,339 iterations and 2263.39 seconds.\n[INFO] 2022-10-03 10:09:07,119 : MainProcess Last 5 digits of π were 45519 as expected at offset 999,999,995\n[INFO] 2022-10-03 10:09:07,119 : MainProcess Calculated π to 1,000,000,000 digits using a formula of:\n 10 Chudnovsky brothers 1988 \n π = (Q(0, N) / 12T(0, N) + 12AQ(0, N))**(C**(3/2))\n \n[INFO] 2022-10-03 10:09:07,120 : MainProcess Calculation took 141,027,339 iterations and 0:44:06.398345.\n\nmath_pi.pi(b = 1000000)\nis faster to a million. About 40 times faster. But it cannot go to a Billion. 1 Million is the most digits.\nThe GMPY Builtin looks like:\npython pi-pourri.py -v -d 1,000,000,000 -a 11\n[INFO] 2022-10-03 14:33:34,729 : MainProcess Computing π to 1,000,000,000 digits.\n[DEBUG] 2022-10-03 14:33:34,729 compute: MainProcess Starting const_pi() function from the gmpy2 library formula to 1,000,000,000 decimal places\n[DEBUG] 2022-10-03 15:46:46,575 compute: MainProcess const_pi() function from the gmpy2 library calulation Done! 1 iterations and 4391.85 seconds.\n[INFO] 2022-10-03 15:46:46,575 : MainProcess Last 5 digits of π were 45519 as expected at offset 999,999,995\n[INFO] 2022-10-03 15:46:46,575 : MainProcess Calculated π to 1,000,000,000 digits using a formula of:\n 11 const_pi() function from the gmpy2 library \n[INFO] 2022-10-03 15:46:46,575 : MainProcess Calculation took 1 iterations and 1:13:11.845652.\n\nThe MPmath Builtin is almost as fast. About 12% (6 minutes) slower:\npython pi-pourri.py -v -a 12 -d 1,000,000,000 \n[INFO] 2022-10-04 09:10:37,085 : MainProcess Computing π to 1,000,000,000 digits.\n[DEBUG] 2022-10-04 09:10:37,085 compute: MainProcess Starting mp.pi() function from the mpmath library formula to 1,000,000,000 decimal places\n[DEBUG] 2022-10-04 10:01:25,321 compute: MainProcess mp.pi() function from the mpmath library calulation Done! 
1 iterations and 3048.22 seconds.\n[INFO] 2022-10-04 10:01:25,338 : MainProcess Last 5 digits of π were 45519 as expected at offset 999,999,995\n[INFO] 2022-10-04 10:01:25,340 : MainProcess Calculated π to 1,000,000,000 digits using a formula of:\n 12 mp.pi() function from the mpmath library \n[INFO] 2022-10-04 10:01:25,343 : MainProcess Calculation took 1 iterations and 0:50:48.250337.\n\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/19\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"351\"\n}"}}},{"rowIdx":9,"cells":{"text":{"kind":"string","value":"Q: Throw an error preventing a table update in a MySQL trigger If I have a trigger before the update on a table, how can I throw an error that prevents the update on that table?\n\nA: CREATE TRIGGER sample_trigger_msg \n BEFORE INSERT\nFOR EACH ROW\n BEGIN\nIF(NEW.important_value) < (1*2) THEN\n DECLARE dummy INT;\n SELECT \n Enter your Message Here!!!\n INTO dummy \n FROM mytable\n WHERE mytable.id=new.id\nEND IF;\nEND;\n\n\nA: Here is one hack that may work. It isn't clean, but it looks like it might work:\nEssentially, you just try to update a column that doesn't exist.\n\nA: Another (hack) method (if you are not on 5.5+ for some reason) that you can use:\nIf you have a required field, then within a trigger set the required field to an invalid value such as NULL. This will work for both INSERT and UPDATE. 
Do note that if NULL is a valid value for the required field (for some crazy reason) then this approach will not work.\nBEGIN\n -- Force one of the following to be assigned otherwise set required field to null which will throw an error\n IF (NEW.`nullable_field_1` IS NULL AND NEW.`nullable_field_2` IS NULL) THEN\n SET NEW.`required_id_field`=NULL;\n END IF;\nEND\n\nIf you are on 5.5+ then you can use the signal state as described in other answers:\nBEGIN\n -- Force one of the following to be assigned otherwise use signal sqlstate to throw a unique error\n IF (NEW.`nullable_field_1` IS NULL AND NEW.`nullable_field_2` IS NULL) THEN\n SIGNAL SQLSTATE '45000' set message_text='A unique identifier for nullable_field_1 OR nullable_field_2 is required!';\n END IF;\nEND\n\n\nA: Unfortunately, the answer provided by @RuiDC does not work in MySQL versions prior to 5.5 because there is no implementation of SIGNAL for stored procedures.\nThe solution I've found is to simulate a signal throwing a table_name doesn't exist error, pushing a customized error message into the table_name.\nThe hack could be implemented using triggers or using a stored procedure. I describe both options below following the example used by @RuiDC.\nUsing triggers\nDELIMITER $$\n-- before inserting new id\nDROP TRIGGER IF EXISTS before_insert_id$$\nCREATE TRIGGER before_insert_id\n BEFORE INSERT ON test FOR EACH ROW\n BEGIN\n -- condition to check\n IF NEW.id < 0 THEN\n -- hack to solve absence of SIGNAL/prepared statements in triggers\n UPDATE `Error: invalid_id_test` SET x=1;\n END IF;\n END$$\n\nDELIMITER ;\n\nUsing a stored procedure\nStored procedures allows you to use dynamic sql, which makes possible the encapsulation of the error generation functionality in one procedure. 
The counterpoint is that we should control the applications insert/update methods, so they use only our stored procedure (not granting direct privileges to INSERT/UPDATE).\nDELIMITER $$\n-- my_signal procedure\nCREATE PROCEDURE `my_signal`(in_errortext VARCHAR(255))\nBEGIN\n SET @sql=CONCAT('UPDATE `', in_errortext, '` SET x=1');\n PREPARE my_signal_stmt FROM @sql;\n EXECUTE my_signal_stmt;\n DEALLOCATE PREPARE my_signal_stmt;\nEND$$\n\nCREATE PROCEDURE insert_test(p_id INT)\nBEGIN\n IF NEW.id < 0 THEN\n CALL my_signal('Error: invalid_id_test; Id must be a positive integer');\n ELSE\n INSERT INTO test (id) VALUES (p_id);\n END IF;\nEND$$\nDELIMITER ;\n\n\nA: As of MySQL 5.5, you can use the SIGNAL syntax to throw an exception:\nsignal sqlstate '45000' set message_text = 'My Error Message';\n\nState 45000 is a generic state representing \"unhandled user-defined exception\".\n\nHere is a more complete example of the approach:\ndelimiter //\nuse test//\ncreate table trigger_test\n(\n id int not null\n)//\ndrop trigger if exists trg_trigger_test_ins //\ncreate trigger trg_trigger_test_ins before insert on trigger_test\nfor each row\nbegin\n declare msg varchar(128);\n if new.id < 0 then\n set msg = concat('MyTriggerError: Trying to insert a negative value in trigger_test: ', cast(new.id as char));\n signal sqlstate '45000' set message_text = msg;\n end if;\nend\n//\n\ndelimiter ;\n-- run the following as seperate statements:\ninsert into trigger_test values (1), (-1), (2); -- everything fails as one row is bad\nselect * from trigger_test;\ninsert into trigger_test values (1); -- succeeds as expected\ninsert into trigger_test values (-1); -- fails as expected\nselect * from trigger_test;\n\n\nA: The following procedure is (on mysql5) a way to throw custom errors , and log them at the same time:\ncreate table mysql_error_generator(error_field varchar(64) unique) engine INNODB;\nDELIMITER $$\nCREATE PROCEDURE throwCustomError(IN errorText VARCHAR(44))\nBEGIN\n DECLARE 
errorWithDate varchar(64);\n select concat(\"[\",DATE_FORMAT(now(),\"%Y%m%d %T\"),\"] \", errorText) into errorWithDate;\n INSERT IGNORE INTO mysql_error_generator(error_field) VALUES (errorWithDate);\n INSERT INTO mysql_error_generator(error_field) VALUES (errorWithDate);\nEND;\n$$\nDELIMITER ;\n\n\ncall throwCustomError(\"Custom error message with log support.\");\n\n\nA: DELIMITER @@\nDROP TRIGGER IF EXISTS trigger_name @@\nCREATE TRIGGER trigger_name \nBEFORE UPDATE ON table_name\nFOR EACH ROW\nBEGIN\n\n --the condition of error is: \n --if NEW update value of the attribute age = 1 and OLD value was 0\n --key word OLD and NEW let you distinguish between the old and new value of an attribute\n\n IF (NEW.state = 1 AND OLD.state = 0) THEN\n signal sqlstate '-20000' set message_text = 'hey it's an error!'; \n END IF;\n\nEND @@ \nDELIMITER ;\n\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/24\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"193\"\n}"}}},{"rowIdx":10,"cells":{"text":{"kind":"string","value":"Q: How to use the C socket API in C++ on z/OS I'm having issues getting the C sockets API to work properly in C++ on z/OS.\nAlthough I am including sys/socket.h, I still get compile time errors telling me that AF_INET is not defined.\nAm I missing something obvious, or is this related to the fact that being on z/OS makes my problems much more complicated?\nI discovered that there is an #ifdef that I'm hitting. 
Apparently z/OS isn't happy unless I define which \"type\" of sockets I'm using with:\n#define _OE_SOCKETS\n\nNow, I personally have no idea what this _OE_SOCKETS is actually for, so if any z/OS sockets programmers are out there (all 3 of you), perhaps you could give me a rundown of how this all works?\nTest App\n#include \n\nint main()\n{\n return AF_INET;\n}\n\nCompile/Link Output:\ncxx -Wc,xplink -Wl,xplink -o inet_test inet.C\n\n\"./inet.C\", line 5.16: CCN5274 (S) The name lookup for \"AF_INET\" did not find a declaration.\nCCN0797(I) Compilation failed for file ./inet.C. Object file not created.\n\nA check of sys/sockets.h does include the definition I need, and as far as I can tell, it is not being blocked by any #ifdef statements.\nI have however noticed it contains the following:\n#ifdef __cplusplus\n extern \"C\" {\n#endif\n\nwhich encapsulates basically the whole file? Not sure if it matters.\n\nA: Keep a copy of the IBM manuals handy:\n\n\n*\n\n* z/OS V1R11.0 XL C/C++ Programming Guide \n\n* z/OS V1R11.0 XL C/C++ Run-Time Library Reference\nThe IBM publications are generally very good, but you need to get used to their format, as well as knowing where to look for an answer. You'll find quite often that a feature that you want to use is guarded by a \"feature test macro\"\nYou should ask your friendly system programmer to install the XL C/C++ Run-Time Library Reference: Man Pages\n on your system. Then you can do things like \"man connect\" to pull up the man page for the socket connect() API. When I do that, this is what I see:\nFORMAT\nX/Open\n#define _XOPEN_SOURCE_EXTENDED 1\n#include \n\nint connect(int socket, const struct sockaddr *address, socklen_t address_len);\n\nBerkeley Sockets\n#define _OE_SOCKETS\n#include \n#include \n\nint connect(int socket, struct sockaddr *address, int address_len);\n\n\nA: I've had no trouble using the BSD sockets API in C++, in GNU/Linux. 
Here's the sample program I used:\n#include \n\nint\nmain()\n{\n return AF_INET;\n}\n\nSo my take on this is that z/OS is probably the complicating factor here, however, because I've never used z/OS before, much less programmed in it, I can't say this definitively. :-P\n\nA: See the Using z/OS UNIX System Services sockets section in the z/OS XL C/C++ Programming Guide. Make sure you're including the necessary header files and using the appropriate #defines.\nThe link to the doc has changed over the years, but you should be able to get to it easily enough by finding the current location of the Support & Downloads section on ibm.com and searching the documentation by title.\n\nA: So try\n#define _OE_SOCKETS\n\nbefore you include sys/socket.h\n\nA: The _OE_SOCKETS appears to be simply to enable/disable the definition of socket-related symbols. It is not uncommon in some libraries to have a bunch of macros to do that, to assure that you're not compiling/linking parts not needed. The macro is not standard in other sockets implementations, it appears to be something specific to z/OS.\nTake a look at this page:\nCompiling and Linking a z/VM C Sockets Program\n\nA: @Jax: The extern \"C\" thing matters, very very much. If a header file doesn't have one, then (unless it's a C++-only header file), you would have to enclose your #include with it:\nextern \"C\" {\n#include \n// include other similarly non-compliant header files\n}\n\nBasically, anytime where a C++ program wants to link to C-based facilities, the extern \"C\" is vital. In practical terms, it means that the names used in external references will not be mangled, like normal C++ names would. Reference.\n\nA: You may want to take a look to cpp-sockets, a C++ wrapper for the sockets system calls. It works with many operating systems (Win32, POSIX, Linux, *BSD). 
I don't think it will work with z/OS but you can take a look at the include files it uses and you'll have many examples of tested code that works well on other OSs.\n\nA: DISCLAIMER: I am not a C++ programmer, however I know C really well. I\nadapated these calls from some C code I have.\nAlso markdown put these strange _ as my underscores.\nYou should just be able to write an abstraction class around the C sockets with something like this:\nclass my_sock {\n private int sock;\n private int socket_type;\n private socklen_t sock_len;\n private struct sockaddr_in server_addr;\n public char *server_ip;\n public unsigned short server_port;\n};\n\nThen have methods for opening, closing, and sending packets down the socket.\nFor example, the open call might look something like this:\nint my_socket_connect()\n{\n int return_code = 0;\n\n if ( this->socket_type != CLIENT_SOCK ) {\n cout << \"This is a not a client socket!\\n\";\n return -1;\n }\n\n return_code = connect( this->local_sock, (struct sockaddr *) &this->server_addr, sizeof(this->server_addr));\n\n if( return_code < 0 ) {\n cout << \"Connect() failure! %s\\n\", strerror(errno);\n return return_code;\n }\n\n return return_code;\n}\n\n\nA: Use the following c89 flag:\n -D_OE_SOCKETS\n\nExample:\n bash-2.03$ c89 -D_OE_SOCKETS [filename].c\n\nFor more information look for c89 Options in the z/OS XLC/C++ User's Guide.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/25\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"175\"\n}"}}},{"rowIdx":11,"cells":{"text":{"kind":"string","value":"Q: How to unload a ByteArray using Actionscript 3? 
How do I forcefully unload a ByteArray from memory using ActionScript 3?\nI have tried the following:\n// First non-working solution\nbyteArray.length = 0;\nbyteArray = new ByteArray();\n\n// Second non-working solution\nfor ( var i:int=0; i < byteArray.length; i++ ) {\n byteArray[i] = null;\n}\n\n\nA: I don't think you have anything to worry about. If System.totalMemory goes down you can relax. It may very well be the OS that doesn't reclaim the newly freed memory (in anticipation of the next time Flash Player will ask for more memory).\nTry doing something else that is very memory intensive and I'm sure that you'll notice that the memory allocated to Flash Player will decrease and be used for the other process instead.\nAs I've understood it, memory management in modern OS's isn't intuitive from the perspective of looking at the amounts allocated to each process, or even the total amount allocated.\nWhen I've used my Mac for 5 minutes 95% of my 3 GB RAM is used, and it will stay that way, it never goes down. That's just the way the OS handles memory.\nAs long as it's not needed elsewhere even processes that have quit still have memory assigned to them (this can make them launch quicker the next time, for example).\n\nA: (I'm not positive about this, but...)\nAS3 uses a non-deterministic garbage collection which means that dereferenced memory will be freed up whenever the runtime feels like it (typically not unless there's a reason to run, since it's an expensive operation to execute). This is the same approach used by most modern garbage collecting languages (like C# and Java as well).\nAssuming there are no other references to the memory pointed to by byteArray or the items within the array itself, the memory will be freed at some point after you exit the scope where byteArray is declared.\nYou can force a garbage collection, though you really shouldn't. If you do, do it only for testing. 
If you do it in production, you'll hurt performance much more than help it.\nTo force a GC, try (yes, twice):\nflash.system.System.gc();\nflash.system.System.gc();\n\nYou can read more here.\n\nA: Have a look at this article\nhttp://www.gskinner.com/blog/archives/2006/06/as3_resource_ma.html\nIANA actionscript programmer, however the feeling I'm getting is that, because the garbage collector might not run when you want it to.\nHence\nhttp://www.craftymind.com/2008/04/09/kick-starting-the-garbage-collector-in-actionscript-3-with-air/\nSo I'd recommend trying out their collection code and see if it helps\nprivate var gcCount:int;\nprivate function startGCCycle():void{\n gcCount = 0;\n addEventListener(Event.ENTER_FRAME, doGC);\n}\nprivate function doGC(evt:Event):void{\n flash.system.System.gc();\n if(++gcCount > 1){\n removeEventListener(Event.ENTER_FRAME, doGC);\n setTimeout(lastGC, 40);\n }\n}\nprivate function lastGC():void{\n flash.system.System.gc();\n}\n\n\nA: Unfortunately when it comes to memory management in Flash/actionscript there isn't a whole lot you can do. ActionScript was designed to be easy to use (so they didn't want people to have to worry about memory management)\nThe following is a workaround, instead of creating a ByteArray variable try this.\nvar byteObject:Object = new Object();\n\nbyteObject.byteArray = new ByteArray();\n\n...\n\n//Then when you are finished delete the variable from byteObject\ndelete byteObject.byteArray;\n\nWhere byteArray is a dynamic property of byteObject, you can free the memory that was allocated for it.\n\nA: I believe you have answered your own question.\nSystem.totalMemory gives you the total amount of memory being \"used\", not allocated. 
It is accurate that your application may only be using 20 MB, but it has 5 MB that is free for future allocations.\nI'm not sure whether the Adobe docs would shed light on the way that it manages memory.\n\nA: \nSo, if I load say 20MB from MySQL, in the Task Manager the RAM for the application goes up by about 25MB. Then when I close the connection and try to dispose the ByteArray, the RAM never frees up. However, if I use System.totalMemory, flash player shows that the memory is being released, which is not the case.\nIs the flash player doing something like Java and reserving heap space and not releasing it until the app quits?\n\nWell yes and no, as you might have read from countless blog posts that the GC in AVM2 is optimistic and will work its own mysterious ways. So it does work a bit like Java and tries to reserve heap space. However if you let it long enough and start doing other operations that are consuming some significant memory, it will free that previous space. You can see this using the profiler overnight with some tests running on top of your app.\n\nA: \nSo, if I load say 20MB from MySQL, in the Task Manager the RAM for the application goes up by about 25MB. Then when I close the connection and try to dispose the ByteArray, the RAM never frees up. However, if I use System.totalMemory, flash player shows that the memory is being released, which is not the case.\n\nThe player is \"releasing\" the memory. If you minimize the window and restore it you should see that the memeory is now much closer to what System.totalMemory shows.\nYou might also be interested in using FlexBuilder's profiling tools which can show you if you really have memory leaks.\n\nA: Use bytearray.clear()\nAs per the Language Reference\nthis\n\nClears the contents of the byte array and resets the length and position properties to 0. 
Calling this method explicitly frees up the memory used by the ByteArray instance.\n\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/34\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"96\"\n}"}}},{"rowIdx":12,"cells":{"text":{"kind":"string","value":"Q: Check for changes to an SQL Server table? How can I monitor an SQL Server database for changes to a table without using triggers or modifying the structure of the database in any way? My preferred programming environment is .NET and C#.\nI'd like to be able to support any SQL Server 2000 SP4 or newer. My application is a bolt-on data visualization for another company's product. Our customer base is in the thousands, so I don't want to have to put in requirements that we modify the third-party vendor's table at every installation.\nBy \"changes to a table\" I mean changes to table data, not changes to table structure.\nUltimately, I would like the change to trigger an event in my application, instead of having to check for changes at an interval.\n\nThe best course of action given my requirements (no triggers or schema modification, SQL Server 2000 and 2005) seems to be to use the BINARY_CHECKSUM function in T-SQL. The way I plan to implement is this:\nEvery X seconds run the following query:\nSELECT CHECKSUM_AGG(BINARY_CHECKSUM(*))\nFROM sample_table\nWITH (NOLOCK);\n\nAnd compare that against the stored value. If the value has changed, go through the table row by row using the query:\nSELECT row_id, BINARY_CHECKSUM(*)\nFROM sample_table\nWITH (NOLOCK);\n\nAnd compare the returned checksums against stored values.\n\nA: Check the last commit date. Every database has a history of when each commit is made. 
I believe its a standard of ACID compliance.\n\nA: Unfortunately CHECKSUM does not always work properly to detect changes.\nIt is only a primitive checksum and no cyclic redundancy check (CRC) calculation.\nTherefore you can't use it to detect all changes, e. g. symmetrical changes result in the same CHECKSUM!\nE. g. the solution with CHECKSUM_AGG(BINARY_CHECKSUM(*)) will always deliver 0 for all 3 tables with different content:\n\nSELECT CHECKSUM_AGG(BINARY_CHECKSUM(*)) FROM \n(\n SELECT 1 as numA, 1 as numB\n UNION ALL\n SELECT 1 as numA, 1 as numB\n) q\n-- delivers 0!\n\nSELECT CHECKSUM_AGG(BINARY_CHECKSUM(*)) FROM \n(\n SELECT 1 as numA, 2 as numB\n UNION ALL\n SELECT 1 as numA, 2 as numB\n) q\n-- delivers 0!\n\nSELECT CHECKSUM_AGG(BINARY_CHECKSUM(*)) FROM \n(\n SELECT 0 as numA, 0 as numB\n UNION ALL\n SELECT 0 as numA, 0 as numB\n) q\n-- delivers 0!\n\n\nA: Why don't you want to use triggers? They are a good thing if you use them correctly. If you use them as a way to enforce referential integrity that is when they go from good to bad. But if you use them for monitoring, they are not really considered taboo.\n\nA: How often do you need to check for changes and how large (in terms of row size) are the tables in the database? If you use the CHECKSUM_AGG(BINARY_CHECKSUM(*)) method suggested by John, it will scan every row of the specified table. The NOLOCK hint helps, but on a large database, you are still hitting every row. You will also need to store the checksum for every row so that you tell one has changed.\nHave you considered going at this from a different angle? If you do not want to modify the schema to add triggers, (which makes a sense, it's not your database), have you considered working with the application vendor that does make the database? \nThey could implement an API that provides a mechanism for notifying accessory apps that data has changed. 
It could be as simple as writing to a notification table that lists what table and which row were modified. That could be implemented through triggers or application code. From your side, ti wouldn't matter, your only concern would be scanning the notification table on a periodic basis. The performance hit on the database would be far less than scanning every row for changes.\nThe hard part would be convincing the application vendor to implement this feature. Since this can be handles entirely through SQL via triggers, you could do the bulk of the work for them by writing and testing the triggers and then bringing the code to the application vendor. By having the vendor support the triggers, it prevent the situation where your adding a trigger inadvertently replaces a trigger supplied by the vendor.\n\nA: Unfortunately, I do not think that there is a clean way to do this in SQL2000. If you narrow your requirements to SQL Server 2005 (and later), then you are in business. You can use the SQLDependency class in System.Data.SqlClient. See Query Notifications in SQL Server (ADO.NET).\n\nA: Have a DTS job (or a job that is started by a windows service) that runs at a given interval. Each time it is run, it gets information about the given table by using the system INFORMATION_SCHEMA tables, and records this data in the data repository. Compare the data returned regarding the structure of the table with the data returned the previous time. 
If it is different, then you know that the structure has changed.\nExample query to return information regarding all of the columns in table ABC (ideally listing out just the columns from the INFORMATION_SCHEMA table that you want, instead of using *select ** like I do here):\nselect * from INFORMATION_SCHEMA.COLUMNS where TABLE_NAME = 'ABC'\n\nYou would monitor different columns and INFORMATION_SCHEMA views depending on how exactly you define \"changes to a table\".\n\nA: Wild guess here: If you don't want to modify the third party's tables, Can you create a view and then put a trigger on that view?\n\nA: Take a look at the CHECKSUM command:\nSELECT CHECKSUM_AGG(BINARY_CHECKSUM(*)) FROM sample_table WITH (NOLOCK);\n\nThat will return the same number each time it's run as long as the table contents haven't changed. See my post on this for more information:\nCHECKSUM\nHere's how I used it to rebuild cache dependencies when tables changed:\nASP.NET 1.1 database cache dependency (without triggers)\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/36\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"153\"\n}"}}},{"rowIdx":13,"cells":{"text":{"kind":"string","value":"Q: Reliable timer in a console application I am aware that in .NET there are three timer types (see Comparing the Timer Classes in the .NET Framework Class Library). 
I have chosen a threaded timer as the other types can drift if the main thread is busy, and I need this to be reliable.\nThe way this timer works in the control of the timer is put on another thread so it can always tick along with the work begin completed on the parent thread when it is not busy.\nThe issue with this timer in a console application is that while the timer is ticking along on another thread the main thread is not doing anything to the application closes.\nI tried adding a while true loop, but then the main thread is too busy when the timer does go off.\n\nA: You can use something like Console.ReadLine() to block the main thread, so other background threads (like timer threads) will still work. You may also use an AutoResetEvent to block the execution, then (when you need to) you can call Set() method on that AutoResetEvent object to release the main thread. Also ensure that your reference to Timer object doesn't go out of scope and garbage collected.\n\nA: Consider using a ManualResetEvent to block the main thread at the end of its processing, and call Reset() on it once the timer's processing has finished. 
If this is something that needs to run continuously, consider moving this into a service process instead of a console app.\n\nA: According to MSDN and the other answers, a minimal working example of a Console application using a System.Threading.Timer without exiting immediately :\nprivate static void Main()\n{\n using AutoResetEvent autoResetEvent = new AutoResetEvent(false);\n using Timer timer = new Timer(state => Console.WriteLine(\"One second has passed\"), autoResetEvent, TimeSpan.Zero, new TimeSpan(0, 0, 1));\n autoResetEvent.WaitOne();\n}\n\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/39\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"114\"\n}"}}},{"rowIdx":14,"cells":{"text":{"kind":"string","value":"Q: Best way to allow plugins for a PHP application I am starting a new web application in PHP and this time around I want to create something that people can extend by using a plugin interface. \nHow does one go about writing 'hooks' into their code so that plugins can attach to specific events?\n\nA: I am surprised that most of the answers here seem to be geared about plugins that are local to the web application, ie, plugins that run on the local web server.\nWhat about if you wanted the plugins to run on a different - remote - server? 
The best way to do this would be to provide a form that allows you to define different URLs that would be called when particular events occur in your application.\nDifferent events would send different information based on the event that just occurred.\nThis way, you would just perform a cURL call to the URL that has been provided to your application (eg over https) where remote servers can perform tasks based on information that has been sent by your application.\nThis provides two benefits:\n\n\n*\n\n*You don't have to host any code on your local server (security)\n\n*The code can be on remote servers (extensibility) in different languages other then PHP (portability)\n\n\nA: So let's say you don't want the Observer pattern because it requires that you change your class methods to handle the task of listening, and want something generic. And let's say you don't want to use extends inheritance because you may already be inheriting in your class from some other class. Wouldn't it be great to have a generic way to make any class pluggable without much effort? Here's how:\n_Class = get_class(&$RefObject);\n $this->_RefObject = $RefObject;\n }\n\n public function __set($sProperty,$mixed) {\n $sPlugin = $this->_Class . '_' . $sProperty . '_setEvent';\n if (is_callable($sPlugin)) {\n $mixed = call_user_func_array($sPlugin, $mixed);\n } \n $this->_RefObject->$sProperty = $mixed;\n }\n\n public function __get($sProperty) {\n $asItems = (array) $this->_RefObject;\n $mixed = $asItems[$sProperty];\n $sPlugin = $this->_Class . '_' . $sProperty . '_getEvent';\n if (is_callable($sPlugin)) {\n $mixed = call_user_func_array($sPlugin, $mixed);\n } \n return $mixed;\n }\n\n public function __call($sMethod,$mixed) {\n $sPlugin = $this->_Class . '_' . $sMethod . 
'_beforeEvent';\n if (is_callable($sPlugin)) {\n $mixed = call_user_func_array($sPlugin, $mixed);\n }\n if ($mixed != 'BLOCK_EVENT') {\n call_user_func_array(array(&$this->_RefObject, $sMethod), $mixed);\n $sPlugin = $this->_Class . '_' . $sMethod . '_afterEvent';\n if (is_callable($sPlugin)) {\n call_user_func_array($sPlugin, $mixed);\n } \n } \n }\n\n} //end class Plugin\n\nclass Pluggable extends Plugin {\n} //end class Pluggable\n\n////////////////////\n// PART 2\n////////////////////\n\nclass Dog {\n\n public $Name = '';\n\n public function bark(&$sHow) {\n echo \"$sHow
\\n\";\n }\n\n public function sayName() {\n echo \"
\\nMy Name is: \" . $this->Name . \"
\\n\";\n }\n\n\n} //end class Dog\n\n$Dog = new Dog();\n\n////////////////////\n// PART 3\n////////////////////\n\n$PDog = new Pluggable($Dog);\n\nfunction Dog_bark_beforeEvent(&$mixed) {\n $mixed = 'Woof'; // Override saying 'meow' with 'Woof'\n //$mixed = 'BLOCK_EVENT'; // if you want to block the event\n return $mixed;\n}\n\nfunction Dog_bark_afterEvent(&$mixed) {\n echo $mixed; // show the override\n}\n\nfunction Dog_Name_setEvent(&$mixed) {\n $mixed = 'Coco'; // override 'Fido' with 'Coco'\n return $mixed;\n}\n\nfunction Dog_Name_getEvent(&$mixed) {\n $mixed = 'Different'; // override 'Coco' with 'Different'\n return $mixed;\n}\n\n////////////////////\n// PART 4\n////////////////////\n\n$PDog->Name = 'Fido';\n$PDog->Bark('meow');\n$PDog->SayName();\necho 'My New Name is: ' . $PDog->Name;\n\nIn Part 1, that's what you might include with a require_once() call at the top of your PHP script. It loads the classes to make something pluggable.\nIn Part 2, that's where we load a class. Note I didn't have to do anything special to the class, which is significantly different than the Observer pattern.\nIn Part 3, that's where we switch our class around into being \"pluggable\" (that is, supports plugins that let us override class methods and properties). So, for instance, if you have a web app, you might have a plugin registry, and you could activate plugins here. Notice also the Dog_bark_beforeEvent() function. If I set $mixed = 'BLOCK_EVENT' before the return statement, it will block the dog from barking and would also block the Dog_bark_afterEvent because there wouldn't be any event.\nIn Part 4, that's the normal operation code, but notice that what you might think would run does not run like that at all. For instance, the dog does not announce it's name as 'Fido', but 'Coco'. The dog does not say 'meow', but 'Woof'. And when you want to look at the dog's name afterwards, you find it is 'Different' instead of 'Coco'. 
All those overrides were provided in Part 3.\nSo how does this work? Well, let's rule out eval() (which everyone says is \"evil\") and rule out that it's not an Observer pattern. So, the way it works is the sneaky empty class called Pluggable, which does not contain the methods and properties used by the Dog class. Thus, since that occurs, the magic methods will engage for us. That's why in parts 3 and 4 we mess with the object derived from the Pluggable class, not the Dog class itself. Instead, we let the Plugin class do the \"touching\" on the Dog object for us. (If that's some kind of design pattern I don't know about -- please let me know.)\n\nA: The hook and listener method is the most commonly used, but there are other things you can do. Depending on the size of your app, and who your going to allow see the code (is this going to be a FOSS script, or something in house) will influence greatly how you want to allow plugins.\nkdeloach has a nice example, but his implementation and hook function is a little unsafe. I would ask for you to give more information of the nature of php app your writing, And how you see plugins fitting in. \n+1 to kdeloach from me.\n\nA: Here is an approach I've used, it's an attempt to copy from Qt signals/slots mechanism, a kind of Observer pattern.\nObjects can emit signals.\nEvery signal has an ID in the system - it's composed by sender's id + object name\nEvery signal can be binded to the receivers, which simply is a \"callable\"\nYou use a bus class to pass the signals to anybody interested in receiving them\nWhen something happens, you \"send\" a signal. \nBelow is and example implementation\n login();\n\n?>\n\n\nA: I believe the easiest way would be to follow Jeff's own advice and have a look around the existing code. Try looking at WordPress, Drupal, Joomla, and other well-known PHP-based CMS to see how their API hooks look and feel. 
This way you can even get ideas you may have not thought of previously to make things a little more robust.\nA more direct answer would be to write general files that they would \"include_once\" into their file that would provide the usability they would need. This would be broken up into categories and NOT provided in one MASSIVE \"hooks.php\" file. Be careful though, because what ends up happening is that files that they include end up having more and more dependencies and functionality improves. Try to keep API dependencies low. I.E fewer files for them to include.\n\nA: You could use an Observer pattern. A simple functional way to accomplish this:\n\n\nOutput:\nThis is my CRAZY application\n4 + 5 = 9\n4 * 5 = 20\n\nNotes:\nFor this example source code, you must declare all your plugins before the actual source code that you want to be extendable. I've included an example of how to handle single or multiple values being passed to the plugin. The hardest part of this is writing the actual documentation which lists what arguments get passed to each hook.\nThis is just one method of accomplishing a plugin system in PHP. There are better alternatives, I suggest you check out the WordPress Documentation for more information.\n\nA: There's a neat project called Stickleback by Matt Zandstra at Yahoo that handles much of the work for handling plugins in PHP.\nIt enforces the interface of a plugin class, supports a command line interface and isn't too hard to get up and running - especially if you read the cover story about it in the PHP architect magazine.\n\nA: Good advice is to look how other projects have done it. Many call for having plugins installed and their \"name\" registered for services (like wordpress does) so you have \"points\" in your code where you call a function that identifies registered listeners and executes them. 
A standard OO design patter is the Observer Pattern, which would be a good option to implement in a truly object oriented PHP system.\nThe Zend Framework makes use of many hooking methods, and is very nicely architected. That would be a good system to look at.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/42\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"294\"\n}"}}},{"rowIdx":15,"cells":{"text":{"kind":"string","value":"Q: Multiple submit buttons in an HTML form Let's say you create a wizard in an HTML form. One button goes back, and one goes forward. Since the back button appears first in the markup when you press Enter, it will use that button to submit the form.\nExample:\n\n\n
\r\n \r\n \r\n\r\n \r\n \r\n\r\n \r\n \r\n
\n\n\nI would like to get to decide which button is used to submit the form when a user presses Enter. That way, when you press Enter the wizard will move to the next page, not the previous. Do you have to use tabindex to do this?\n\nA: From https://html.spec.whatwg.org/multipage/forms.html#implicit-submission\n\nA form element's default button is the first submit button in tree\n order whose form owner is that form element.\nIf the user agent supports letting the user submit a form implicitly\n (for example, on some platforms hitting the \"enter\" key while a text\n field is focused implicitly submits the form)...\n\nHaving the next input be type=\"submit\" and changing the previous input to type=\"button\" should give the desired default behavior.\n
\n \n\n \n \n
\n\n\nA: This is what I have tried out:\n\n\n*\n\n*You need to make sure you give your buttons different names\n\n*Write an if statement that will do the required action if either button is clicked.\n\n\n \n
\n \n\n \n \n
\n\nIn PHP,\nif(isset($_POST['prev']))\n{\n header(\"Location: previous.html\");\n die();\n}\n\nif(isset($_POST['next']))\n{\n header(\"Location: next.html\");\n die();\n}\n\n\nA: \n \n\nKeep the name of all submit buttons the same: \"prev\".\nThe only difference is the value attribute with unique values. When we create the script, these unique values will help us to figure out which of the submit buttons was pressed.\nAnd write the following coding:\n btnID = \"\"\nif Request.Form(\"prev\") = \"Previous Page\" then\n btnID = \"1\"\nelse if Request.Form(\"prev\") = \"Next Page\" then\n btnID = \"2\"\nend if\n\n\nA: I came across this question when trying to find an answer to basically the same thing, only with ASP.NET controls, when I figured out that the ASP button has a property called UseSubmitBehavior that allows you to set which one does the submitting.\n\n\nJust in case someone is looking for the ASP.NET button way to do it.\n\nA: Change the previous button type into a button like this: \n\n\nNow the Next button would be the default, plus you could also add the default attribute to it so that your browser will highlight it like so:\n\n\n\nA: Give your submit buttons the same name like this:\n\n\n\nWhen the user presses Enter and the request goes to the server, you can check the value for submitButton on your server-side code which contains a collection of form name/value pairs. 
For example, in ASP Classic:\nIf Request.Form(\"submitButton\") = \"Previous Page\" Then\n ' Code for the previous page\nElseIf Request.Form(\"submitButton\") = \"Next Page\" Then\n ' Code for the next page\nEnd If\n\nReference: Using multiple submit buttons on a single form\n\nA: With JavaScript (here jQuery), you can disable the prev button before submitting the form.\n$('form').on('keypress', function(event) {\n if (event.which == 13) {\n $('input[name=\"prev\"]').prop('type', 'button');\n }\n});\n\n\nA: If the fact that the first button is used by default is consistent across browsers, put them the right way around in the source code, and then use CSS to switch their apparent positions.\nfloat them left and right to switch them around visually, for example.\n\nA: Sometimes the provided solution by palotasb is not sufficient. There are use cases where for example a \"Filter\" submits button is placed above buttons like \"Next and Previous\". I found a workaround for this: copy the submit button which needs to act as the default submit button in a hidden div and place it inside the form above any other submit button.\nTechnically it will be submitted by a different button when pressing Enter than when clicking on the visible Next button. But since the name and value are the same, there's no difference in the result.\n\n\n\n\n \n\n\n
\n
\n \n
\n

\n

Filtered results

\n Filtered result 1\n Filtered result 2\n Filtered result 3\n
\n \n \n
\n
\n\n\n\n\n\nA: I solved a very similar problem in this way:\n\n\n*\n\n*If JavaScript is enabled (in most cases nowadays) then all the submit buttons are \"degraded\" to buttons at page load via JavaScript (jQuery). Click events on the \"degraded\" button typed buttons are also handled via JavaScript.\n\n*If JavaScript is not enabled then the form is served to the browser with multiple submit buttons. In this case hitting Enter on a textfield within the form will submit the form with the first button instead of the intended default, but at least the form is still usable: you can submit with both the prev and next buttons.\nWorking example:\n\n\n\r\n \r\n \r\n \r\n\r\n \r\n
\r\n If JavaScript is disabled, then you CAN submit the form\r\n with button1, button2 or button3.\r\n\r\n If you press enter on a text field, then the form is\r\n submitted with the first submit button.\r\n\r\n If JavaScript is enabled, then the submit typed buttons\r\n without the 'defaultSubmitButton' style are converted\r\n to button typed buttons.\r\n\r\n If you press Enter on a text field, then the form is\r\n submitted with the only submit button\r\n (the one with class defaultSubmitButton)\r\n\r\n If you click on any other button in the form, then the\r\n form is submitted with that button's value.\r\n\r\n
\r\n\r\n \r\n \r\n
\r\n\r\n \r\n \r\n
\r\n\r\n \r\n \r\n
\r\n\r\n \r\n \r\n\n\n\n\nA: This cannot be done with pure HTML. You must rely on JavaScript for this trick.\nHowever, if you place two forms on the HTML page you can do this.\nForm1 would have the previous button.\nForm2 would have any user inputs + the next button.\nWhen the user presses Enter in Form2, the Next submit button would fire.\n\nA: I would use JavaScript to submit the form. The function would be triggered by the OnKeyPress event of the form element and would detect whether the Enter key was selected. If this is the case, it will submit the form.\nHere are two pages that give techniques on how to do this: 1, 2. Based on these, here is an example of usage (based on here):\n\n\n\n\n\nA: If you really just want it to work like an install dialog, just give focus to the \"Next\" button OnLoad. \nThat way if the user hits Return, the form submits and goes forward. If they want to go back they can hit Tab or click on the button.\n\nA: You can do it with CSS.\nPut the buttons in the markup with the Next button first, then the Prev button afterwards.\nThen use CSS to position them to appear the way you want.\n\nA: I'm just doing the trick of floating the buttons to the right.\nThis way the Prev button is left of the Next button, but the Next comes first in the HTML structure:\n\n\n.f {\n float: right;\n}\n.clr {\n clear: both;\n}\n
\n \n
\n \n \n
\n
\n
\n\n\nBenefits over other suggestions: no JavaScript code, accessible, and both buttons remain type=\"submit\".\n\nA: This works without JavaScript or CSS in most browsers:\n
\n

\n

\n \n

\n
\n\nFirefox, Opera, Safari, and Google Chrome all work. As always, Internet Explorer is the problem.\nThis version works when JavaScript is turned on:\n
\n

\n

\n \n

\n
\n\nSo the flaw in this solution is:\nPrevious Page does not work if you use Internet Explorer with JavaScript off.\nMind you, the back button still works!\n\nA: If you have multiple active buttons on one page then you can do something like this:\nMark the first button you want to trigger on the Enter keypress as the default button on the form. For the second button, associate it with the Backspace button on the keyboard. The Backspace event code is 8.\n\n\n$(document).on(\"keydown\", function(event) {\r\n    if (event.which.toString() == \"8\") {\r\n        var findActiveElementsClosestForm = $(document.activeElement).closest(\"form\");\r\n\r\n        if (findActiveElementsClosestForm && findActiveElementsClosestForm.length) {\r\n            $(\"form#\" + findActiveElementsClosestForm[0].id + \" .secondary_button\").trigger(\"click\");\r\n        }\r\n    }\r\n});\n\r\n\r\n
\r\n \r\n \r\n
\n\n\n\nA: Changing the tab order should be all it takes to accomplish this. Keep it simple. \nAnother simple option would be to put the back button after the submit button in the HTML code but float it to the left so it appears on the page before the submit button. \n\nA: Another simple option would be to put the back button after the submit button in the HTML code, but float it to the left, so it appears on the page before the submit button.\nChanging the tab order should be all it takes to accomplish this. Keep it simple.\n\nA: The first time I came up against this, I came up with an onclick()/JavaScript hack when choices are not prev/next that I still like for its simplicity. It goes like this:\n@model myApp.Models.myModel\n\n\n\n
\n \n \n \n \n
\n\nWhen either submit button is clicked, it stores the desired operation in a hidden field (which is a string field included in the model the form is associated with) and submits the form to the Controller, which does all the deciding. In the Controller, you simply write:\n// Do operation according to which submit button was clicked\n// based on the contents of the hidden Operation field.\nif (myModel.Operation == \"Read\")\n{\n // Do read logic\n}\nelse if (myModel.Operation == \"Write\")\n{\n // Do write logic\n}\nelse\n{\n // Do error logic\n}\n\nYou can also tighten this up slightly using numeric operation codes to avoid the string parsing, but unless you play with enumerations, the code is less readable, modifiable, and self-documenting and the parsing is trivial, anyway.\n\nA: You can use Tabindex to solve this issue. Also changing the order of the buttons would be a more efficient way to achieve this.\nChange the order of the buttons and add float values to assign them the desired position you want to show in your HTML view.\n\nA: A maybe somewhat more modern approach over the CSS float method could be a solution using flexbox with the order property on the flex items. It could be something along those lines:\n
\n \n \n
\n\nOf course it depends on your document structure whether this is a feasible approach or not, but I find flex items much easier to control than floating elements.\n\nA: Instead of struggling with multiple submits, JavaScript or anything like that to do some previous/next stuff, an alternative would be to use a carousel to simulate the different pages.\nDoing this :\n\n*\n\n*You don't need multiple buttons, inputs or submits to do the previous/next thing, you have only one input type=\"submit\" in only one form.\n\n*The values in the whole form are there until the form is submitted.\n\n*The user can go to any previous page and any next page flawlessly to modify the values.\n\nExample using Bootstrap 5.0.0 :\n
\n
\n
\n \n
\n
\n \n
\n
\n \n
\n
\n Previous page\n Next page\n
\n\n\nA: I think this is an easy solution for this. Change the Previous button type to button, and add a new onclick attribute to the button with value jQuery(this).attr('type','submit');.\nSo, when the user clicks on the Previous button then its type will be changed to submit and the form will be submitted with the Previous button.\n
\n \n \n\n \n \n\n \n \n
\n\n\nA: Problem\nA form may have several submit buttons.\nWhen pressing return in any input, the first submit button is used by the browser.\nHowever, sometimes we want to use a different/later button as default.\nOptions\n\n*\n\n*Add a hidden submit button with the same action first (☹️ duplication)\n\n*Put the desired submit button first in the form and then move it to the correct place via CSS (☹️ may not be feasible, may result in cumbersome styling)\n\n*Change the handling of the return key in all form inputs via JavaScript (☹️ needs javascript)\n\nNone of the options is ideal, so we choose 3. because most browsers have JavaScript enabled.\nChosen solution\n\n\n// example implementation\ndocument.addEventListener('DOMContentLoaded', (ev) => {\n for (const defaultSubmitInput of document.querySelectorAll('[data-default-submit]')) {\n for (const formInput of defaultSubmitInput.form.querySelectorAll('input')) {\n if (formInput.dataset.ignoreDefaultSubmit != undefined) { continue; }\n formInput.addEventListener('keypress', (ev) => {\n if (ev.keyCode == 13) {\n ev.preventDefault();\n defaultSubmitInput.click();\n }\n })\n }\n }\n});\n \n
\n \n \n \n
\n\n\nIt may be useful to be able to remove the enhancement from some inputs. This can be achieved by:\n \n\nHere a complete code pen.\n\nA: Using the example you gave:\n
\n \n \n \n
\n\nIf you click on \"Previous Page\", only the value of \"prev\" will be submitted. If you click on \"Next Page\" only the value of \"next\" will be submitted.\nIf, however, you press Enter somewhere on the form, neither \"prev\" nor \"next\" will be submitted.\nSo using pseudocode you could do the following:\nIf \"prev\" submitted then\n    Previous Page was clicked\nElse If \"next\" submitted then\n    Next Page was clicked\nElse\n    No button was clicked\n\n\nA: When a button is clicked with a mouse (and hopefully by touch), it records the X,Y coordinates. This is not the case when it is invoked by a form, and these values are normally zero.\nSo you can do something like this:\nfunction(e) {\n    const isArtificial = e.screenX === 0 && e.screenY === 0\n        && e.x === 0 && e.y === 0\n        && e.clientX === 0 && e.clientY === 0;\n\n    if (isArtificial) {\n        return; // DO NOTHING\n    } else {\n        // OPTIONAL: Don't submit the form when clicked\n        // e.preventDefault();\n        // e.stopPropagation();\n    }\n\n    // ...Natural code goes here\n}\n\nQ: How do I get a distinct, ordered list of names from a DataTable using LINQ? I have a DataTable with a Name column. I want to generate a collection of the unique names ordered alphabetically. 
The following query ignores the order by clause.\nvar names =\n    (from DataRow dr in dataTable.Rows\n        orderby (string)dr[\"Name\"]\n        select (string)dr[\"Name\"]).Distinct();\n\nWhy does the orderby not get enforced?\n\nA: Try out the following:\ndataTable.Rows.Cast<DataRow>().Select(dr => dr[\"Name\"].ToString()).Distinct().OrderBy(name => name);\n\n\nA: The problem is that the Distinct\n operator does not guarantee that it will\n maintain the original order of\n values.\nSo your query will need to work like this\nvar names = (from DataRow dr in dataTable.Rows\n             select (string)dr[\"Name\"]).Distinct().OrderBy( name => name );\n\n\nA: To make it more readable and maintainable, you can also split it up into multiple LINQ statements.\n\n\n*\n\n*First, select your data into a new list, let's call it x1, do a projection if desired\n\n*Next, create a distinct list, from x1 into x2, using whatever distinction you require\n\n*Finally, create an ordered list, from x2 into x3, sorting by whatever you desire \n\n\nA: Try the following\nvar names = (from dr in dataTable.Rows\n             select (string)dr[\"Name\"]).Distinct().OrderBy(name => name);\n\nThis should work for what you need.\n\nA: To abstract: all of the answers have something in common.\nOrderBy needs to be the final operation.\n\nA: You can use something like this:\ndataTable.Rows.Cast<DataRow>().GroupBy(g => g[\"Name\"]).Select(s => s.First()).OrderBy(o => o[\"Name\"]);\n\n\nA: var sortedTable = (from results in resultTable.AsEnumerable()\nselect (string)results[attributeList]).Distinct().OrderBy(name => name);\n\nQ: Microsoft Office 2007 file type, Mime types and identifying characters Where can I find a list of all of the MIME types and the identifying characters 
for Microsoft Office 2007 files?\nI have an upload form that is restricting uploads based on the extensions and identifying characters, but I cannot seem to find the Office 2007 MIME types.\nCan anyone help?\n\nA: Office 2007 MIME Types for IIS\n\n\n*\n\n*.docm, application/vnd.ms-word.document.macroEnabled.12\n\n*.docx, application/vnd.openxmlformats-officedocument.wordprocessingml.document\n\n*.dotm, application/vnd.ms-word.template.macroEnabled.12\n\n*.dotx, application/vnd.openxmlformats-officedocument.wordprocessingml.template\n\n*.potm, application/vnd.ms-powerpoint.template.macroEnabled.12\n\n*.potx, application/vnd.openxmlformats-officedocument.presentationml.template\n\n*.ppam, application/vnd.ms-powerpoint.addin.macroEnabled.12\n\n*.ppsm, application/vnd.ms-powerpoint.slideshow.macroEnabled.12\n\n*.ppsx, application/vnd.openxmlformats-officedocument.presentationml.slideshow\n\n*.pptm, application/vnd.ms-powerpoint.presentation.macroEnabled.12\n\n*.pptx, application/vnd.openxmlformats-officedocument.presentationml.presentation\n\n*.xlam, application/vnd.ms-excel.addin.macroEnabled.12\n\n*.xlsb, application/vnd.ms-excel.sheet.binary.macroEnabled.12\n\n*.xlsm, application/vnd.ms-excel.sheet.macroEnabled.12\n\n*.xlsx, application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\n\n*.xltm, application/vnd.ms-excel.template.macroEnabled.12\n\n*.xltx, application/vnd.openxmlformats-officedocument.spreadsheetml.template\n\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/61\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"52\"\n}"}}},{"rowIdx":18,"cells":{"text":{"kind":"string","value":"Q: Paging a collection with LINQ How do you page through a collection in LINQ given that you have a startIndex and a count?\n\nA: It is very simple with the Skip and Take extension methods.\nvar query = from i in ideas\n select i;\n\nvar paggedCollection = 
query.Skip(startIndex).Take(count);\n\n\nA: A few months back I wrote a blog post about Fluent Interfaces and LINQ which used an Extension Method on IQueryable and another class to provide the following natural way of paginating a LINQ collection.\nvar query = from i in ideas\n select i;\nvar pagedCollection = query.InPagesOf(10);\nvar pageOfIdeas = pagedCollection.Page(2);\n\nYou can get the code from the MSDN Code Gallery Page: Pipelines, Filters, Fluent API and LINQ to SQL.\n\nA: I solved this a bit differently than what the others have as I had to make my own paginator, with a repeater. So I first made a collection of page numbers for the collection of items that I have:\n// assumes that the item collection is \"myItems\"\n\nint pageCount = (myItems.Count + PageSize - 1) / PageSize;\n\nIEnumerable pageRange = Enumerable.Range(1, pageCount);\n // pageRange contains [1, 2, ... , pageCount]\n\nUsing this I could easily partition the item collection into a collection of \"pages\". A page in this case is just a collection of items (IEnumerable). This is how you can do it using Skip and Take together with selecting the index from the pageRange created above:\nIEnumerable> pageRange\n .Select((page, index) => \n myItems\n .Skip(index*PageSize)\n .Take(PageSize));\n\nOf course you have to handle each page as an additional collection but e.g. 
if you're nesting repeaters then this is actually easy to handle.\n\nThe one-liner TLDR version would be this:\nvar pages = Enumerable\n .Range(0, pageCount)\n .Select((index) => myItems.Skip(index*PageSize).Take(PageSize));\n\nWhich can be used as this:\nfor (Enumerable page : pages) \n{\n // handle page\n\n for (Item item : page) \n {\n // handle item in page\n }\n}\n\n\nA: This question is somewhat old, but I wanted to post my paging algorithm that shows the whole procedure (including user interaction).\nconst int pageSize = 10;\nconst int count = 100;\nconst int startIndex = 20;\n\nint took = 0;\nbool getNextPage;\nvar page = ideas.Skip(startIndex);\n\ndo\n{\n Console.WriteLine(\"Page {0}:\", (took / pageSize) + 1);\n foreach (var idea in page.Take(pageSize))\n {\n Console.WriteLine(idea);\n }\n\n took += pageSize;\n if (took < count)\n {\n Console.WriteLine(\"Next page (y/n)?\");\n char answer = Console.ReadLine().FirstOrDefault();\n getNextPage = default(char) != answer && 'y' == char.ToLowerInvariant(answer);\n\n if (getNextPage)\n {\n page = page.Skip(pageSize);\n }\n }\n}\nwhile (getNextPage && took < count);\n\nHowever, if you are after performance, and in production code, we're all after performance, you shouldn't use LINQ's paging as shown above, but rather the underlying IEnumerator to implement paging yourself. 
As a matter of fact, it is as simple as the LINQ-algorithm shown above, but more performant:\nconst int pageSize = 10;\nconst int count = 100;\nconst int startIndex = 20;\n\nint took = 0;\nbool getNextPage = true;\nusing (var page = ideas.Skip(startIndex).GetEnumerator())\n{\n do \n {\n Console.WriteLine(\"Page {0}:\", (took / pageSize) + 1);\n\n int currentPageItemNo = 0;\n while (currentPageItemNo++ < pageSize && page.MoveNext())\n {\n var idea = page.Current;\n Console.WriteLine(idea);\n }\n\n took += pageSize;\n if (took < count)\n {\n Console.WriteLine(\"Next page (y/n)?\");\n char answer = Console.ReadLine().FirstOrDefault();\n getNextPage = default(char) != answer && 'y' == char.ToLowerInvariant(answer);\n }\n }\n while (getNextPage && took < count);\n}\n\nExplanation: The downside of using Skip() for multiple times in a \"cascading manner\" is, that it will not really store the \"pointer\" of the iteration, where it was last skipped. - Instead the original sequence will be front-loaded with skip calls, which will lead to \"consuming\" the already \"consumed\" pages over and over again. - You can prove that yourself, when you create the sequence ideas so that it yields side effects. -> Even if you have skipped 10-20 and 20-30 and want to process 40+, you'll see all side effects of 10-30 being executed again, before you start iterating 40+.\nThe variant using IEnumerable's interface directly, will instead remember the position of the end of the last logical page, so no explicit skipping is needed and side effects won't be repeated.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/66\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"89\"\n}"}}},{"rowIdx":19,"cells":{"text":{"kind":"string","value":"Q: How do I add existing comments to RDoc in Ruby? 
I want to format my existing comments as 'RDoc comments' so they can be viewed using ri.\nWhat are some recommended resources for starting out using RDoc?\n\nA: A few things that have bitten me:\n\n\n*\n\n*:main: -- RDoc uses only the last one evaluated; best to make sure there's only one in your project and you don't also use the --main command-line argument.\n\n*same as previous, but for :title:\n\n*:section: doesn't work very well\n\n\nA: RDoc uses SimpleMarkup so it's fairly simple to create lists, etc. using *, - or a number. It also treats lines that are indented at the same column number as part of the same paragraph until there is an empty line which signifies a new paragraph. Do you have a few examples of comments you want RDoc'ed so we could show you how to do them and then you could extrapolate that for the rest of your comments?\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/72\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"48\"\n}"}}},{"rowIdx":20,"cells":{"text":{"kind":"string","value":"Q: Getting Subclipse in Aptana to work with the newest release of Subversion The version of Subclipse (1.2.4) currently available through Aptana's automatic Plugins Manager does not work with the newest version of Subversion.\nI see on the Subclipse website however that they have 1.4.2 out for Eclipse. So I added a new remote update site to my Update manager. When I tried to install it, it told me I needed Mylyn 3.0.0. So after much searching I found Mylyn 3.0.0 and added another new remote update site to my update manager. Then when I tried to install that, it told me I needed org.eclipse.ui 3.3.0 or equivalent.\nLooking at the configuration details for Aptana, it looks like it is built against eclipse 3.2.2.\nDoes anyone know if there is a way to upgrade the version of Eclipse Aptana that is built against to 3.3.0? 
Or if there is some other way to get Subclipse to work with the very newest version of Subversion?\nI know this isn't necessarily a \"programming\" question, but I hope it's ok since it's highly relevant to the programming experience.\n\nA: I've had problems with JavaHL in Eclipse Ganymede, when it worked fine in Eclipse Europa. I'm not sure how Aptana is different, but try either upgrading JavaHL or switching to the pure-java SVNKit implementation within the Subclipse config.\n\nA: if you're not going to be using mylyn just uncheck that dependency. I'm not really familiar with Aptana, but in eclipse you can expand whats being installed and uncheck anything you don't need.\n\nA: I used the update url and I installed the JavaHL adapter, the Subclipse project itself and the SVNKit adapter BETA.\nAfter this it worked fine for me, this is for linux platform hope it works for you.\n\nA: Subclipse does not require Mylyn, but the update site includes a plugin that integrates Mylyn and Subclipse. This is intended for people that use Mylyn. In your case, you would want to just de-select Mylyn in the update dialog.\nSubclipse also requires Subversion 1.5 and the corresponding version of the JavaHL native libraries. I have written the start of an FAQ to help people understand JavaHL and how to get it. 
See: http://desktop-eclipse.open.collab.net/wiki/JavaHL\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/79\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"50\"\n}"}}},{"rowIdx":21,"cells":{"text":{"kind":"string","value":"Q: SQLStatement.execute() - multiple queries in one statement I've written a database generation script in SQL and want to execute it in my Adobe AIR application:\nCreate Table tRole (\n roleID integer Primary Key\n ,roleName varchar(40)\n);\nCreate Table tFile (\n fileID integer Primary Key\n ,fileName varchar(50)\n ,fileDescription varchar(500)\n ,thumbnailID integer\n ,fileFormatID integer\n ,categoryID integer\n ,isFavorite boolean\n ,dateAdded date\n ,globalAccessCount integer\n ,lastAccessTime date\n ,downloadComplete boolean\n ,isNew boolean\n ,isSpotlight boolean\n ,duration varchar(30)\n);\nCreate Table tCategory (\n categoryID integer Primary Key\n ,categoryName varchar(50)\n ,parent_categoryID integer\n);\n...\n\nI execute this in Adobe AIR using the following methods:\npublic static function RunSqlFromFile(fileName:String):void {\n var file:File = File.applicationDirectory.resolvePath(fileName);\n var stream:FileStream = new FileStream();\n stream.open(file, FileMode.READ)\n var strSql:String = stream.readUTFBytes(stream.bytesAvailable);\n NonQuery(strSql);\n}\n\npublic static function NonQuery(strSQL:String):void {\n var sqlConnection:SQLConnection = new SQLConnection();\n sqlConnection.open(File.applicationStorageDirectory.resolvePath(DBPATH));\n var sqlStatement:SQLStatement = new SQLStatement();\n sqlStatement.text = strSQL;\n sqlStatement.sqlConnection = sqlConnection;\n try {\n sqlStatement.execute();\n } catch (error:SQLError) {\n Alert.show(error.toString());\n }\n}\n\nNo errors are generated, however only tRole exists. 
It seems that it only looks at the first query (up to the semicolon — if I remove it, the query fails). Is there a way to call multiple queries in one statement?\n\nA: What about making your delimiter something a little more complex, like \";\\n\", which would not show up all that often? You just have to ensure when creating the file you have a line return or two in there. I end up putting two \"\\n\\n\" into the creation of my files which works well.\n\nA: I wound up using this. It is a kind of a hack, but it actually works pretty well. \nThe only thing is you have to be very careful with your semicolons. : D\nvar strSql:String = stream.readUTFBytes(stream.bytesAvailable); \nvar i:Number = 0;\nvar strSqlSplit:Array = strSql.split(\";\");\nfor (i = 0; i < strSqlSplit.length; i++){\n    NonQuery(strSqlSplit[i].toString());\n}\n\n\nA: The SQLite API has a function called something like sqlite_prepare which takes one statement and prepares it for execution, essentially parsing the SQL and storing it in memory. This means that the SQL only has to be sent once to the database engine even though the statement is executed many times.\nAnyway, a statement is a single SQL query, that's just the rule. The AIR SQL API doesn't allow sending raw SQL to SQLite, only single statements, and the reason is, likely, that AIR uses the sqlite_prepare function when it talks to SQLite.\n\nQ: Flat file databases What are the best practices around creating flat file database structures in PHP?\nThere are a lot of more mature PHP flat-file frameworks out there which attempt to implement SQL-like query syntax, which is over the top for my purposes in most cases. 
(I would just use a database at that point).\nAre there any elegant tricks out there to get good performance and features with a small code overhead?\n\nA: Here's the code we use for Lilina:\n\n * @package Lilina\n * @version 1.0\n * @license http://opensource.org/licenses/gpl-license.php GNU Public License\n */\n\n/**\n * Handler for persistent data files\n *\n * @package Lilina\n */\nclass DataHandler {\n /**\n * Directory to store data.\n *\n * @since 1.0\n *\n * @var string\n */\n protected $directory;\n\n /**\n * Constructor, duh.\n *\n * @since 1.0\n * @uses $directory Holds the data directory, which the constructor sets.\n *\n * @param string $directory \n */\n public function __construct($directory = null) {\n if ($directory === null)\n $directory = get_data_dir();\n\n if (substr($directory, -1) != '/')\n $directory .= '/';\n\n $this->directory = (string) $directory;\n }\n\n /**\n * Prepares filename and content for saving\n *\n * @since 1.0\n * @uses $directory\n * @uses put()\n *\n * @param string $filename Filename to save to\n * @param string $content Content to save to cache\n */\n public function save($filename, $content) {\n $file = $this->directory . $filename;\n\n if(!$this->put($file, $content)) {\n trigger_error(get_class($this) . 
\" error: Couldn't write to $file\", E_USER_WARNING);\n return false;\n }\n\n return true;\n }\n\n /**\n * Saves data to file\n *\n * @since 1.0\n * @uses $directory\n *\n * @param string $file Filename to save to\n * @param string $data Data to save into $file\n */\n protected function put($file, $data, $mode = false) {\n if(file_exists($file) && file_get_contents($file) === $data) {\n touch($file);\n return true;\n }\n\n if(!$fp = @fopen($file, 'wb')) {\n return false;\n }\n\n fwrite($fp, $data);\n fclose($fp);\n\n $this->chmod($file, $mode);\n return true;\n\n }\n\n /**\n * Change the file permissions\n *\n * @since 1.0\n *\n * @param string $file Absolute path to file\n * @param integer $mode Octal mode\n */\n protected function chmod($file, $mode = false){\n if(!$mode)\n $mode = 0644;\n return @chmod($file, $mode);\n }\n\n /**\n * Returns the content of the cached file if it is still valid\n *\n * @since 1.0\n * @uses $directory\n * @uses check() Check if cache file is still valid\n *\n * @param string $id Unique ID for content type, used to distinguish between different caches\n * @return null|string Content of the cached file if valid, otherwise null\n */\n public function load($filename) {\n return $this->get($this->directory . 
$filename);\n }\n\n /**\n * Returns the content of the file\n *\n * @since 1.0\n * @uses $directory\n * @uses check() Check if file is valid\n *\n * @param string $id Filename to load data from\n * @return bool|string Content of the file if valid, otherwise null\n */\n protected function get($filename) {\n if(!$this->check($filename))\n return null;\n\n return file_get_contents($filename);\n }\n\n /**\n * Check a file for validity\n *\n * Basically just a fancy alias for file_exists(), made primarily to be\n * overriden.\n *\n * @since 1.0\n * @uses $directory\n *\n * @param string $id Unique ID for content type, used to distinguish between different caches\n * @return bool False if the cache doesn't exist or is invalid, otherwise true\n */\n protected function check($filename){\n return file_exists($filename);\n }\n\n /**\n * Delete a file\n *\n * @param string $filename Unique ID\n */\n public function delete($filename) {\n return unlink($this->directory . $filename);\n }\n}\n\n?>\n\nIt stores each entry as a separate file, which we found is efficient enough for use (no unneeded data is loaded and it's faster to save).\n\nA: IMHO, you have two... er, three options if you want to avoid homebrewing something:\n\n*\n\n*SQLite\nIf you're familiar with PDO, you can install a PDO driver that supports SQLite. Never used it, but I have used PDO a ton with MySQL. I'm going to give this a shot on a current project.\n\n\n*XML\nDone this many times for relatively small amounts of data. XMLReader is a lightweight, read-forward, cursor-style class. SimpleXML makes it simple to read an XML document into an object that you can access just like any other class instance.\n\n\n*JSON (update)\n\nGood option for smallish amounts of data, just read/write file and json_decode/json_encode. Not sure if PHP offers a structure to navigate a JSON tree without loading it all in memory though.\n\nA: Well, what is the nature of the flat databases. Are they large or small. 
Is it simple arrays with arrays in them? if its something simple say userprofiles built as such:\n$user = array(\"name\" => \"bob\", \n \"age\" => 20,\n \"websites\" => array(\"example.com\",\"bob.example.com\",\"bob2.example.com\"),\n \"and_one\" => \"more\");\n\nand to save or update the db record for that user.\n$dir = \"../userdata/\"; //make sure to put it bellow what the server can reach.\nfile_put_contents($dir.$user['name'],serialize($user));\n\nand to load the record for the user\nfunction &get_user($name){\n return unserialize(file_get_contents(\"../userdata/\".$name));\n}\n\nbut again this implementation will vary on the application and nature of the database you need.\n\nA: If you're going to use a flat file to persist data, use XML to structure the data. PHP has a built-in XML parser.\n\nA: If you want a human-readable result, you can also use this type of file :\nofaurax|27|male|something|\nanother|24|unknown||\n...\n\nThis way, you have only one file, you can debug it (and manually fix) easily, you can add fields later (at the end of each line) and the PHP code is simple (for each line, split according to |).\nHowever, the drawbacks is that you should parse the entire file to search something (if you have millions of entry, it's not fine) and you should handle the separator in data (for example if the nick is WaR|ordz).\n\nA: I have written two simple functions designed to store data in a file. 
You can judge for yourself if it's useful in this case.\nThe point is to save a php variable (if it's either an array a string or an object) to a file.\n $value) {\n if ($value === 'AAAAB3NzaC1yc2EAAAABIwAAAQEAqytmUAQKMOj24lAjqKJC2Gyqhbhb+DmB9eDDb8+QcFI+QOySUpYDn884rgKB6EAtoFyOZVMA6HlNj0VxMKAGE+sLTJ40rLTcieGRCeHJ/TI37e66OrjxgB+7tngKdvoG5EF9hnoGc4eTMpVUDdpAK3ykqR1FIclgk0whV7cEn/6K4697zgwwb5R2yva/zuTX+xKRqcZvyaF3Ur0Q8T+gvrAX8ktmpE18MjnA5JuGuZFZGFzQbvzCVdN52nu8i003GEFmzp0Ny57pWClKkAy3Q5P5AR2BCUwk8V0iEX3iu7J+b9pv4LRZBQkDujaAtSiAaeG2cjfzL9xIgWPf+J05IQ==')\n {\n $var=$oldvalue;\n return $var_name;\n }\n }\n $var=$oldvalue;\n return false;\n}\n\nfunction putphp(&$var, $file=false)\n {\n $varname=varname($var);\n if(!$file)\n {\n $file=$varname.'.php';\n }\n $pathinfo=pathinfo($file);\n if(file_exists($file))\n {\n if(is_dir($file))\n {\n $file=$pathinfo['dirname'].'/'.$pathinfo['basename'].'/'.$varname.'.php';\n }\n }\n file_put_contents($file,' array([Field_0]=>Value_0,[Field_1]=>Value_1...\n- MIXD (Mixed)\nMixed databases can store both free elements and tables.If you add a table to a free db or a free element to a structured db, flat fire will automatically convert FREE or SRCT to MIXD database.\n[DATABASE]\n/ \\\nEX TY\n\n\nA: You might consider SQLite. It's almost as simple as flat files, but you do get a SQL engine for querying. It works well with PHP too.\n\nA: Just pointing out a potential problem with a flat file database with this type of system:\ndata|some text|more data\n\nrow 2 data|bla hbalh|more data\n\n...etc\nThe problem is that the cell data contains a \"|\" or a \"\\n\" then the data will be lost. Sometimes it would be easier to split by combinations of letters that most people wouldn't use.\nFor example:\nColumn splitter: #$% (Shift+345)\nRow splitter: ^&* (Shift+678)\nText file: test data#$%blah blah#$%^&*new row#$%new row data 2\nThen use: explode(\"#$%\", $data); use foreach, the explode again to separate columns\nOr anything along these lines. 
Also, I might add that flat file databases are good for systems with small amounts of data (ie. less than 20 rows), but become huge memory hogs for larger databases.\n\nA: In my opinion, using a \"Flat File Database\" in the sense you're meaning (and the answer you've accepted) isn't necessarily the best way to go about things. First of all, using serialize() and unserialize() can cause MAJOR headaches if someone gets in and edits the file (they can, in fact, put arbitrary code in your \"database\" to be run each time.)\nPersonally, I'd say - why not look to the future? There have been so many times that I've had issues because I've been creating my own \"proprietary\" files, and the project has exploded to a point where it needs a database, and I'm thinking \"you know, I wish I'd written this for a database to start with\" - because the refactoring of the code takes way too much time and effort.\nFrom this I've learnt that future proofing my application so that when it gets bigger I don't have to go and spend days refactoring is the way to go forward. How do I do this?\nSQLite. It works as a database, uses SQL, and is pretty easy to change over to MySQL (especially if you're using abstracted classes for database manipulation like I do!)\nIn fact, especially with the \"accepted answer\"'s method, it can drastically cut the memory usage of your app (you don't have to load all the \"RECORDS\" into PHP)\n\nA: One framework I'm considering would be for a blogging platform. 
Since just about any possible view of data you would want would be sorted by date, I was thinking about this structure:\nOne directory per content node:\n./content/YYYYMMDDHHMMSS/\n\nSubdirectories of each node including \n/tags \n/authors \n/comments \n\nAs well as simple text files in the node directory for pre- and post-rendered content and the like.\nThis would allow a simple PHP glob() call (and probably a reversal of the result array) to query on just about anything within the content structure:\nglob(\"content/*/tags/funny\"); \n\nWould return paths including all articles tagged \"funny\".\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/85\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"131\"\n}"}}},{"rowIdx":23,"cells":{"text":{"kind":"string","value":"Q: Is gettimeofday() guaranteed to be of microsecond resolution? I am porting a game, that was originally written for the Win32 API, to Linux (well, porting the OS X port of the Win32 port to Linux).\nI have implemented QueryPerformanceCounter by giving the uSeconds since the process start up: \nBOOL QueryPerformanceCounter(LARGE_INTEGER* performanceCount)\n{\n gettimeofday(&currentTimeVal, NULL);\n performanceCount->QuadPart = (currentTimeVal.tv_sec - startTimeVal.tv_sec);\n performanceCount->QuadPart *= (1000 * 1000);\n performanceCount->QuadPart += (currentTimeVal.tv_usec - startTimeVal.tv_usec);\n\n return true;\n}\n\nThis, coupled with QueryPerformanceFrequency() giving a constant 1000000 as the frequency, works well on my machine, giving me a 64-bit variable that contains uSeconds since the program's start-up.\nSo is this portable? I don't want to discover it works differently if the kernel was compiled in a certain way or anything like that. 
I am fine with it being non-portable to something other than Linux, however.\n\nA: \nThe actual resolution of gettimeofday() depends on the hardware architecture. Intel processors as well as SPARC machines offer high resolution timers that measure microseconds. Other hardware architectures fall back to the system’s timer, which is typically set to 100 Hz. In such cases, the time resolution will be less accurate. \n\nI obtained this answer from High Resolution Time Measurement and Timers, Part I\n\nA: \nSo it says microseconds explicitly, but says the resolution of the system clock is unspecified. I suppose resolution in this context means how the smallest amount it will ever be incremented?\n\nThe data structure is defined as having microseconds as a unit of measurement, but that doesn't mean that the clock or operating system is actually capable of measuring that finely.\nLike other people have suggested, gettimeofday() is bad because setting the time can cause clock skew and throw off your calculation. clock_gettime(CLOCK_MONOTONIC) is what you want, and clock_getres() will tell you the precision of your clock.\n\nA: Maybe. But you have bigger problems. gettimeofday() can result in incorrect timings if there are processes on your system that change the timer (ie, ntpd). On a \"normal\" linux, though, I believe the resolution of gettimeofday() is 10us. It can jump forward and backward and time, consequently, based on the processes running on your system. This effectively makes the answer to your question no.\nYou should look into clock_gettime(CLOCK_MONOTONIC) for timing intervals. It suffers from several less issues due to things like multi-core systems and external clock settings.\nAlso, look into the clock_getres() function.\n\nA: This answer mentions problems with the clock being adjusted. 
Both your problems guaranteeing tick units and the problems with the time being adjusted are solved in C++11 with the library.\nThe clock std::chrono::steady_clock is guaranteed not to be adjusted, and furthermore it will advance at a constant rate relative to real time, so technologies like SpeedStep must not affect it.\nYou can get typesafe units by converting to one of the std::chrono::duration specializations, such as std::chrono::microseconds. With this type there's no ambiguity about the units used by the tick value. However, keep in mind that the clock doesn't necessarily have this resolution. You can convert a duration to attoseconds without actually having a clock that accurate.\n\nA: High Resolution, Low Overhead Timing for Intel Processors\nIf you're on Intel hardware, here's how to read the CPU real-time instruction counter. It will tell you the number of CPU cycles executed since the processor was booted. This is probably the finest-grained counter you can get for performance measurement.\nNote that this is the number of CPU cycles. On linux you can get the CPU speed from /proc/cpuinfo and divide to get the number of seconds. Converting this to a double is quite handy.\nWhen I run this on my box, I get\n11867927879484732\n11867927879692217\nit took this long to call printf: 207485\n\nHere's the Intel developer's guide that gives tons of detail.\n#include \n#include \n\ninline uint64_t rdtsc() {\n uint32_t lo, hi;\n __asm__ __volatile__ (\n \"xorl %%eax, %%eax\\n\"\n \"cpuid\\n\"\n \"rdtsc\\n\"\n : \"=a\" (lo), \"=d\" (hi)\n :\n : \"%ebx\", \"%ecx\");\n return (uint64_t)hi << 32 | lo;\n}\n\nmain()\n{\n unsigned long long x;\n unsigned long long y;\n x = rdtsc();\n printf(\"%lld\\n\",x);\n y = rdtsc();\n printf(\"%lld\\n\",y);\n printf(\"it took this long to call printf: %lld\\n\",y-x);\n}\n\n\nA: From my experience, and from what I've read across the internet, the answer is \"No,\" it is not guaranteed. 
It depends on CPU speed, operating system, flavor of Linux, etc.\n\nA: Reading the RDTSC is not reliable in SMP systems, since each CPU maintains their own counter and each counter is not guaranteed to by synchronized with respect to another CPU.\nI might suggest trying clock_gettime(CLOCK_REALTIME). The posix manual indicates that this should be implemented on all compliant systems. It can provide a nanosecond count, but you probably will want to check clock_getres(CLOCK_REALTIME) on your system to see what the actual resolution is.\n\nA: @Bernard:\n\nI have to admit, most of your example went straight over my head. It does compile, and seems to work, though. Is this safe for SMP systems or SpeedStep?\n\nThat's a good question... I think the code's ok.\nFrom a practical standpoint, we use it in my company every day,\nand we run on a pretty wide array of boxes, everything from 2-8 cores.\nOf course, YMMV, etc, but it seems to be a reliable and low-overhead\n(because it doesn't make a context switch into system-space) method\nof timing.\nGenerally how it works is:\n\n\n*\n\n*declare the block of code to be assembler (and volatile, so the\noptimizer will leave it alone).\n\n*execute the CPUID instruction. In addition to getting some CPU information\n(which we don't do anything with) it synchronizes the CPU's execution buffer\nso that the timings aren't affected by out-of-order execution.\n\n*execute the rdtsc (read timestamp) execution. This fetches the number of\nmachine cycles executed since the processor was reset. 
This is a 64-bit\nvalue, so with current CPU speeds it will wrap around every 194 years or so.\nInterestingly, in the original Pentium reference, they note it wraps around every\n5800 years or so.\n\n*the last couple of lines store the values from the registers into\nthe variables hi and lo, and put that into the 64-bit return value.\n\n\nSpecific notes:\n\n\n*\n\n*out-of-order execution can cause incorrect results, so we execute the\n\"cpuid\" instruction which in addition to giving you some information\nabout the cpu also synchronizes any out-of-order instruction execution.\n\n*Most OS's synchronize the counters on the CPUs when they start, so\nthe answer is good to within a couple of nano-seconds.\n\n*The hibernating comment is probably true, but in practice you\nprobably don't care about timings across hibernation boundaries.\n\n*regarding speedstep: Newer Intel CPUs compensate for the speed\nchanges and returns an adjusted count. I did a quick scan over\nsome of the boxes on our network and found only one box that\ndidn't have it: a Pentium 3 running some old database server.\n(these are linux boxes, so I checked with: grep constant_tsc /proc/cpuinfo)\n\n*I'm not sure about the AMD CPUs, we're primarily an Intel shop,\nalthough I know some of our low-level systems gurus did an\nAMD evaluation.\nHope this satisfies your curiosity, it's an interesting and (IMHO)\nunder-studied area of programming. You know when Jeff and Joel were\ntalking about whether or not a programmer should know C? I was\nshouting at them, \"hey forget that high-level C stuff... 
assembler\nis what you should learn if you want to know what the computer is\ndoing!\"\n\nA: You may be interested in Linux FAQ for clock_gettime(CLOCK_REALTIME)\n\nA: Wine is actually using gettimeofday() to implement QueryPerformanceCounter() and it is known to make many Windows games work on Linux and Mac.\nStarts http://source.winehq.org/source/dlls/kernel32/cpu.c#L312\nleads to http://source.winehq.org/source/dlls/ntdll/time.c#L448\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/88\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"106\"\n}"}}},{"rowIdx":24,"cells":{"text":{"kind":"string","value":"Q: How do you branch and merge with TortoiseSVN? How do you branch and merge with Apache Subversion using the TortoiseSVN client? \n\nA: You can also try Version Control for the Standalone Programmer - Part 1 or perhaps Merging with TortoiseSVN.\n\nA: My easy click-by-click instructions (specific to TortoiseSVN) are in Stack Overflow question What is the simplest way to do branching and merging using TortoiseSVN?.\n\nA: Version Control with Subversion\nA very good resource for source control in general. Not really TortoiseSVN specific, though.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/90\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"164\"\n}"}}},{"rowIdx":25,"cells":{"text":{"kind":"string","value":"Q: Anatomy of a \"Memory Leak\" In .NET perspective:\n\n\n*\n\n*What is a memory leak?\n\n*How can you determine whether your application leaks? What are the effects?\n\n*How can you prevent a memory leak?\n\n*If your application has memory leak, does it go away when the process exits or is killed? 
Or do memory leaks in your application affect other processes on the system even after process completion?\n\n*And what about unmanaged code accessed via COM Interop and/or P/Invoke?\n\n\nA: \nI guess in a managed environment, a\n leak would be you keeping an\n unnecessary reference to a large chunk\n of memory around.\n\nAbsolutely. Also, not using the .Dispose() method on disposable objects when appropriate can cause mem leaks. The easiest way to do it is with a using block because it automatically executes .Dispose() at the end:\nStreamReader sr;\nusing(sr = new StreamReader(\"somefile.txt\"))\n{\n //do some stuff\n}\n\nAnd if you create a class that is using unmanaged objects, if you're not implementing IDisposable correctly, you could be causing memory leaks for your class's users.\n\nA: All memory leaks are resolved by program termination. \nLeak enough memory and the Operating System may decide to resolve the problem on your behalf.\n\nA: I will concur with Bernard as to in .net what a mem leak would be.\nYou could profile your application to see its memory use, and determine that if its managing a lot of memory when it should not be you could say it has a leak.\nIn managed terms I will put my neck on the line to say it does go away once the process is killed/removed.\nUnmanaged code is its own beast and if a leak exists within it, it will follow a standard mem. leak definition.\n\nA: Also keep in mind that .NET has two heaps, one being the large object heap. I believe objects of roughly 85k or larger are put on this heap. This heap has a different lifetime rules than the regular heap.\nIf you are creating large memory structures (Dictionary's or List's) it would prudent to go lookup what the exact rules are.\nAs far as reclaiming the memory on process termination, unless your running Win98 or it equivalents, everything is released back to the OS on termination. 
The only exceptions are things that are opened cross-process and another process still has the resource open.\nCOM Objects can be tricky tho. If you always use the IDispose pattern, you'll be safe. But I've run across a few interop assemblies that implement IDispose. The key here is to call Marshal.ReleaseCOMObject when you're done with it. The COM Objects still use standard COM reference counting.\n\nA: I found .Net Memory Profiler a very good help when finding memory leaks in .Net. It's not free like the Microsoft CLR Profiler, but is faster and more to the point in my opinion. A\n\nA: Strictly speaking, a memory leak is consuming memory that is \"no longer used\" by the program.\n\"No longer used\" has more than one meaning, it could mean \"no more reference to it\", that is, totally unrecoverable, or it could mean, referenced, recoverable, unused but the program keeps the references anyway. Only the later applies to .Net for perfectly managed objects. However, not all classes are perfect and at some point an underlying unmanaged implementation could leak resources permanently for that process.\nIn all cases, the application consumes more memory than strictly needed. The sides effects, depending on the ammount leaked, could go from none, to slowdown caused by excessive collection, to a series of memory exceptions and finally a fatal error followed by forced process termination.\nYou know an application has a memory problem when monitoring shows that more and more memory is allocated to your process after each garbage collection cycle. In such case, you are either keeping too much in memory, or some underlying unmanaged implementation is leaking.\nFor most leaks, resources are recovered when the process is terminated, however some resources are not always recovered in some precise cases, GDI cursor handles are notorious for that. 
Of course, if you have an interprocess communication mechanism, memory allocated in the other process would not be freed until that process frees it or terminates.\n\nA: I think the \"what is a memory leak\" and \"what are the effects\" questions have been answered well already, but I wanted to add a few more things on the other questions...\nHow to understand whether your application leaks\nOne interesting way is to open perfmon and add traces for # bytes in all heaps and # Gen 2 collections , in each case looking just at your process. If exercising a particular feature causes the total bytes to increase, and that memory remains allocated after the next Gen 2 collection, you might say that the feature leaks memory.\nHow to prevent\nOther good opinions have been given. I would just add that perhaps the most commonly overlooked cause of .NET memory leaks is to add event handlers to objects without removing them. An event handler attached to an object is a form of reference to that object, so will prevent collection even after all other references have gone. Always remember to detach event handlers (using the -= syntax in C#).\nDoes the leak go away when the process exits, and what about COM interop?\nWhen your process exits, all memory mapped into its address space is reclaimed by the OS, including any COM objects served from DLLs. Comparatively rarely, COM objects can be served from separate processes. In this case, when your process exits, you may still be responsible for memory allocated in any COM server processes that you used.\n\nA: I would define memory leaks as an object not freeing up all the memory allocated after it has completed. I have found this can happen in your application if you are using Windows API and COM (i.e. unmanaged code that has a bug in it or is not being managed correctly), in the framework and in third party components. 
I have also found not tiding up after using certain objects like pens can cause the issue.\nI personally have suffered Out of Memory Exceptions which can be caused but are not exclusive to memory leaks in dot net applications. (OOM can also come from pinning see Pinning Artical). If you are not getting OOM errors or need to confirm if it is a memory leak causing it then the only way is to profile your application.\nI would also try and ensure the following:\na) Everything that implements Idisposable is disposed either using a finally block or the using statement these include brushes, pens etc.(some people argue to set everything to nothing in addition) \nb)Anything that has a close method is closed again using finally or the using statement (although I have found using does not always close depending if you declared the object outside the using statement)\nc)If you are using unmanaged code/windows API's that these are dealt with correctly after. (some have clean up methods to release resources)\nHope this helps.\n\nA: If you need to diagnose a memory leak in .NET, check these links:\nhttp://msdn.microsoft.com/en-us/magazine/cc163833.aspx\nhttp://msdn.microsoft.com/en-us/magazine/cc164138.aspx\nThose articles describe how to create a memory dump of your process and how to analyze it so that you can first determine if your leak is unmanaged or managed, and if it is managed, how to figure out where it is coming from.\nMicrosoft also has a newer tool to assist with generating crash dumps, to replace ADPlus, called DebugDiag.\nhttp://www.microsoft.com/downloads/details.aspx?FamilyID=28bd5941-c458-46f1-b24d-f60151d875a3&displaylang=en\n\nA: Using CLR Profiler from Microsoft http://www.microsoft.com/downloads/details.aspx?familyid=86ce6052-d7f4-4aeb-9b7a-94635beebdda&displaylang=en is a great way to determine which objects are holding memory, what execution flow leads to the creation of these objects, and also monitoring which objects live where on the heap 
(fragmentation, LOH, etc.).\n\nA: The best explanation of how the garbage collector works is in Jeff Richters CLR via C# book, (Ch. 20). Reading this gives a great grounding for understanding how objects persist.\nOne of the most common causes of rooting objects accidentally is by hooking up events outisde a class. If you hook up an external event \ne.g.\nSomeExternalClass.Changed += new EventHandler(HandleIt);\n\nand forget to unhook to it when you dispose, then SomeExternalClass has a ref to your class.\nAs mentioned above, the SciTech memory profiler is excellent at showing you roots of objects you suspect are leaking. \nBut there is also a very quick way to check a particular type is just use WnDBG (you can even use this in the VS.NET immediate window while attached):\n.loadby sos mscorwks\n!dumpheap -stat -type \n\nNow do something that you think will dispose the objects of that type (e.g. close a window). It's handy here to have a debug button somewhere that will run System.GC.Collect() a couple of times.\nThen run !dumpheap -stat -type again. If the number didn't go down, or didn't go down as much as you expect, then you have a basis for further investigation.\n(I got this tip from a seminar given by Ingo Rammer).\n\nA: I guess in a managed environment, a leak would be you keeping an unnecessary reference to a large chunk of memory around.\n\nA: The best explanation I've seen is in Chapter 7 of the free Foundations of Programming e-book.\nBasically, in .NET a memory leak occurs when referenced objects are rooted and thus cannot be garbage collected. This occurs accidentally when you hold on to references beyond the intended scope.\nYou'll know that you have leaks when you start getting OutOfMemoryExceptions or your memory usage goes up beyond what you'd expect (PerfMon has nice memory counters). \nUnderstanding .NET's memory model is your best way of avoiding it. 
Specifically, understanding how the garbage collector works and how references work — again, I refer you to chapter 7 of the e-book. Also, be mindful of common pitfalls, probably the most common being events. If object A is registered to an event on object B, then object A will stick around until object B disappears because B holds a reference to A. The solution is to unregister your events when you're done. \nOf course, a good memory profile will let you see your object graphs and explore the nesting/referencing of your objects to see where references are coming from and what root object is responsible (red-gate ants profile, JetBrains dotMemory, memprofiler are really good choices, or you can use the text-only WinDbg and SOS, but I'd strongly recommend a commercial/visual product unless you're a real guru).\nI believe unmanaged code is subject to its typical memory leaks, except that shared references are managed by the garbage collector. I could be wrong about this last point.\n\nA: Why do people think that an memory leak in .NET is not the same as any other leak?\nA memory leak is when you attach to a resource and do not let it go. You can do this both in managed and in unmanaged coding.\nRegarding .NET, and other programming tools, there have been ideas about garbage collecting, and other ways of minimizing situations that will make your application leak.\nBut the best method of preventing memory leaks is that you need to understand your underlying memory model, and how things works, on the platform you are using.\nBelieving that GC and other magic will clean up your mess is the short way to memory leaks, and will be difficult to find later.\nWhen coding unmanaged, you normally make sure to clean up, you know that the resources you take hold of, will be your responsibility to clean up, not the janitor's.\nIn .NET on the other hand, a lot of people think that the GC will clean up everything. Well, it does some for you, but you need to make sure that it is so. 
.NET does wrap lots of things, so you do not always know if you are dealing with a managed or unmanaged resource, and you need to make sure what what you're dealing with. Handling fonts, GDI resources, active directory, databases etc is typically things you need to look out for.\n\nIn managed terms I will put my neck on\n the line to say it does go away once\n the process is killed/removed.\n\nI see lots of people have this though, and I really hope this will end. You cannot ask the user to terminate your app to clean up your mess!\nTake a look at a browser, that can be IE, FF etc, then open, say, Google Reader, let it stay for some days, and look at what happens. \nIf you then open another tab in the browser, surf to some site, then close the tab that hosted the other page that made the browser leak, do you think the browser will release the memory? Not so with IE. On my computer IE will easily eat 1 GiB of memory in a short amount of time (about 3-4 days) if I use Google Reader. Some newspages are even worse.\n\nA: One definition is: Unable to release unreachable memory, which can no longer be allocated to new process during execution of allocating process. It can mostly be cured by using GC techniques or detected by automated tools.\nFor more information, please visit http://all-about-java-and-weblogic-server.blogspot.in/2014/01/what-is-memory-leak-in-java.html.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/104\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"181\"\n}"}}},{"rowIdx":26,"cells":{"text":{"kind":"string","value":"Q: Best Subversion clients for Windows Vista (64bit) I've been using TortoiseSVN in a Windows environment for quite some time. It seems very feature-complete and nicely integrated into the Windows shell, and more importantly, it's fairly painless to teach to colleagues with little or no experience with source control. 
However, since we have moved to Windows Vista 64bit, Tortoise has been very buggy and has seemed to cause lots of explorer.exe abnormalities and crashes. This has happened both with older versions of the software and the latest version (1.5.1 build 13563).\nI was curious if anyone has suggestions for other Subversion clients that will run on Windows (specifically Vista 64bit). Developers here use a variety of text editors so using Visual Studio or Dreamweaver for SVN is not ideal.\nI have heard great things about Cornerstone, and would love something similar for Windows if it exists.\n\nI'm correlating the Vista/explorer problems with Tortoise because they normally occur when I'm using the functionality in Tortoise. Sometimes bringing up the \"merge\" screen will cause the GUI to start acting very strange and eventually hang or crash.\nI did not see 1.5.2 -- I'm installing now, maybe that will fix some of my issues.\n\nA: I'll second Diago's answer. I use TortoiseSVN on Vista x64 pretty heavily.\nI did upgrade directly from an older version to 1.5.2 though, and never used 1.5.1. Have you tried 1.5.2?\n\nA: I used to have lots of Explorer crashes (on 32-bit) caused by Tortoise. They seem to have gone away since I used the Include/Exclude path settings in the \"Icon Overlays\" configuration of TSVN. Constraining icon overlays to specific directories where I keep my source made this much more stable.\n\nA: I too get explorer crashes in Vista (I'm not in the 64Bit version though). I'm using Vista Super Saijen (or whatever they are calling the most expensive version). I'm not having any bugs with Tortoise.\nMy explorer does, however, crash about every other day (sometimes multiple times a day if it's having an \"off\" day). I'm not positive it's being caused by TortoiseSVN though. From what I hear, the explorer just crashes a lot in Vista...\nHave you tried uninstalling Tortoise and using Windows for a day or two and seeing if it still crashes? 
Do you restart your computer at least once a day (It seems the longer I go between restarts, the worse the crashes get)?\n\nA: Tortoise SVN with Ankhsvn for VS 2005\n\n\nA: Run both 32 and 64 bit clients.... otherwise explorer instances launched from 32bit processes ( including load and save dialogs) will have no Tortoise menus.\nAlso upgrade to latest 1.5.3 at time of answer.\n\nA: I have Tortoise installed but rarely use it over SmartSVN. It is a Java-based application and so does not look like a native Windows application, but performs very well. There is a free version with reduced functionality, but the paid-for version is not very expensive ($79) and well worth the money. The biggest benefit I find is a real-time view similar to the \"check for modifications\" feature in Tortoise, which auto-refreshes every time the UI gets focus. You can easily see what you've changed across your whole source tree. It also has shell integration, although I can't comment on that feature as I haven't installed it because I already had Tortoise installed.\n\nA: I have been using the 64Bit version of TortoiseSVN for ages and I have never had issues with it on Windows 64Bit or Vista 64Bit. I am currently not aware of any other similiar SVN clients that do work on Vista. Is it possible the problem could lie within the configuration of TortoiseSVN or even the installation of Vista? Is the problem occurring on Vista native or SP 1?\n\nA: TortoiseSVN in combination with VisualSVN for Visual Studio.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/108\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"52\"\n}"}}},{"rowIdx":27,"cells":{"text":{"kind":"string","value":"Q: Decoding T-SQL CAST in C#/VB.NET Recently our site has been deluged with the resurgence of the Asprox botnet SQL injection attack. 
Without going into details, the attack attempts to execute SQL code by encoding the T-SQL commands in an ASCII encoded BINARY string. It looks something like this:\nDECLARE%20@S%20NVARCHAR(4000);SET%20@S=CAST(0x44004500...06F007200%20AS%20NVARCHAR(4000));EXEC(@S);--\n\nI was able to decode this in SQL, but I was a little wary of doing this since I didn't know exactly what was happening at the time.\nI tried to write a simple decode tool, so I could decode this type of text without even touching SQL  Server. The main part I need to be decoded is:\nCAST(0x44004500...06F007200 AS\nNVARCHAR(4000))\n\nI've tried all of the following commands with no luck:\ntxtDecodedText.Text =\n System.Web.HttpUtility.UrlDecode(txtURLText.Text);\ntxtDecodedText.Text =\n Encoding.ASCII.GetString(Encoding.ASCII.GetBytes(txtURLText.Text));\ntxtDecodedText.Text =\n Encoding.Unicode.GetString(Encoding.Unicode.GetBytes(txtURLText.Text));\ntxtDecodedText.Text =\n Encoding.ASCII.GetString(Encoding.Unicode.GetBytes(txtURLText.Text));\ntxtDecodedText.Text =\n Encoding.Unicode.GetString(Convert.FromBase64String(txtURLText.Text));\n\nWhat is the proper way to translate this encoding without using SQL Server? Is it possible? 
I'll take VB.NET code since I'm familiar with that too.\n\nOkay, I'm sure I'm missing something here, so here's where I'm at.\nSince my input is a basic string, I started with just a snippet of the encoded portion - 4445434C41 (which translates to DECLA) - and the first attempt was to do this...\ntxtDecodedText.Text = Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(txtURL.Text));\n\n...and all it did was return the exact same thing that I put in since it converted each character into is a byte.\nI realized that I need to parse every two characters into a byte manually since I don't know of any methods yet that will do that, so now my little decoder looks something like this:\nwhile (!boolIsDone)\n{\n bytURLChar = byte.Parse(txtURLText.Text.Substring(intParseIndex, 2));\n bytURL[intURLIndex] = bytURLChar;\n intParseIndex += 2;\n intURLIndex++;\n\n if (txtURLText.Text.Length - intParseIndex < 2)\n {\n boolIsDone = true;\n }\n}\n\ntxtDecodedText.Text = Encoding.UTF8.GetString(bytURL);\n\nThings look good for the first couple of pairs, but then the loop balks when it gets to the \"4C\" pair and says that the string is in the incorrect format.\nInterestingly enough, when I step through the debugger and to the GetString method on the byte array that I was able to parse up to that point, I get \",-+\" as the result.\nHow do I figure out what I'm missing - do I need to do a \"direct cast\" for each byte instead of attempting to parse it?\n\nA: Try removing the 0x first and then call Encoding.UTF8.GetString. 
I think that may work.\nEssentially: 0x44004500\nRemove the 0x, and then always two bytes are one character:\n44 00 = D\n\n45 00 = E\n\n6F 00 = o\n\n72 00 = r\n\nSo it's definitely a Unicode/UTF format with two bytes/character.\n\nA: I went back to Michael's post, did some more poking and realized that I did need to do a double conversion, and eventually worked out this little nugget:\nConvert.ToString(Convert.ToChar(Int32.Parse(EncodedString.Substring(intParseIndex, 2), System.Globalization.NumberStyles.HexNumber)));\n\nFrom there I simply made a loop to go through all the characters 2 by 2 and get them \"hexified\" and then translated to a string.\nTo Nick, and anybody else interested, I went ahead and posted my little application over in CodePlex. Feel free to use/modify as you need.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/109\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"68\"\n}"}}},{"rowIdx":28,"cells":{"text":{"kind":"string","value":"Q: ASP.NET Site Maps Does anyone have experience creating SQL-based ASP.NET site-map providers?\nI have the default XML file web.sitemap working properly with my Menu and SiteMapPath controls, but I'll need a way for the users of my site to create and modify pages dynamically.\nI need to tie page viewing permissions into the standard ASP.NET membership system as well.\n\nA: The Jeff Prosise version from MSDN magazine works pretty well, but it has a few flaws:\nAddNode freaks out with links to external sites on your menu (www.google.com, etc.)\nHere's my fix in BuildSiteMap():\nSiteMapNode node = GetSiteMapNodeFromReader(reader);\nstring url = node.Url;\nif (url.Contains(\":\"))\n{\n string garbage = Guid.NewGuid().ToString(); // SiteMapNode needs unique URLs\n node.Url = \"~/dummy_\" + garbage + \".aspx\";\n AddNode(node, _root);\n node.Url = url;\n}\nelse\n{\n AddNode(node, _root);\n}\n\nSQLDependency 
caching is cool, but if you don't want to make a trip to the DB everytime your menu loads (to check to see if the dependency has changed) and your menus don't change very often, then why not use HttpRuntime.Cache instead?\npublic override SiteMapNode RootNode\n{\n get\n {\n SiteMapNode temp = (SiteMapNode)HttpRuntime.Cache[\"SomeKeyName\"];\n if (temp == null)\n {\n temp = BuildSiteMap();\n HttpRuntime.Cache.Insert(\"SomeKeyName\", temp, null, DateTime.Now.AddHours(1), Cache.NoSlidingExpiration);\n }\n return temp;\n }\n}\n\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/120\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"45\"\n}"}}},{"rowIdx":29,"cells":{"text":{"kind":"string","value":"Q: Java lib or app to convert CSV to XML file? Is there an existing application or library in Java which will allow me to convert a CSV data file to XML file? \nThe XML tags would be provided through possibly the first row containing column headings.\n\nA: As far as I know, there's no ready-made library to do this for you, but producing a tool capable of translating from CSV to XML should only require you to write a crude CSV parser and hook up JDOM (or your XML Java library of choice) with some glue code.\n\nA: There is nothing I know of that can do this without you at least writing a little bit of code... 
You will need 2 separate library:\n\n\n*\n\n*A CSV Parser Framework \n\n*An XML Serialization Framework\n\n\nThe CSV parser I would recommend (unless you want to have a little bit of fun to write your own CSV Parser) is OpenCSV (A SourceForge Project for parsing CSV Data)\nThe XML Serialization Framework should be something that can scale in case you want to transform large (or huge) CSV file to XML: My recommendation is the Sun Java Streaming XML Parser Framework (See here) which allows pull-parsing AND serialization.\n\nA: There is also good library ServingXML by Daniel Parker, which is able to convert almost any plain text format to XML and back.\nThe example for your case can be found here: It uses heading of field in CSV file as the XML element name.\n\nA: Maybe this might help: JSefa\nYou can read CSV file with this tool and serialize it to XML.\n\nA: As the others above, I don't know any one-step way to do that, but if you are ready to use very simple external libraries, I would suggest:\nOpenCsv for parsing CSV (small, simple, reliable and easy to use)\nXstream to parse/serialize XML (very very easy to use, and creating fully human readable xml)\nUsing the same sample data as above, code would look like:\npackage fr.megiste.test;\n\nimport java.io.FileReader;\nimport java.io.FileWriter;\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport au.com.bytecode.opencsv.CSVReader;\n\nimport com.thoughtworks.xstream.XStream;\n\npublic class CsvToXml { \n\n public static void main(String[] args) {\n\n String startFile = \"./startData.csv\";\n String outFile = \"./outData.xml\";\n\n try {\n CSVReader reader = new CSVReader(new FileReader(startFile));\n String[] line = null;\n\n String[] header = reader.readNext();\n\n List out = new ArrayList();\n\n while((line = reader.readNext())!=null){\n List item = new ArrayList();\n for (int i = 0; i < header.length; i++) {\n String[] keyVal = new String[2];\n String string = header[i];\n String val = line[i];\n 
keyVal[0] = string;\n keyVal[1] = val;\n item.add(keyVal);\n }\n out.add(item);\n }\n\n XStream xstream = new XStream();\n\n xstream.toXML(out, new FileWriter(outFile,false));\n\n } catch (Exception e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n }\n}\n\nProducing the following result:\n(Xstream allows very fine tuning of the result...)\n\n \n \n string\n hello world\n \n \n float1\n 1.0\n \n \n float2\n 3.3\n \n \n integer\n 4\n \n \n \n \n string\n goodbye world\n \n \n float1\n 1e9\n \n \n float2\n -3.3\n \n \n integer\n 45\n \n \n \n \n string\n hello again\n \n \n float1\n -1\n \n \n float2\n 23.33\n \n \n integer\n 456\n \n \n \n \n string\n hello world 3\n \n \n float1\n 1.40\n \n \n float2\n 34.83\n \n \n integer\n 4999\n \n \n \n \n string\n hello 2 world\n \n \n float1\n 9981.05\n \n \n float2\n 43.33\n \n \n integer\n 444\n \n \n\n\n\nA: This may be too basic or limited of a solution, but couldn't you do a String.split() on each line of the file, remembering the result array of the first line to generate the XML, and just spit each line's array data out with the proper XML elements padding each iteration of a loop?\n\nA: Jackson processor family has backends for multiple data formats, not just JSON. This includes both XML (https://github.com/FasterXML/jackson-dataformat-xml) and CSV (https://github.com/FasterXML/jackson-dataformat-csv/) backends.\nConversion would rely on reading input with CSV backend, write using XML backend. This is easiest to do if you have (or can define) a POJO for per-row (CSV) entries. This is not a strict requirement, as content from CSV may be read \"untyped\" as well (a sequence of String arrays), but requires bit more work on XML output.\nFor XML side, you would need a wrapper root object to contain array or List of objects to serialize.\n\nA: I know you asked for Java, but this strikes me as a task well suited to a scripting language. 
Here is a quick (very simple) solution written in Groovy.\ntest.csv\nstring,float1,float2,integer\nhello world,1.0,3.3,4\ngoodbye world,1e9,-3.3,45\nhello again,-1,23.33,456\nhello world 3,1.40,34.83,4999\nhello 2 world,9981.05,43.33,444\n\ncsvtoxml.groovy\n#!/usr/bin/env groovy\n\ndef csvdata = []\nnew File(\"test.csv\").eachLine { line ->\n csvdata << line.split(',')\n}\n\ndef headers = csvdata[0]\ndef dataRows = csvdata[1..-1]\n\ndef xml = new groovy.xml.MarkupBuilder()\n\n// write 'root' element\nxml.root {\n dataRows.eachWithIndex { dataRow, index ->\n // write 'entry' element with 'id' attribute\n entry(id:index+1) {\n headers.eachWithIndex { heading, i ->\n // write each heading with associated content\n \"${heading}\"(dataRow[i])\n }\n }\n }\n}\n\nWrites the following XML to stdout:\n\n \n hello world\n 1.0\n 3.3\n 4\n \n \n goodbye world\n 1e9\n -3.3\n 45\n \n \n hello again\n -1\n 23.33\n 456\n \n \n hello world 3\n 1.40\n 34.83\n 4999\n \n \n hello 2 world\n 9981.05\n 43.33\n 444\n \n\n\nHowever, the code does very simple parsing (not taking into account quoted or escaped commas) and it does not account for possible absent data.\n\nA: For the CSV Part, you may use my little open source library\n\nA: I had the same problem and needed an application to convert a CSV file to a XML file for one of my projects, but didn't find anything free and good enough on the net, so I coded my own Java Swing CSVtoXML application.\nIt's available from my website HERE. Hope it will help you. \nIf not, you can easily code your own like I did; The source code is inside the jar file so modify it as you need if it doesn't fill your requirement.\n\nA: I have an opensource framework for working with CSV and flat files in general. 
Maybe it's worth looking: JFileHelpers.\nWith that toolkit you can write code using beans, like:\n@FixedLengthRecord()\npublic class Customer {\n @FieldFixedLength(4)\n public Integer custId;\n\n @FieldAlign(alignMode=AlignMode.Right)\n @FieldFixedLength(20)\n public String name;\n\n @FieldFixedLength(3)\n public Integer rating;\n\n @FieldTrim(trimMode=TrimMode.Right)\n @FieldFixedLength(10)\n @FieldConverter(converter = ConverterKind.Date, \n format = \"dd-MM-yyyy\")\n public Date addedDate;\n\n @FieldFixedLength(3)\n @FieldOptional\n public String stockSimbol; \n}\n\nand then just parse your text files using:\nFileHelperEngine engine = \n new FileHelperEngine(Customer.class); \nList customers = \n new ArrayList();\n\ncustomers = engine.readResource(\n \"/samples/customers-fixed.txt\");\n\nAnd you'll have a collection of parsed objects.\nHope that helps!\n\nA: This solution does not need any CSV or XML libraries and, I know, it does not handle any illegal characters and encoding issues, but you might be interested in it as well, provided your CSV input does not break the above mentioned rules.\nAttention: You should not use this code unless you know what you do or don't have the chance to use a further library (possible in some bureaucratic projects)... 
Use a StringBuffer for older Runtime Environments...\nSo here we go:\nBufferedReader reader = new BufferedReader(new InputStreamReader(\n Csv2Xml.class.getResourceAsStream(\"test.csv\")));\nStringBuilder xml = new StringBuilder();\nString lineBreak = System.getProperty(\"line.separator\");\nString line = null;\nList headers = new ArrayList();\nboolean isHeader = true;\nint count = 0;\nint entryCount = 1;\nxml.append(\"\");\nxml.append(lineBreak);\nwhile ((line = reader.readLine()) != null) {\n StringTokenizer tokenizer = new StringTokenizer(line, \",\");\n if (isHeader) {\n isHeader = false;\n while (tokenizer.hasMoreTokens()) {\n headers.add(tokenizer.nextToken());\n }\n } else {\n count = 0;\n xml.append(\"\\t\");\n xml.append(lineBreak);\n while (tokenizer.hasMoreTokens()) {\n xml.append(\"\\t\\t<\");\n xml.append(headers.get(count));\n xml.append(\">\");\n xml.append(tokenizer.nextToken());\n xml.append(\"\");\n xml.append(lineBreak);\n count++;\n }\n xml.append(\"\\t\");\n xml.append(lineBreak);\n entryCount++;\n }\n}\nxml.append(\"\");\nSystem.out.println(xml.toString());\n\nThe input test.csv (stolen from another answer on this page):\nstring,float1,float2,integer\nhello world,1.0,3.3,4\ngoodbye world,1e9,-3.3,45\nhello again,-1,23.33,456\nhello world 3,1.40,34.83,4999\nhello 2 world,9981.05,43.33,444\n\nThe resulting output:\n\n \n hello world\n 1.0\n 3.3\n 4\n \n \n goodbye world\n 1e9\n -3.3\n 45\n \n \n hello again\n -1\n 23.33\n 456\n \n \n hello world 3\n 1.40\n 34.83\n 4999\n \n \n hello 2 world\n 9981.05\n 43.33\n 444\n \n\n\n\nA: I don't understand why you would want to do this. It sounds almost like cargo cult coding.\nConverting a CSV file to XML doesn't add any value. Your program is already reading the CSV file, so arguing that you need XML doesn't work.\nOn the other hand, reading the CSV file, doing something with the values, and then serializing to XML does make sense (well, as much as using XML can make sense... 
;)) but you would supposedly already have a means of serializing to XML.\n\nA: The big difference is that JSefa brings in is that it can serialize your java objects to CSV/XML/etc files and can deserialize back to java objects. And it's driven by annotations which gives you lot of control over the output. \nJFileHelpers also looks interesting.\n\nA: You can do this exceptionally easily using Groovy, and the code is very readable. \nBasically, the text variable will be written to contacts.xml for each line in the contactData.csv, and the fields array contains each column.\ndef file1 = new File('c:\\\\temp\\\\ContactData.csv')\ndef file2 = new File('c:\\\\temp\\\\contacts.xml')\n\ndef reader = new FileReader(file1)\ndef writer = new FileWriter(file2)\n\nreader.transformLine(writer) { line ->\n fields = line.split(',')\n\n text = \"\"\"\n ${fields[2]} \n ${fields[1]} \n ${fields[9]} \n password \n ${fields[4]} \n ${fields[3]} \n \"\"\"\n}\n\n\nA: You could use XSLT. Google it and you will find a few examples e.g. CSV to XML\nIf you use XSLT you can then convert the XML to whatever format you want.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/123\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"119\"\n}"}}},{"rowIdx":30,"cells":{"text":{"kind":"string","value":"Q: How would you access Object properties from within an object method? 
What is the \"purist\" or \"correct\" way to access an object's properties from within an object method that is not a getter/setter method?\nI know that from outside of the object you should use a getter/setter, but from within would you just do:\nJava:\nString property = this.property;\n\nPHP:\n$property = $this->property;\n\nor would you do:\nJava:\nString property = this.getProperty();\n\nPHP:\n$property = $this->getProperty();\n\nForgive me if my Java is a little off, it's been a year since I programmed in Java...\nEDIT:\nIt seems people are assuming I am talking about private or protected variables/properties only. When I learned OO I was taught to use getters/setters for every single property even if it was public (and actually I was told never to make any variable/property public). So, I may be starting off from a false assumption from the get go. It appears that people answering this question are maybe saying that you should have public properties and that those don't need getters and setters, which goes against what I was taught, and what I was talking about, although maybe that needs to be discussed as well. That's probably a good topic for a different question though...\n\nA: If you mean \"most encapsulation\" by \"purist\", then I typically declare all my fields as private and then use \"this.field\" from within the class itself. For other classes, including subclasses, I access instance state using the getters.\n\nA: The question doesn't require an opinion based answer. It is a subject well covered by computing science for decades from the principle of high cohesion, low coupling and the SOLID principles.\nThe purist, read correct, OO way is to minimise coupling and maximise cohesions. 
Therefore both should be avoided and the Law of Demeter followed by using the Tell Don't Ask approach.\nInstead of getting the value of the object's property, which tightly couples the two class, use the object as a parameter e.g.\n doSomethingWithProperty() {\n doSomethingWith( this.property ) ;\n }\n\nWhere the property was a native type, e.g. int, use an access method, name it for problem domain not the programming domain.\n doSomethingWithProperty( this.daysPerWeek() ) ;\n\nThese will allow you to maintain encapsulation and any post-conditions or dependent invariants. You can also use the setter method to maintain any pre-conditions or dependent invariants, however don't fall into the trap of naming them setters, go back to the Hollywood Principle for naming when using the idiom.\n\nA: It is better to use the accessor methods, even within the object. Here are the points that come to my mind immediately:\n\n*\n\n*It should be done in the interest of maintaining consistency with accesses made from outside the object.\n\n\n*In some cases, these accessor methods could be doing more than just accessing the field; they could be doing some additional processing (this is rare though). If this is the case, accessing the field directly would mean that you are missing that additional processing, and your program could go awry if this processing is always to be done during those accesses.\n\nA: I can be wrong because I'm autodidact, but I NEVER user public properties in my Java classes, they are always private or protected, so that outside code must access by getters/setters. It's better for maintenance / modification purposes. And for inside class code... If getter method is trivial I use the property directly, but I always use the setter methods because I could easily add code to fire events if I wish.\n\nA: i've found using setters/getters made my code easier to read. 
I also like the control it gives when other classes use the methods and if i change the data the property will store.\n\nA: Private fields with public or protected properties. Access to the values should go through the properties, and be copied to a local variable if they will be used more than once in a method. If and ONLY if you have the rest of your application so totally tweaked, rocked out, and otherwise optimized to where accessing values by going through their assosciated properties has become a bottleneck (And that will never EVER happen, I guarantee) should you even begin to consider letting anything other than the properties touch their backing variables directly.\n.NET developers can use automatic properties to enforce this since you can't even see the backing variables at design time.\n\nA: If I don't edit the property, I'll use a public method get_property() unless it's a special occasion such as a MySQLi object inside another object in which case I'll just make the property public and refer to it as $obj->object_property.\nInside the object it's always $this->property for me.\n\nA: It depends. It's more a style issue than anything else, and there is no hard rule.\n\nA: This has religious war potential, but it seems to me that if you're using a getter/setter, you should use it internally as well - using both will lead to maintenance problems down the road (e.g. 
somebody adds code to a setter that needs to run every time that property is set, and the property is being set internally w/o that setter being called).\n\nA: Well, it seems with C# 3.0 properties' default implementation, the decision is taken for you; you HAVE to set the property using the (possibly private) property setter.\nI personally only use the private member-behind when not doing so would cause the object to fall in an less than desirable state, such as when initializing or when caching/lazy loading is involved.\n\nA: I like the answer by cmcculloh, but it seems like the most correct one is the answer by Greg Hurlman. Use getter/setter all the time if you started using them from the get-go and/or you are used to working with them.\nAs an aside, I personally find that using getter/setter makes the code easier to read and to debug later on.\n\nA: As stated in some of the comments: Sometimes you should, sometimes you shouldn't. The great part about private variables is that you are able to see all the places they are used when you change something. If your getter/setter does something you need, use it. If it doesn't matter you decide.\nThe opposite case could be made that if you use the getter/setter and somebody changes the getter/setter they have to analyze all the places the getter and setter is used internally to see if it messes something up.\n\nA: Personally, I feel like it's important to remain consistent. If you have getters and setters, use them. The only time I would access a field directly is when the accessor has a lot of overhead. It may feel like you're bloating your code unnecessarily, but it can certainly save a whole lot of headache in the future. The classic example:\nLater on, you may desire to change the way that field works. Maybe it should be calculated on-the-fly or maybe you would like to use a different type for the backing store. 
If you are accessing properties directly, a change like that can break an awful lot of code in one swell foop.\n\nA: I'm fairly surprised at how unanimous the sentiment is that getters and setters are fine and good. I suggest the incendiary article by Allen Holub \"Getters And Setters Are Evil\". Granted, the title is for shock value, but the author makes valid points.\nEssentially, if you have getters and setters for each and every private field, you are making those fields as good as public. You'd be very hard-pressed to change the type of a private field without ripple effects to every class that calls that getter.\nMoreover, from a strictly OO point of view, objects should be responding to messages (methods) that correspond to their (hopefully) single responsibility. The vast majority of getters and setters don't make sense for their constituent objects;Pen.dispenseInkOnto(Surface) makes more sense to me than Pen.getColor().\nGetters and setters also encourage users of the class to ask the object for some data, perform a calculation, and then set some other value in the object, better known as procedural programming. You'd be better served to simply tell the object to do what you were going to in the first place; also known as the Information Expert idiom.\nGetters and setters, however, are necessary evils at the boundary of layers -- UI, persistence, and so forth. Restricted access to a class's internals, such as C++'s friend keyword, Java's package protected access, .NET's internal access, and the Friend Class Pattern can help you reduce the visibility of getters and setters to only those who need them.\n\nA: It depends on how the property is used. For example, say you have a student object that has a name property. You could use your Get method to pull the name from the database, if it hasn't been retrieved already. 
This way you are reducing unnecessary calls to the database.\nNow let's say you have a private integer counter in your object that counts the number of times the name has been called. You may want to not use the Get method from inside the object because it would produce an invalid count.\n\nA: PHP offers a myriad of ways to handle this, including magic methods __get and __set, but I prefer explicit getters and setters. Here's why:\n\n\n*\n\n*Validation can be placed in setters (and getters for that matter)\n\n*Intellisense works with explicit methods\n\n*No question whether a property is read only, write only or read-write\n\n*Retrieving virtual properties (ie, calculated values) looks the same as regular properties \n\n*You can easily set an object property that is never actually defined anywhere, which then goes undocumented \n\n\nA: \nAm I just going overboard here?\n\nPerhaps ;)\nAnother approach would be to utilize a private/protected method to actually do the getting (caching/db/etc), and a public wrapper for it that increments the count:\nPHP:\npublic function getName() {\n $this->incrementNameCalled();\n return $this->_getName();\n}\n\nprotected function _getName() {\n return $this->name;\n}\n\nand then from within the object itself:\nPHP:\n$name = $this->_getName();\n\nThis way you can still use that first argument for something else (like sending a flag for whether or not to used cached data here perhaps).\n\nA: I must be missing the point here, why would you use a getter inside an object to access a property of that object?\nTaking this to its conclusion the getter should call a getter, which should call a getter.\nSo I'd say inside an object method access a property directly, especially seeing as calling another method in that object (which will just access the property directly anyway then return it) is just a pointless, wasteful exercise (or have I misunderstood the question).\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": 
\"https://stackoverflow.com/questions/126\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"105\"\n}"}}},{"rowIdx":31,"cells":{"text":{"kind":"string","value":"Q: How to export data from SQL Server 2005 to MySQL I've been banging my head against SQL Server 2005 trying to get a lot of data out. I've been given a database with nearly 300 tables in it and I need to turn this into a MySQL database. My first call was to use bcp but unfortunately it doesn't produce valid CSV - strings aren't encapsulated, so you can't deal with any row that has a string with a comma in it (or whatever you use as a delimiter) and I would still have to hand write all of the create table statements, as obviously CSV doesn't tell you anything about the data types.\nWhat would be better is if there was some tool that could connect to both SQL Server and MySQL, then do a copy. You lose views, stored procedures, trigger, etc, but it isn't hard to copy a table that only uses base types from one DB to another... is it?\nDoes anybody know of such a tool? I don't mind how many assumptions it makes or what simplifications occur, as long as it supports integer, float, datetime and string. I have to do a lot of pruning, normalising, etc. anyway so I don't care about keeping keys, relationships or anything like that, but I need the initial set of data in fast!\n\nA: SQL Server 2005 \"Standard\", \"Developer\" and \"Enterprise\" editions have SSIS, which replaced DTS from SQL server 2000. SSIS has a built-in connection to its own DB, and you can find a connection that someone else has written for MySQL. Here is one example. 
Once you have your connections, you should be able to create an SSIS package that moves data between the two.\nI ddin't have to move data from SQLServer to MySQL, but I imagine that once the MySQL connection is installed, it works the same as moving data between two SQLServer DBs, which is pretty straight forward.\n\nA: Using MSSQL Management Studio i've transitioned tables with the MySQL OLE DB. Right click on your database and go to \"Tasks->Export Data\" from there you can specify a MsSQL OLE DB source, the MySQL OLE DB source and create the column mappings between the two data sources. \nYou'll most likely want to setup the database and tables in advance on the MySQL destination (the export will want to create the tables automatically, but this often results in failure). You can quickly create the tables in MySQL using the \"Tasks->Generate Scripts\" by right clicking on the database. Once your creation scripts are generated you'll need to step through and search/replace for keywords and types that exist in MSSQL to MYSQL. \nOf course you could also backup the database like normal and find a utility which will restore the MSSQL backup on MYSQL. I'm not sure if one exists however.\n\nA: Rolling your own PHP solution will certainly work though I'm not sure if there is a good way to automatically duplicate the schema from one DB to the other (maybe this was your question).\nIf you are just copying data, and/or you need custom code anyway to convert between modified schemas between the two DB's, I would recommend using PHP 5.2+ and the PDO libraries. You'll be able to connect using PDO ODBC (and use MSSQL drivers). I had a lot of problems getting large text fields and multi-byte characters from MSSQL into PHP using other libraries.\n\nA: Another tool to try would be the SQLMaestro suite. 
It is a little tricky nailing down the precise tool, but they have a variety of tools, both free and for purchase that handle a wide variety of tasks for multiple database platforms. I'd suggest trying the Data Wizard tool first for MySQL, since I believe that will have the proper \"import\" tool you need.\n\nA: The best way that I have found is the MySQL Migration Toolkit provided by MySQL. I have used it successfully for some large migration projects.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/129\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"90\"\n}"}}},{"rowIdx":32,"cells":{"text":{"kind":"string","value":"Q: XSD DataSets and ignoring foreign keys I have a pretty standard table set-up in a current application using the .NET XSD DataSet and TableAdapter features. My contracts table consists of some standard contract information, with a column for the primary department. This column is a foreign key to my Departments table, where I store the basic department name, id, notes. This is all setup and functioning in my SQL Server.\nWhen I use the XSD tool, I can drag both tables in at once and it auto detects/creates the foreign key I have between these two tables. This works great when I'm on my main page and am viewing contract data.\nHowever, when I go to my administrative page to modify the department data, I typically do something like this:\nDim dtDepartment As New DepartmentDataTable()\nDim taDepartment As New DepartmentTableAdapter()\n\ntaDepartment.Fill(dtDepartment)\n\nHowever, at this point an exception is thrown saying to the effect that there is a foreign key reference broken here, I'm guessing since I don't have the Contract DataTable filled.\nHow can I fix this problem? 
I know I can simply remove the foreign key from the XSD to make things work fine, but having the additional integrity check there and having the XSD schema match the SQL schema in the database is nice.\n\nA: You can try turning Check-constraints off on the DataSet (it's in its properties), or altering the properties of that relationship, and change the key to a simple reference - up to you.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/134\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"39\"\n}"}}},{"rowIdx":33,"cells":{"text":{"kind":"string","value":"Q: Compressing / Decompressing Folders & Files Does anyone know of a good way to compress or decompress files and folders in C# quickly? Handling large files might be necessary.\n\nA: My answer would be close your eyes and opt for DotNetZip. It's been tested by a large community.\n\nA: GZipStream is a really good utility to use.\n\nA: This is very easy to do in java, and as stated above you can reach into the java.util.zip libraries from C#. For references see:\njava.util.zip javadocs\n\nsample code\nI used this a while ago to do a deep (recursive) zip of a folder structure, but I don't think I ever used the unzipping. If I'm so motivated I may pull that code out and edit it into here later.\n\nA: Another good alternative is also DotNetZip.\n\nA: The .Net 2.0 framework namespace System.IO.Compression supports GZip and Deflate algorithms. Here are two methods that compress and decompress a byte stream which you can get from your file object. You can substitute GZipStream for DefaultStream in the methods below to use that algorithm. 
This still leaves the problem of handling files compressed with different algorithms though.\npublic static byte[] Compress(byte[] data)\n{\n MemoryStream output = new MemoryStream();\n\n GZipStream gzip = new GZipStream(output, CompressionMode.Compress, true);\n gzip.Write(data, 0, data.Length);\n gzip.Close();\n\n return output.ToArray();\n}\n\npublic static byte[] Decompress(byte[] data)\n{\n MemoryStream input = new MemoryStream();\n input.Write(data, 0, data.Length);\n input.Position = 0;\n\n GZipStream gzip = new GZipStream(input, CompressionMode.Decompress, true);\n\n MemoryStream output = new MemoryStream();\n\n byte[] buff = new byte[64];\n int read = -1;\n\n read = gzip.Read(buff, 0, buff.Length);\n\n while (read > 0)\n {\n output.Write(buff, 0, read);\n read = gzip.Read(buff, 0, buff.Length);\n }\n\n gzip.Close();\n\n return output.ToArray();\n}\n\n\nA: I've always used the SharpZip Library.\nHere's a link\n\nA: As of .Net 1.1 the only available method is reaching into the java libraries.\nUsing the Zip Classes in the J# Class Libraries to Compress Files and Data with C#\nNot sure if this has changed in recent versions.\n\nA: You can use a 3rd-party library such as SharpZip as Tom pointed out.\nAnother way (without going 3rd-party) is to use the Windows Shell API. You'll need to set a reference to the Microsoft Shell Controls and Automation COM library in your C# project. 
Gerald Gibson has an example at:\nInternet Archive's copy of the dead page\n\nA: You can create zip file with this method:\npublic async Task CreateZipFile(string sourceDirectoryPath, string name)\n{\n var path = HostingEnvironment.MapPath(TempPath) + name;\n await Task.Run(() =>\n {\n if (File.Exists(path)) File.Delete(path);\n ZipFile.CreateFromDirectory(sourceDirectoryPath, path);\n });\n return path;\n}\n\nand then you can unzip zip file with this methods:\n1- This method work with zip file path\npublic async Task ExtractZipFile(string filePath, string destinationDirectoryName)\n{\n await Task.Run(() =>\n {\n var archive = ZipFile.Open(filePath, ZipArchiveMode.Read);\n foreach (var entry in archive.Entries)\n {\n entry.ExtractToFile(Path.Combine(destinationDirectoryName, entry.FullName), true);\n }\n archive.Dispose();\n });\n}\n\n2- This method work with zip file stream\npublic async Task ExtractZipFile(Stream zipFile, string destinationDirectoryName)\n{\n string filePath = HostingEnvironment.MapPath(TempPath) + Utility.GetRandomNumber(1, int.MaxValue);\n using (FileStream output = new FileStream(filePath, FileMode.Create))\n {\n await zipFile.CopyToAsync(output);\n }\n await Task.Run(() => ZipFile.ExtractToDirectory(filePath, destinationDirectoryName));\n await Task.Run(() => File.Delete(filePath));\n}\n\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/145\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"60\"\n}"}}},{"rowIdx":34,"cells":{"text":{"kind":"string","value":"Q: How do I track file downloads I have a website that plays mp3s in a flash player. If a user clicks 'play' the flash player automatically downloads an mp3 and starts playing it. 
\nIs there an easy way to track how many times a particular song clip (or any binary file) has been downloaded?\n\n\nIs the play link a link to the actual\n mp3 file or to some javascript code\n that pops up a player?\nIf the latter, you can easily add your\n own logging code in there to track the\n number of hits to it.\nIf the former, you'll need something\n that can track the web server log\n itself and make that distinction. My\n hosting plan comes with Webalizer,\n which does this nicely.\n\nIt's a javascript code so that answers that. \nHowever, it would be nice to know how to track downloads using the other method (without switching hosts).\n\nA: Is the play link a link to the actual mp3 file or to some javascript code that pops up a player? \nIf the latter, you can easily add your own logging code in there to track the number of hits to it.\nIf the former, you'll need something that can track the web server log itself and make that distinction. My hosting plan comes with webalizer, which does this nicely.\n\nA: The funny thing is I wrote a php media gallery for all my musics 2 days ago. I had a similar problem. I'm using http://musicplayer.sourceforge.net/ for the player. And the playlist is built via php. All music requests go to a script called xfer.php?file=WHATEVER\n$filename = base64_url_decode($_REQUEST['file']);\nheader(\"Cache-Control: public\");\nheader('Content-disposition: attachment; filename='.basename($filename));\nheader(\"Content-Transfer-Encoding: binary\");\nheader('Content-Length: '. 
filesize($filename));\n\n// Put either file counting code here, either a db or static files\n//\nreadfile($filename); //and spit the user the file\n\nfunction base64_url_decode($input) {\n return base64_decode(strtr($input, '-_,', '+/='));\n}\n\nAnd when you call files use something like:\nfunction base64_url_encode($input) {\n return strtr(base64_encode($input), '+/=', '-_,');\n}\n\nhttp://us.php.net/manual/en/function.base64-encode.php\nIf you are using some JavaScript or a flash player (JW player for example) that requires the actual link of an mp3 file or whatever, you can append the text \"&type=.mp3\" so the final link becomes something like:\n\"www.example.com/xfer.php?file=34842ffjfjxfh&type=.mp3\". That way it looks like it ends with an mp3 extension without affecting the file link.\n\nA: Is there a database for your music library? If there is any server code that runs when downloading the mp3 then you can add extra code there to increment the play count. You could also have javascript make a second request to increment the play count, but this could lead to people/robots falsely incrementing counts.\nI used to work for an internet-radio site and we used separate tables to track the time every song was played. Our streams were powered by a perl script running icecast, so we triggered a database request every time a new track started playing. Then to compute the play count we would run a query to count how many times a song's id was in the play log.\n\nA: The problem I had with things like AWStats / reading through web server logs is that large downloads can often be split in data chunks within the logs. This makes reconciling the exact number of downloads quite hard.\nI'd suggest the Google Analytics Event Tracking, as this will register once per click on a download link.\n\nA: Use your httpd log files. 
Install http://awstats.sourceforge.net/\n\nA: Use bash:\ngrep mp3 /var/log/httpd/access_log | wc\n\n\nA: If your song / binary file was served by apache, you can easily grep the access_log to find out the number of downloads. A simple post-logrotate script can grep the logs and maintain your count statistics in a db.\nThis has the performance advantage by not being in your live request code path. Doing non-critical things like stats offline is a good idea to scale your website to large number of users.\n\nA: You could even set up an Apache .htaccess directive that converts *.mp3 requests into the querystring dubayou is working with. It might be an elegant way to keep the direct request and still be able to slipstream log function into the response.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/146\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"89\"\n}"}}},{"rowIdx":35,"cells":{"text":{"kind":"string","value":"Q: How do I sync the SVN revision number with my ASP.NET web site? Stack Overflow has a subversion version number at the bottom:\n\nsvn revision: 679\n\nI want to use such automatic versioning with my .NET Web Site/Application, Windows Forms, WPD projects/solutions.\nHow do I implement this?\n\nA: $rev and others like it are revisions for the individual files, so they won't change unless the file changes. The number on the webpage is (most likely, I'm assuming here) the svn revision number for the whole project. That is different than the file revisions, which others have been pointing to.\nIn this case I assume that CCNET is pulling the revision number of the project and rewriting a part of the webpage with that number. 
Any CI solution should be able to do this, set this up myself with CCNET and Teamcity (although not webpages, but automatic versioning of deployment/assembly versions).\nIn order for you to do this, use a CI solution that supports it, or use your build process (MSbuild/Nant) to store that version and write it to the files before \"deploying\" it.\n\nA: To add to @BradWilson's answer: \"You could also get your source control provider to provide the source revision number if you want\"\nTo connect Subversion and MSBuild:\nMSBuild Community Tasks Project\n\nA: Looks like Jeff is using CruiseControl.NET based on some leafing through the podcast transcripts. This seems to have automated deployment capabilities from source control to production. Might this be where the insertion is happening?\n\nA: We do this with xUnit.net for our automated builds. We use CruiseControl.net (and are trying out TeamCity). The MSBuild task that we run for continuous integration automatically changes the build number for us, so the resulting build ZIP file contains a properly versioned set of DLLs and EXEs.\nOur MSBuild file contains a UsingTask reference for a DLL which does regular expression replacements: (you're welcome to use this DLL, as it's covered by the MS-PL license as well)\n\n \n\nNext, we extract the build number, which is provided automatically by the CI system. 
You could also get your source control provider to provide the source revision number if you want, but we found the build # in the CI system was more useful, because not only can see the integration results by the CI build number, that also provides a link back to the changeset(s) which were included in the build.\n\n \n\n \n $(BUILD_NUMBER)\n \n \n $(ccnetlabel)\n \n \n 0\n \n\n(We try BUILD_NUMBER, which is from TeamCity, then ccnetlabel, which is from CC.net, and if neither is present, we default to 0, so that we can test the automated build script manually.)\nNext, we have a task which sets the build number into a GlobalAssemblyInfo.cs file that we link into all of our projects:\n\n \n \n \n \n\nThis find the AssemblyVersion attribute, and replaces the a.b.c.d version number with a.b.c.BuildNumber. We will usually leave the source checked into the tree with the first three parts of the builder number fixed, and the fourth at zero (f.e., today it's 1.0.2.0).\nIn your build process, make sure the SetVersionNumber task precedes your build task. At the end, we use our Zip task to zip up the build results so that we have a history of the binaries for every automated build.\n\nA: You can do it by adding the following anywhere in your code \n$Id:$\n\nSo for example @Jeff did:\n
svn revision: $Id:$
\n\nand when checked in the server replaced $Id:$ with the current revision number. I also found this reference.\nThere is also $Date:$, $Rev:$, $Revision:$\n\nA: If you're using ASP.Net MVC (as StackOverflow does), I've written an easy to follow 3-step guide on how to automatically get and display the latest SVN revision. The guide was inspired by thinking to myself about this very question! :o)\n\nA: @Balloon\nIf you are using TortoiseSVN, you can use the packaged SubWCRev program. It queries a working copy and tells you just the highest revision number. Admittedly, this seems to be a client-side approach to a server-side problem, but since it's a nice command line program, you should be able to capture its output for use fairly easily.\n"},"meta":{"kind":"string","value":"{\n \"language\": \"en\",\n \"url\": \"https://stackoverflow.com/questions/163\",\n \"timestamp\": \"2023-03-29T00:00:00\",\n \"source\": \"stackexchange\",\n \"question_score\": \"101\"\n}"}}},{"rowIdx":36,"cells":{"text":{"kind":"string","value":"Q: Embedding Windows Media Player for all browsers Edit: This question was written in 2008, which was like 3 internet ages ago. If this question is still relevant to your environment, please accept my condolences. Everyone else should convert into a format supported by your browsers (That would be H.264 if Internet Explorer is needed, and probably AV1, VP8/VP9 if not) and use the