Nathan Hutchision (nelix)
#!/usr/bin/env python
"""
Regexes for URIs

These regexes are derived directly from the collected ABNF in RFC 3986
(except for DIGIT, ALPHA and HEXDIG, which are defined by RFC 2234).
They should be compiled with re.VERBOSE.
"""
nelix / cont.as
Created July 3, 2009 15:33 — forked from cho45/cont.as
// these all end up running concurrently
for (var i:uint = 0; i < len; i++) {
    var loader:URLLoader = new URLLoader();
    loader.addEventListener(Event.COMPLETE, function (..._) {});
    loader.load(t[i]);
}

// sequential execution: the named function expression re-arms itself as
// the COMPLETE handler, so each load starts after the previous one ends
var i:uint = 0;
(function cont (..._):void {
    if (i++ < len) {
        var loader:URLLoader = new URLLoader();
        loader.addEventListener(Event.COMPLETE, cont);
        loader.load(t[i - 1]);   // i was already incremented by the test
    }
})();
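The same contrast, sketched in Python with asyncio standing in for URLLoader (fetch here is a made-up coroutine, not anything from the gist):

import asyncio

async def fetch(url):          # hypothetical stand-in for a URLLoader request
    await asyncio.sleep(0.1)   # pretend network I/O
    return url

async def concurrent(urls):    # everything starts at once
    return await asyncio.gather(*(fetch(u) for u in urls))

async def sequential(urls):    # each fetch waits for the previous one
    return [await fetch(u) for u in urls]

print(asyncio.run(sequential(["a", "b", "c"])))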
## Call
traverse("object", obj, function (key, val) { console.log(key, val); });

## Function
function traverse(key, jsonObj, func) {
    if (typeof jsonObj == "object") {
        $.each(jsonObj, function (k, v) {
            traverse(k, v, func);   // recurse, passing the callback along
        });
    } else {
        func(key, jsonObj);         // leaf value: invoke the callback
    }
}
LUAOBJECT_GRAMMAR = %q{
  grammar LuaObject
    rule luaobj
      space value space { def to_ruby; value.to_ruby; end }
    end

    rule value
      nil / float / number / string / table / boolean
    end
nelix / uuid.py
Created July 5, 2009 01:32 — forked from mmalone/uuid.py
import socket

def sockrecv(sock):
    # keep reading until the buffered reply ends with a newline
    # (Python 2 byte strings)
    d = ''
    while not d or d[-1] != '\n':
        d += sock.recv(8192)
    return d
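A minimal usage sketch (the finger port is just an illustrative peer that answers with newline-terminated text; host and query are placeholders):

conn = socket.create_connection(("example.com", 79))   # placeholder finger server
conn.send("nelix\r\n")                                  # query; reply ends in '\n'
print(sockrecv(conn))
conn.close()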
/*
* Copyright (c) 2009 Gaute Hope <[email protected]>
* Distributed under the terms of the GNU General Public Licence v2
*
* Waits for change of ~/.plan
*
*/
#include <iostream>
#include <sys/inotify.h>
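The C++ body is cut off in this preview; here is a rough Python equivalent of the described wait, sketched with ctypes (assumes Linux with glibc; IN_MODIFY hard-coded from <sys/inotify.h>):

import ctypes, os

libc = ctypes.CDLL("libc.so.6", use_errno=True)   # Linux/glibc assumed
IN_MODIFY = 0x00000002                            # value from <sys/inotify.h>

fd = libc.inotify_init()
libc.inotify_add_watch(fd, os.path.expanduser("~/.plan").encode(), IN_MODIFY)
os.read(fd, 4096)                                 # blocks until ~/.plan changes
print("~/.plan changed")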
from itty import *
import Queue
import threading

queue = Queue.Queue()                   # messages shared between threads
message_count = 0
message_count_lock = threading.Lock()   # guards message_count
finish_queue = False
finish_queue_lock = threading.Lock()    # guards the finish_queue flag
wait_for_it = threading.Event()         # lets one thread block until signalled
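How these pieces presumably combine, as a standalone sketch (Python 3 names: the Queue module became queue; the handler logic is invented for illustration):

import queue, threading

q = queue.Queue()

def worker():
    while True:
        msg = q.get()
        if msg is None:      # sentinel tells the worker to stop
            break
        print("handled", msg)
        q.task_done()

t = threading.Thread(target=worker)
t.start()
q.put("hello")
q.put(None)
t.join()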
// delete the TARGET.pid file to stop the script
if (WScript.Arguments.length == 0) {
    WScript.Echo("Drag and drop a target file onto the script");
    WScript.Quit();
}
var TARGET = WScript.Arguments(0);

// how often to check whether a backup is needed (milliseconds)
var PERIOD = 60 * 1000;

var AutoBackup = {
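The preview cuts off at the AutoBackup object; the described loop, sketched in Python (TARGET, PERIOD and the pidfile convention mirror the script, but the file names are illustrative):

import os, shutil, time

TARGET = "notes.txt"            # illustrative target file
PIDFILE = TARGET + ".pid"
PERIOD = 60                     # seconds between checks

with open(PIDFILE, "w") as f:
    f.write(str(os.getpid()))

last = 0
while os.path.exists(PIDFILE):  # deleting the pidfile stops the loop
    mtime = os.path.getmtime(TARGET)
    if mtime != last:           # back up only when the file has changed
        shutil.copy(TARGET, TARGET + ".bak")
        last = mtime
    time.sleep(PERIOD)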
nelix / sp.py
Created July 9, 2009 19:23 — forked from qingfeng/sp.py
from scrapy import log
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.xpath.selector import HtmlXPathSelector
from scrapy.item import ScrapedItem
def safecn(i):
    # map a numeric character reference to its character,
    # e.g. safecn("20013") returns u'中'
    try:
        return unichr(int(i))
    except:
        return i    # fallback guessed; the gist preview cuts off here
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>

/* prevent GCC from inlining, so the function gets a real call frame */
#define NOINLINE __attribute__((noinline))

/* bytes reserved for each continuation's private stack */
const size_t CONTINUATION_STACK_SIZE = 8192;