Skip to content

Instantly share code, notes, and snippets.

View copyninja's full-sized avatar

Vasudev Kamath copyninja

View GitHub Profile
<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:orientation="vertical"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
>
<EditText android:id="@+id/string"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:layout_marginTop="10dp"
#!/usr/local/bin/python2.6
# Original Post http://segfault.in/2010/10/shorten-urls-using-google-and-python/
from urllib2 import urlopen, Request, HTTPError
from urllib import urlencode
from simplejson import loads
import sys
import re
<script>
s=Math.sin;
c=Math.cos;
z=0;
d=document;
function a(){
for(i=0;i<50;i++){
l = ((i!=7 && i != 8 && i != 36 && i!= 12 && i!=47) ? 3205+i : (i==7 ? 3207 + i : 3206+i));
z?0:d.write('<b id=x'+i+' style=position:relative>&#'+ l +';</b>');
w=i*s(z)/c(z);
#!/usr/bin/python
from simplejson import loads
from urllib2 import Request,urlopen,HTTPError
import sys
import os
def ensure_dir(folder):
"""
@copyninja
copyninja / kannada_word_extractor.py
Created October 15, 2010 11:14
Extracts Kannada-language words from web pages
#!/usr/local/bin/python3
import re
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from multiprocessing import Process
def get_page(urlstring):
"""
#!/usr/local/bin/python3
import re
from urllib.request import urlopen, Request
from urllib.error import HTTPError
def get_page(urlstring):
"""
Arguments:
@copyninja
copyninja / crawler.py
Created October 19, 2010 11:38
A simple web crawler for collecting words and links of a language from the WWW
#!/usr/local/bin/python3
import re
import sqlite3
from urllib.request import urlopen,Request
from urllib.error import HTTPError
from multiprocessing import Process
# TODO Fill this table with other language regexps
lang_regexps = {
@copyninja
copyninja / driver.py
Created October 19, 2010 11:39
Simple Driver program for Crawler
#!/usr/local/bin/python3
from crawler import Crawler
def crawling_done(url):
    """Callback invoked by Crawler when it finishes crawling *url*.

    Prints a completion message; returns None.
    """
    # The original had print((...)) — a redundant extra pair of parentheses
    # left over from a 2to3 conversion; a single call is equivalent.
    print("Crawling {} done".format(url))
if __name__ == "__main__":
    # Crawl the Kannada Wikipedia article on Karnataka, reporting
    # completion through the crawling_done callback.
    crawler = Crawler(crawling_done,
                      "http://kn.wikipedia.com/wiki/Karnataka",
                      "kn_IN")
    crawler.crawl()
@copyninja
copyninja / gpl.yasnippet
Created October 20, 2010 11:07
Improved yasnippet to insert comment depending on the mode.
# -*- mode: snippet -*-
# name: gplc
# contributor: Vasudev Kamath <[email protected]>
# --
${1:`(buffer-name)`}
Copyright ${2:`(nth 5 (decode-time))`} ${3:`(user-full-name)`} ${4:<`(replace-regexp-in-string "@" "@" user-mail-address)`>}
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU ${5:$$(yas/choose-value '("Lesser" "Affero" ""))} General Public License as published by
@copyninja
copyninja / my_html_parser.py
Created October 22, 2010 11:44
Trying to write an HTML parser using the Python 3 html.parser module with pyquery and lxml
#!/usr/local/bin/python3
import sys
from urllib.request import Request,urlopen
from urllib.error import URLError,HTTPError
from html.parser import HTMLParser,HTMLParseError
from pyquery import PyQuery as pq
class MyHTMLParser(HTMLParser):
"""