Kyungeun, Jeon (Jenny) jennyonjourney

@jennyonjourney
jennyonjourney / gist:e4982d3fedd6c70f1da239f86f1918b7
Created March 25, 2018 03:19
Python for everybody - Assignment 4.6
def computepay(h, r):
    # Overtime (hours above 40) is paid at 1.5 times the base rate.
    if h <= 40:
        pay = h * r
    else:
        pay = 40 * r + (h - 40) * r * 1.5
    return pay

hrs = input("Enter Hours:")
h = float(hrs)
rate = input("Enter rate:")
r = float(rate)
print("Pay", computepay(h, r))
# '../' means going up one directory from the current folder.
# To start a new line in the output you always need the backslash escape (\n).
f = open('../fff/aaa.txt','w')
f.write('query')
f.write('aaa')
f.write('bbb\n')
f.write('ccc')
f.close()
# Hopefully the Korean text does not get garbled; the default here is utf-8.
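A minimal sketch, using a hypothetical file ../fff/bbb.txt, of passing encoding='utf-8' explicitly so the Korean text is written as utf-8 on any platform:

# Sketch (hypothetical path): open the file for writing with an explicit encoding.
f = open('../fff/bbb.txt', 'w', encoding='utf-8')
f.write('한글 테스트\n')
f.close()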
@jennyonjourney
jennyonjourney / gist:e3a7b0ef56b782bd01eede01706cf9a3
Created March 25, 2018 06:28
Python - file read/readline/readlines()
# readline() function
# Don't forget encoding='utf-8' when reading Korean text.
fa = open('../fff/aaa.txt', 'r', encoding='utf-8')
dd = fa.read()
fa.close()
print(dd)
# Don't forget encoding='utf-8' when reading Korean text.
fa = open('../fff/stud.csv', 'r', encoding='utf-8')
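A minimal sketch of the readline()/readlines() calls the gist title mentions, assuming stud.csv holds comma-separated rows:

first = fa.readline()      # read a single line
rest = fa.readlines()      # read the remaining lines as a list
fa.close()
for row in [first] + rest:
    print(row.strip().split(','))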
@jennyonjourney
jennyonjourney / gist:b01fd37fcc018c7e6dd319241bee1489
Created March 25, 2018 09:08
Python - copytree_movetree + external module os
import shutil
shutil.copy('../fff/Chicago.jpg','../mmm/image2.jpg')
# Copy the Chicago image into the mmm folder as image2.
shutil.move('../mmm/image2.jpg','../fff/Chicago2.jpg')
# Move image2 from the mmm folder back to the fff folder under the name Chicago2.
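The title also mentions copytree, moving a whole tree, and the os module; a minimal sketch, assuming ../fff exists and the backup folder names are hypothetical:

import os
import shutil

# Copy an entire directory tree (the destination must not exist yet).
shutil.copytree('../fff', '../backup_fff')
# Move (rename) the copied tree, then list what ended up inside it.
shutil.move('../backup_fff', '../fff_backup')
print(os.listdir('../fff_backup'))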
--------------------------------------------------------
def order(ih, name='아시아노'):
    # Menu prices keyed by drink name; ih is the ice/hot choice.
    dd = {'아메리카노': 2000,
          '아프리카노': 3000,
          '아시아노': 3500}
    print(name, ih, dd[name])

order('아이스', '아메리카노')
order('핫', '아프리카노')
order('아이스')
print('----Exercise----')
largest = None
smallest = None
while True:
    inp = input("Enter a number: ")
    if inp == "done":
        break
    try:
        num = float(inp)
    except ValueError:
        print("Invalid input")
        continue
    # Track the running maximum and minimum of the numbers entered so far.
    if largest is None or num > largest:
        largest = num
    if smallest is None or num < smallest:
        smallest = num
print("Maximum is", largest)
print("Minimum is", smallest)
@jennyonjourney
jennyonjourney / gist:804b2c2aaa17722d00572e26c0a6b903
Last active April 1, 2018 03:22
Python - web data crawling
import requests
from lxml.html import parse
from io import StringIO

# Fetch the Naver Finance market-cap listing page.
url = 'http://finance.naver.com/sise/sise_market_sum.nhn'
text = requests.get(url)
# Parse the HTML response and collect every <table> element in the document.
ppp = parse(StringIO(text.text))
doc = ppp.getroot()
tables = doc.findall('.//table')
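A minimal follow-up sketch of reading the parsed tables with lxml's text_content(); which table index holds the stock listing is an assumption, not part of the gist:

rows = tables[1].findall('.//tr')
for tr in rows[:5]:
    cells = [td.text_content().strip() for td in tr.findall('.//td')]
    if cells:
        print(cells)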
@jennyonjourney
jennyonjourney / gist:659bd1152fe516bc16c3d63d12c26dd9
Created April 1, 2018 03:44
Python - Web crawling (saving to a csv file)
import requests
from lxml.html import parse
from io import StringIO
def rowData(rr, kind):
    cols = rr.findall('.//' + kind)
    res = [vv.text_content().replace("\t", "").replace("\n", "") for vv in cols]
    return res[:-1]

def rowWrite(rr):
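The body of rowWrite is cut off in the gist, so the following is only a sketch of how rows could be saved to a csv file as the title says, assuming the tables list from the request above and using the csv module; rowWrite_sketch and stock.csv are hypothetical names:

import csv

def rowWrite_sketch(rr, writer):
    # Sketch: pull the cell texts of one <tr> via rowData and write them as one csv row.
    cells = rowData(rr, 'td')
    if cells:
        writer.writerow(cells)

# Usage sketch: write every row of the first parsed table to stock.csv.
with open('stock.csv', 'w', newline='', encoding='utf-8') as out:
    writer = csv.writer(out)
    for tr in tables[0].findall('.//tr'):
        rowWrite_sketch(tr, writer)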
# Sign in at developers.facebook.com to get the app credentials.
import urllib.request as ur
import json
page_name='jtbcnews'
base = "https://graph.facebook.com/v2.8"
app_id = "200920440387013"
app_secret = "daccef14d5cd41c0e95060d65e66c41d"
access_token = app_id+"|"+app_secret
import urllib.request as ur
import json
page_name='MBC'
base="https://graph.facebook.com/v2.8/"
page_id='240263402699918'
from_date='2018-01-01'
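The snippet stops at from_date; a minimal sketch of how such a Graph API request could be completed with the variables defined above. The field list, the since/until window, and to_date/num_statuses are assumptions, not part of the gist:

to_date = '2018-03-31'      # hypothetical end of the query window
num_statuses = 10           # hypothetical number of posts per request
fields = "id,message,created_time"
url = (base + page_id + "/posts?fields=" + fields +
       "&since=" + from_date + "&until=" + to_date +
       "&limit=" + str(num_statuses) +
       "&access_token=" + access_token)
# Fetch the JSON response and print a short preview of each post.
data = json.loads(ur.urlopen(url).read().decode('utf-8'))
for post in data.get('data', []):
    print(post.get('created_time'), post.get('message', '')[:40])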