I hereby claim:
- I am Jwpe on github.
- I am jwpe (https://keybase.io/jwpe) on keybase.
- I have a public key whose fingerprint is 006C E8AB C67A CFFE CF3D 7026 ACBF E180 93DD 1B94
To claim this, I am signing this object:
# Print a numbered list of reasons why marketers should learn to code.
# NOTE(review): the original used the Python 2 `print` statement, which is a
# syntax error on Python 3; this version uses the print() function instead.
opening_line = "{}. Marketers should learn to code because {}!"

reasons = [
    "it will make them more efficient",
    "it's amazing and fun",
    "it will help them to understand the technology that they work with",
]

# enumerate() yields both the position and the value of each reason;
# positions start at 0, so we add 1 for human-friendly numbering.
for i, reason in enumerate(reasons):
    print(opening_line.format(i + 1, reason))
| # First we 'import' a Python package - a piece of code written for a specific | |
| # job and freely available to install - called BeautifulSoup. This package | |
| # lets us search through the structure of HTML. | |
| # For installation instructions, see the guide to pip at the end of the post. | |
| from bs4 import BeautifulSoup | |
| # Here we add our HTML document. In a more advanced example, we could fetch | |
| # this directly from the web, but here we just copy it into our Python file. | |
| # Notice the triple-quotes around the HTML! | |
| html = """ |
| from flask import Flask, jsonify, abort, g | |
| import flask_nicely | |
| from flask_nicely.errors import NotFound | |
| app = Flask(__name__) | |
| ### With decorator ### |
| # Let's return to our list of leads, but add some additional information | |
| # that we can use to qualify them | |
| leads = [ | |
| {'first_name': 'Paul', 'last_name': 'McCartney', 'company': 'Beatles Inc.', | |
| 'title': 'Chief Marketing Officer', 'employees': 150, | |
| 'vertical': 'music'}, | |
| {'first_name': 'Nina', 'last_name': 'Simone' , 'company': 'Jazz & Soul', | |
| 'title': 'CEO', 'employees': 350, 'vertical': 'music'}, | |
| {'first_name': 'Michael', 'last_name': 'Jagger', | |
| 'company': 'Rolling Stone Corp', 'title': 'SVP Marketing', |
| <!-- Here are some common elements to look for in a web page's HTML code --> | |
| <!-- The 'description' meta tag gives us a short summary of the page as seen | |
| by search engines --> | |
| <meta name="description" content="A short description of the website that will | |
| show up below the page title in search engines"/> | |
| <!-- The 'keywords' meta tag lists important keywords that the page wants to | |
| rank well for in search engines. Using too many keywords can devalue each | |
| individual keyword --> |
# Build our lead list: one dictionary per person, each mapping field
# names (first_name, last_name, company, title) to that lead's details.
leads = [
    {
        'first_name': 'Paul',
        'last_name': 'McCartney',
        'company': 'Beatles Inc.',
        'title': 'Chief Marketing Officer',
    },
    {
        'first_name': 'Nina',
        'last_name': 'Simone',
        'company': 'Jazz & Soul',
        'title': 'CEO',
    },
    {
        'first_name': 'Michael',
        'last_name': 'Jagger',
        'company': 'Rolling Stone Corp',
        'title': 'SVP Marketing',
    },
]
# Handlebars block helper: render the main block when `value` is neither
# null nor undefined; otherwise render the {{else}} section.
Handlebars.registerHelper 'exists', (value, options) ->
  return options.inverse(@) unless value?
  options.fn(@)
{
    "cmd": ["coffee", "--compile", "--output", "${file_path/coffee/js/}", "$file"],
    "file_regex": "^(...*?):([0-9]*):?([0-9]*)",
    "selector": "source.coffee"
}
# Install Node.js and CoffeeScript on Ubuntu.

# Prerequisites: add-apt-repository support plus a build toolchain
# (g++/make) for compiling native npm modules.
sudo apt-get install python-software-properties python g++ make

# NOTE(review): the chris-lea PPA is deprecated upstream; confirm a
# currently maintained Node.js source before reusing this script.
sudo add-apt-repository ppa:chris-lea/node.js
sudo apt-get update
sudo apt-get install nodejs

# -g installs the coffee-script package globally so the `coffee`
# command is available on the PATH.
sudo npm install -g coffee-script