<Additional information about your API call. Try to use verbs that match both request type (fetching vs modifying) and plurality (one vs multiple).>

- **URL**

  <The URL structure (path only, no root URL)>

- **Method:**
import numpy as np
import pandas as pd

def show_coefs(data, classifier):
    """Pair each feature in `data` with its fitted coefficient and odds ratio."""
    cols = data.columns
    coefficients = pd.concat([pd.DataFrame(cols), pd.DataFrame(np.transpose(classifier.coef_))], axis=1)
    coefficients.columns = ['name', 'coef']
    # Exponentiating a log-odds coefficient yields the odds ratio (OR)
    coefficients['OR'] = np.exp(coefficients['coef'])
    return coefficients.dropna()
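A minimal usage sketch, assuming the classifier is a fitted scikit-learn LogisticRegression; the dataset and variable names here are illustrative, not part of the original snippet:

```python
# Hypothetical usage: fit a logistic regression, then inspect odds ratios.
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression

X, y = load_breast_cancer(return_X_y=True, as_frame=True)
clf = LogisticRegression(max_iter=5000).fit(X, y)

print(show_coefs(X, clf))  # one row per feature: name, coef, OR
```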
import io

import requests
import pandas as pd
from bokeh.models import ColumnDataSource, HoverTool, ResizeTool, SaveTool  # ResizeTool exists only in Bokeh < 1.0
from bokeh.models.widgets import TextInput, Button
from bokeh.plotting import figure, curdoc
from bokeh.layouts import row, widgetbox

TICKER = ""
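A hypothetical sketch of how these imports might be wired into a Bokeh server app; it continues the snippet above, and `fetch_prices` is a stand-in for whatever quote API the original called via `requests`:

```python
# Sketch only: stream prices for a ticker into a line chart.
def fetch_prices(ticker):
    import datetime, random
    # Placeholder data; a real version would request and parse an HTTP API.
    return pd.DataFrame({'time': [datetime.datetime.now()],
                         'price': [random.random() * 100]})

source = ColumnDataSource(data={'time': [], 'price': []})

ticker_input = TextInput(title="Ticker", value=TICKER)
fetch_button = Button(label="Fetch")

fig = figure(title="Price history", x_axis_type="datetime",
             tools=[HoverTool(), SaveTool()])
fig.line('time', 'price', source=source)

def on_fetch():
    df = fetch_prices(ticker_input.value)
    source.data = ColumnDataSource.from_df(df)

fetch_button.on_click(on_fetch)
curdoc().add_root(row(widgetbox(ticker_input, fetch_button), fig))
```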
A curated list of AWS resources to prepare for the AWS Certifications.

A curated list of awesome AWS resources you need to prepare for all five AWS Certifications. This gist includes open source repos, blogs and blog posts, ebooks, PDFs, whitepapers, video courses, free lectures, slides, sample tests, and many other resources.

For more about AWS, AWS Certifications, and updates to this gist, follow me at @leonardofed.
AWS region code | AWS region name | Number of AZs | AZ names
---|---|---|---
us-east-1 | Virginia | 4 | us-east-1a, us-east-1b, us-east-1c, us-east-1e
us-west-1 | N. California | 2 | us-west-1a, us-west-1b
us-west-2 | Oregon | 3 | us-west-2a, us-west-2b, us-west-2c
eu-west-1 | Ireland | 3 | eu-west-1a, eu-west-1b, eu-west-1c
eu-central-1 | Frankfurt | 2 | eu-central-1a, eu-central-1b
ap-southeast-1 | Singapore | 2 | ap-southeast-1a, ap-southeast-1b
ap-southeast-2 | Sydney | 3 | ap-southeast-2a, ap-southeast-2b, ap-southeast-2c
ap-northeast-1 | Tokyo | 2 | ap-northeast-1a, ap-northeast-1c
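AZ counts drift as AWS expands, so a static table like this goes stale; the live list can be pulled from the EC2 `DescribeAvailabilityZones` API. A minimal sketch with boto3 (the region name is just an example):

```python
import boto3

# List the availability zones visible to your account in one region.
ec2 = boto3.client('ec2', region_name='eu-west-1')
response = ec2.describe_availability_zones()

for az in response['AvailabilityZones']:
    print(az['RegionName'], az['ZoneName'], az['State'])
```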
import json
import urllib.parse
import boto3

# Needed to generate a random string
import random
import string

print('Loading function')
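These imports suggest an S3-triggered Lambda function plus a random-string helper. A hypothetical sketch of what might follow; the event shape assumes an S3 put trigger, and the handler body is an assumption, not the original code:

```python
s3 = boto3.client('s3')

def random_string(length=8):
    # Matches the comment above: build a random lowercase/digit string.
    return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))

def lambda_handler(event, context):
    # Standard S3-trigger plumbing: the object key arrives URL-encoded.
    record = event['Records'][0]['s3']
    bucket = record['bucket']['name']
    key = urllib.parse.unquote_plus(record['object']['key'], encoding='utf-8')

    obj = s3.get_object(Bucket=bucket, Key=key)
    print(f"Read {obj['ContentLength']} bytes from s3://{bucket}/{key}")

    return json.dumps({'bucket': bucket, 'key': key, 'request_id': random_string()})
```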
from decimal import Decimal

import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, DecimalType

# Create Pandas DataFrame; columns are listed in the same (alphabetical) order as the schema,
# since Spark maps Pandas columns onto the schema by position; PERSONID is a Decimal to match DecimalType
pd_person = pd.DataFrame({'ADDRESS': 'Museumplein', 'CITY': 'Amsterdam', 'FIRSTNAME': 'John', 'LASTNAME': 'Doe', 'PERSONID': Decimal(0)}, index=[0])

# Create PySpark DataFrame schema
p_schema = StructType([StructField('ADDRESS', StringType(), True), StructField('CITY', StringType(), True), StructField('FIRSTNAME', StringType(), True), StructField('LASTNAME', StringType(), True), StructField('PERSONID', DecimalType(), True)])

# Create Spark DataFrame from Pandas
spark = SparkSession.builder.getOrCreate()
df_person = spark.createDataFrame(pd_person, p_schema)
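A quick way to confirm the conversion worked, using standard Spark DataFrame inspection calls:

```python
df_person.printSchema()  # column names and types as declared in p_schema
df_person.show()         # the single John Doe row
```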