# train the model on the training set
gboost.fit(X_train, y_train)
# make class predictions for the testing set
y_pred_class = gboost.predict(X_test)
# IMPORTANT: first argument is true values, second argument is predicted values
print(metrics.confusion_matrix(y_test, y_pred_class))
binary = np.array([[125, 14],
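To put the confusion matrix in context, a few summary scores can be computed from the same predictions. This is a minimal sketch, assuming metrics is sklearn.metrics and that y_test and y_pred_class come from the cell above, with a binary target.

# sketch: summary metrics from the same true/predicted labels
print(metrics.accuracy_score(y_test, y_pred_class))
print(metrics.precision_score(y_test, y_pred_class))
print(metrics.recall_score(y_test, y_pred_class))
print(metrics.classification_report(y_test, y_pred_class))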
logreg = LogisticRegression()
logreg_cv = LogisticRegressionCV()
rf = RandomForestClassifier()
gboost = GradientBoostingClassifier()
svm = SVC(probability=True)
knn = KNeighborsClassifier()
dt = DecisionTreeClassifier()
models = [logreg, logreg_cv, rf, gboost, svm, knn, dt]
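One straightforward way to compare these estimators is to loop over the list and score each with cross-validation. The sketch below assumes X_train and y_train from the earlier train/test split and uses 5-fold accuracy; the fold count and scoring metric are assumptions, not something shown in the post.

from sklearn.model_selection import cross_val_score

# sketch: 5-fold cross-validated accuracy for every candidate model
for model in models:
    scores = cross_val_score(model, X_train, y_train, cv=5, scoring='accuracy')
    print(model.__class__.__name__, scores.mean())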
# Tree-based estimators can be used to compute feature importances, which in turn can be used to discard irrelevant features.
clf = RandomForestClassifier(n_estimators=50, max_features='sqrt')
clf = clf.fit(train, targets)
# Let's have a look at the importance of each feature.
features = pd.DataFrame()
features['feature'] = train.columns
features['importance'] = clf.feature_importances_
# Sorting values by feature importance.
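The sort itself is cut off above; a plausible way to finish it, assuming the features DataFrame built in this cell and matplotlib.pyplot imported as plt (as in the plotting cells below), is:

# sketch: order the features by importance and plot the ranking
features = features.sort_values(by='importance', ascending=False)
features.set_index('feature').plot.barh(figsize=(8, 10))
plt.show()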
wordcloud = WordCloud(background_color='white', mode="RGB", width=2000, height=1000).generate(str(postings['name']))
plt.title("Craigslist Used Items Word Cloud")
plt.imshow(wordcloud)
plt.axis("off")
plt.show();
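One caveat: str(postings['name']) stringifies the truncated Series repr, so only the rows pandas happens to print contribute to the cloud. A sketch that feeds every posting name instead, assuming the same postings DataFrame, would be:

# sketch: build the word cloud from all posting names rather than the Series repr
text = ' '.join(postings['name'].astype(str))
wordcloud = WordCloud(background_color='white', mode="RGB", width=2000, height=1000).generate(text)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()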
# Removing all locations with 2 or fewer items.
counts = non_mv.location.value_counts()
loc_gt2 = counts[counts > 2]
popular_locations = non_mv[non_mv.location.isin(loc_gt2.keys())]
plt.figure(figsize=(10,5))
sns.violinplot(x="location", y="price", data=popular_locations, scale="width", inner="stick")
plt.show();
# vehicles are skewing the boxplot too much; all rows at or above $1.8k appear to be motor vehicles.
motor_vehicles = postings.loc[postings.price >= 1800.0, :]
motor_vehicles.plot.bar('name', 'price', figsize=(9,9))
plt.ylabel("Price")
plt.xlabel("Vehicle")
plt.show();
import scrapy

class CraigslistWebscrapingItem(scrapy.Item):
    name = scrapy.Field()
    price = scrapy.Field()
    location = scrapy.Field()
    date = scrapy.Field()
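For context, a spider that fills this item might look roughly like the sketch below. The start URL and CSS selectors are placeholders, since the actual Craigslist markup and spider code are not shown in this snippet.

class CraigslistForSaleSpider(scrapy.Spider):
    name = 'craigslist_forsale'
    start_urls = ['https://sfbay.craigslist.org/d/for-sale/search/sss']  # placeholder URL

    def parse(self, response):
        # placeholder selectors: adjust to the real result-row markup
        for row in response.css('li.result-row'):
            item = CraigslistWebscrapingItem()
            item['name'] = row.css('a.result-title::text').get()
            item['price'] = row.css('span.result-price::text').get()
            item['location'] = row.css('span.result-hood::text').get()
            item['date'] = row.css('time.result-date::attr(datetime)').get()
            yield item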
# execute Summary Extractor model
ml = MonkeyLearn('insert api key here')
data = list(nlp_df_sample.iloc[:,7])
model_id = 'ex_94WD2XxD'
summary_model_results = ml.extractors.extract(model_id, data, production_model=True)
print(summary_model_results.body)
# execute Price Extractor model
data = list(nlp_df_sample.iloc[:,7])
model_id = 'ex_wNDME4vE'
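The call for the Price Extractor is cut off here; presumably it mirrors the Summary Extractor call above, along the lines of:

# sketch: run the Price Extractor the same way as the Summary Extractor
price_model_results = ml.extractors.extract(model_id, data, production_model=True)
print(price_model_results.body)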
locations_ten_or_more = all_items_df.groupby(['Location']).filter(lambda g: g.Location.value_counts() >= 10) \
    .loc[:,['Location','Description', 'Price', 'Title', 'Url']]
# checking the number of locations with 10 or more items
len_of_locs = len(locations_ten_or_more.groupby("Location").size())
print(f'There are {len_of_locs} cities with 10 items or more.')
print('\n')
# checking the locations with the most items in this subset
print('Locations with the most amount of items in this subset:')
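The listing of those top locations is also truncated; a minimal way to print it, assuming the same filtered subset, would be:

# sketch: the ten locations with the most items in the filtered subset
print(locations_ten_or_more['Location'].value_counts().head(10))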
# facebook marketplace
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from pymongo import MongoClient

class App:
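    # sketch (assumption): the class body is cut off here; a constructor consistent
    # with the imports above might open a Chrome driver plus a local MongoDB
    # collection for the scraped listings. Database and collection names are placeholders.
    def __init__(self):
        self.driver = webdriver.Chrome()
        self.client = MongoClient('mongodb://localhost:27017/')
        self.collection = self.client['marketplace_db']['listings']  # placeholder names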