Skip to content

Instantly share code, notes, and snippets.

/* Before async / await */
// Resumes the paused generator, feeding `value` back in as the result of
// the `yield` expression inside createFlow.
// NOTE(review): `returnNextElement` is not defined in this snippet —
// presumably the iterator returned by createFlow(); confirm at the call site.
function doWhenDataReceived(value){
returnNextElement.next(value);
}
// Generator that pauses at the network call: `yield fetch("/post/1")` hands
// the pending promise out to whoever drives the iterator; `data` becomes
// whatever value is later passed back in via .next(value)
// (see doWhenDataReceived above).
function *createFlow(){
const data = yield fetch("/post/1");
console.log(data);
}
@devamitranjan
devamitranjan / EntropyCalculation.js
Created December 17, 2024 00:50
Calculating per-image entropy (encoded bits per rendered pixel) to understand the LCP
// DevTools snippet: tabulate each image's "entropy" — encoded transfer size
// in bits divided by rendered pixel count (a low value hints the image is
// larger than it needs to be for LCP purposes).
// NOTE(review): if no resource-timing entry exists for an image, `bytes`
// (and therefore `entropy`) comes out NaN — same as the original snippet.
const rows = [...document.images].map((img) => {
  const src = img.currentSrc;
  const [entry] = performance.getEntriesByName(src);
  const bytes = entry?.encodedBodySize * 8;
  const pixels = img.width * img.height;
  return { src, bytes, pixels, entropy: bytes / pixels };
});
console.table(rows);
@devamitranjan
devamitranjan / eslint.config.mjs
Created December 7, 2024 17:46
ESLint configuration
import js from "@eslint/js";
import globals from "globals";
import prettier from "eslint-config-prettier";
/** @type {import('eslint').Linter.Config[]} */
export default [
js.configs.recommended,
{
files: ["**/*.js"],
languageOptions: {
@devamitranjan
devamitranjan / package.json
Created June 19, 2024 17:27
package.json file for Vite with jsdom
{
"name": "my-package",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"start": "vite",
"start:mock": "VITE_MOCK_SERVER=true vite",
"build": "tsc && vite build",
const getRandomNumberOfTodos = () => {
const todos = [];
for (let i = 0; i < Math.floor(Math.random() * 10); i++) {
todos.push({
id: i,
title: `My Todo - ${i}`,
description: `This is the description of the todo -${i}`
});
}
@devamitranjan
devamitranjan / SentimentAnalysis.py
Last active December 15, 2020 18:01
This is the final version of sentiment analysis on a YouTube video.
import os
import googleapiclient.discovery
# Fetching the data from Youtube API
def google_api(key,vidId):
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
api_service_name = "youtube"
api_version = "v3"
DEVELOPER_KEY = key
import matplotlib.pyplot as plt
from sklearn.feature_extraction import text
from wordcloud import WordCloud
def generate_word_clouds(df):
    """Render one word cloud built from every comment in df['Comments'].

    Assumes df['Comments'] holds strings. Uses sklearn's English stop-word
    list; the fixed random_state keeps the layout reproducible. Shows the
    figure via matplotlib (side effect only, returns None).
    """
    corpus = ' '.join(comment for comment in df['Comments'])
    cloud = WordCloud(
        stopwords=text.ENGLISH_STOP_WORDS,
        width=1000,
        height=600,
        random_state=21,
        max_font_size=110,
    ).generate(corpus)
    plt.imshow(cloud, interpolation="bilinear")
    plt.axis('off')
    plt.show()
def print_neutral_comments(data=None):
    """Print every comment classified as 'Neutral', ordered by polarity.

    Args:
        data: DataFrame with 'Comments', 'Polarity' and 'Analysis' columns.
              Defaults to the module-level ``df`` for backward compatibility.
    """
    if data is None:
        data = df  # fall back to the global frame built earlier in the script
    print('Printing neutral comments:\n')
    # reset_index so positional access lines up with the sorted order; the
    # original indexed by the pre-sort integer labels, which silently undid
    # the sort and printed rows in their original order.
    sortedDF = data.sort_values(by=['Polarity']).reset_index(drop=True)
    for i in range(0, sortedDF.shape[0]):
        if sortedDF['Analysis'][i] == 'Neutral':
            print(str(i+1) + '> ' + sortedDF['Comments'][i])
    print()
print_neutral_comments()
def print_negative_comments(data=None):
    """Print every comment classified as 'Negative', ordered by polarity.

    Args:
        data: DataFrame with 'Comments', 'Polarity' and 'Analysis' columns.
              Defaults to the module-level ``df`` for backward compatibility.
    """
    if data is None:
        data = df  # fall back to the global frame built earlier in the script
    print('Printing negative comments:\n')
    # reset_index so positional access lines up with the sorted order; the
    # original indexed by the pre-sort integer labels, which silently undid
    # the sort and printed rows in their original order.
    sortedDF = data.sort_values(by=['Polarity']).reset_index(drop=True)
    for i in range(0, sortedDF.shape[0]):
        if sortedDF['Analysis'][i] == 'Negative':
            print(str(i+1) + '> ' + sortedDF['Comments'][i])
    print()
print_negative_comments()
def print_positive_comments(data=None):
    """Print every comment classified as 'Positive', ordered by polarity.

    Args:
        data: DataFrame with 'Comments', 'Polarity' and 'Analysis' columns.
              Defaults to the module-level ``df`` for backward compatibility.
    """
    if data is None:
        data = df  # fall back to the global frame built earlier in the script
    print('Printing positive comments:\n')
    # reset_index so positional access lines up with the sorted order; the
    # original indexed by the pre-sort integer labels, which silently undid
    # the sort and printed rows in their original order.
    sortedDF = data.sort_values(by=['Polarity']).reset_index(drop=True)
    for i in range(0, sortedDF.shape[0]):
        if sortedDF['Analysis'][i] == 'Positive':
            print(str(i+1) + '> ' + sortedDF['Comments'][i])
    print()
print_positive_comments()