Skip to content

Instantly share code, notes, and snippets.

import numpy as np
import matplotlib.pyplot as plt

# Synthetic linear data: y = 4 + 3x + standard-normal noise.
np.random.seed(0)
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

# Design matrix: prepend a column of ones for the intercept term.
X_b = np.c_[np.ones((100, 1)), X]

# Fit by ordinary least squares. The original formed the explicit
# inverse of the normal matrix (inv(X^T X) @ X^T y), which is both
# numerically unstable and undefined when X^T X is singular.
# np.linalg.lstsq solves the same problem via a stable factorization
# and returns the identical solution here.
theta_best, *_ = np.linalg.lstsq(X_b, y, rcond=None)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
np.random.seed(42)
data = {
"Age": np.random.randint(18, 70, 100),
"Salary": np.random.randint(20000, 120000, 100),
"Experience": np.random.randint(1, 40, 100),
00:00.000 --> 00:04.960
It's no secret that OpenAI is not open, going by the plain English meaning of the word.
00:04.960 --> 00:07.880
Most of their tech is not open source and not open to the public.
00:07.880 --> 00:11.000
In fact, to use their AI, the only thing that opens is your wallet.
00:11.000 --> 00:14.440
# Python 3.12.3 base image for a pip-managed application.
FROM python:3.12.3
# All subsequent COPY/RUN paths are relative to /app inside the image.
WORKDIR /app
# Copy only the dependency list first so the pip-install layer below is
# cached until requirements.txt itself changes.
COPY requirements.txt .
# --no-cache-dir keeps pip's download cache out of the image layer.
RUN pip install --no-cache-dir -r requirements.txt
# Copy the application code last (it changes most often).
# NOTE(review): no CMD/ENTRYPOINT is visible in this fragment — the
# run command is presumably defined elsewhere; confirm.
COPY app.py .
# Student records as flat lists, positionally:
#   [enrollment_no, name, branch, subject_codes, marks_1, marks_2, marks_3]
# Each of the three trailing lists holds one score per subject in
# subject_codes order. What the three lists represent (presumably three
# exam components or attempts) is not shown here — TODO confirm against
# the loop that consumes them.
students = [
[12102130600101, 'Bhavya', 'CS', ['PWP','DAA','CAD'] , [80, 85, 90], [70, 75, 80], [90, 95, 100]],
[12102130600102, 'Dhruv', 'IT', ['PWP','DAA','CAD'] , [75, 80, 85], [65, 70, 75], [85, 90, 95]],
[12102130600103, 'Dhara', 'ECE', ['PWP','DAA','CAD'] , [85, 90, 95], [75, 80, 85], [95, 100, 100]],
[12102130600104, 'Kayur', 'ME', ['PWP','DAA','CAD'] , [70, 75, 80], [60, 65, 70], [80, 85, 90]],
]
# Accumulator to be filled from the records above, one entry per student.
student_dict = {}
for student in students:
#include <iostream>
#include <cuda_runtime.h>
const int N = 256; // Matrix size (N x N)
// Kernel function for matrix multiplication
__global__ void matrixMultiply(float* A, float* B, float* C, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include "kernel.h"
#include "kernel.cu"
#include "dev_array.h"
#include <math.h>
#include<stdio.h>
#include<cuda.h>
// Element-wise array addition kernel: z[i] = x[i] + y[i],
// one array element handled by each CUDA thread.
__global__ void arradd(int *x,int *y, int *z)
{
    // Flatten the (block, thread) coordinates into one global index:
    // every block contributes blockDim.x threads, so thread threadIdx.x
    // of block blockIdx.x owns exactly this slot.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;

    z[idx] = x[idx] + y[idx];
}
@Bhavya031
Bhavya031 / matix.c
Created September 11, 2023 04:36
CUDA matrix multiplication
#include <stdio.h>
#include <cuda.h>
#define N 3 // Matrix size (3x3)
__global__ void matrixMultiply(int *a, int *b, int *c)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
@Bhavya031
Bhavya031 / matix.c
Created September 11, 2023 04:34
cuda matrix multiplication
#include <stdio.h>
// Matrix dimensions
#define N 4
#define M 4
#define P 4
// CUDA kernel for matrix multiplication
__global__ void matrixMul(int *a, int *b, int *c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;