# Print the current exception's stack trace (meant to be called inside an except block)
import traceback
traceback.print_exc()

import re

def parse_jobs(file_path):
    """Parses the gil.txt file and returns a dictionary of jobs with their dependencies."""
    with open(file_path, 'r') as file:
        content = file.read()
    # Split the content into job blocks at the /* ... */ banner comment that precedes each insert_job entry
    job_blocks = re.split(r'\/\*\s+[\w\s-]+\s+\*\/', content)
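    # Continuation sketch (not in the original snippet), assuming an AutoSys JIL-style layout where each
    # block contains "insert_job: <name>" and an optional "condition: ..." line.
    jobs = {}
    for block in job_blocks:
        name = re.search(r'insert_job:\s*(\S+)', block)
        if name:
            condition = re.search(r'condition:\s*(.+)', block)
            jobs[name.group(1)] = condition.group(1).strip() if condition else None
    return jobs

# Example usage (file name taken from the docstring above):
# jobs = parse_jobs("gil.txt")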

import java.util.{Calendar, Date}

object DateDifferenceChecker {
  def isOneDayApart(date1: Date, date2: Date, excludeWeekends: Boolean = true): Boolean = {
    val calendar1 = Calendar.getInstance()
    val calendar2 = Calendar.getInstance()
    // Set the calendars to the input dates
    calendar1.setTime(date1)
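    calendar2.setTime(date2)
    // Continuation sketch (not in the original snippet); assumes date1 is the earlier of the two dates.
    // Advance date1 by one day, skipping Saturday and Sunday when excludeWeekends is true.
    calendar1.add(Calendar.DAY_OF_MONTH, 1)
    while (excludeWeekends &&
           (calendar1.get(Calendar.DAY_OF_WEEK) == Calendar.SATURDAY ||
            calendar1.get(Calendar.DAY_OF_WEEK) == Calendar.SUNDAY)) {
      calendar1.add(Calendar.DAY_OF_MONTH, 1)
    }
    // They are one (business) day apart if the advanced date1 lands on the same calendar day as date2
    calendar1.get(Calendar.YEAR) == calendar2.get(Calendar.YEAR) &&
      calendar1.get(Calendar.DAY_OF_YEAR) == calendar2.get(Calendar.DAY_OF_YEAR)
  }
}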

<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3
                              http://maven.apache.org/xsd/assembly-1.1.3.xsd">
  <id>custom</id>
  <formats>
    <format>jar</format>
  </formats>
  <dependencySets>
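    <!-- Completion sketch (not in the original snippet): bundle runtime dependencies, unpacked,
         into the root of the assembled jar; adjust scope and unpack as needed. -->
    <dependencySet>
      <outputDirectory>/</outputDirectory>
      <useProjectArtifact>true</useProjectArtifact>
      <unpack>true</unpack>
      <scope>runtime</scope>
    </dependencySet>
  </dependencySets>
</assembly>
<!-- This descriptor would typically be referenced from the maven-assembly-plugin configuration via
     <descriptors><descriptor>src/assembly/custom.xml</descriptor></descriptors> (the path is an assumption). -->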

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StructField, StringType, IntegerType, StructType}

object Main {
  def main(args: Array[String]): Unit = {
    val columns = Map("id" -> "int", "name" -> "string", "age" -> "int")
    val spark = SparkSession.builder
      .appName("CSV Loader")
      .config("spark.master", "local")

# Here's a Python program that uses the Azure SDK to connect to Azure Kubernetes Service (AKS) and checks for the presence of a namespace, configmap, and secret with specific key-value pairs:
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient
from kubernetes import client, config
from kubernetes.client.rest import ApiException

def check_aks_resources(resource_group, cluster_name, subscription_id):
    # Authenticate with Azure
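    # Continuation sketch (not in the original snippet). The namespace/configmap/secret names below are
    # placeholders; a full check of specific key-value pairs would additionally inspect the .data field
    # of the returned ConfigMap/Secret objects.
    credential = DefaultAzureCredential()
    aks_client = ContainerServiceClient(credential, subscription_id)

    # Fetch admin credentials for the cluster and point the Kubernetes client at them
    creds = aks_client.managed_clusters.list_cluster_admin_credentials(resource_group, cluster_name)
    with open("kubeconfig.tmp", "wb") as f:
        f.write(creds.kubeconfigs[0].value)
    config.load_kube_config(config_file="kubeconfig.tmp")
    v1 = client.CoreV1Api()

    def exists(read_call):
        try:
            read_call()
            return True
        except ApiException as e:
            if e.status == 404:
                return False
            raise

    namespace_ok = exists(lambda: v1.read_namespace("my-namespace"))
    configmap_ok = exists(lambda: v1.read_namespaced_config_map("my-configmap", "my-namespace"))
    secret_ok = exists(lambda: v1.read_namespaced_secret("my-secret", "my-namespace"))
    return namespace_ok, configmap_ok, secret_ok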

import java.nio.file.{Paths, Path}

// Resolves a file that is on the classpath (e.g. under src/main/resources).
// Note: Paths.get(url.toURI) only works when the resource is a plain file on disk,
// not when it is packaged inside a jar.
def getCsvFilePath(fileName: String): Option[Path] = {
  val resourceUrl = Option(getClass.getClassLoader.getResource(fileName))
  resourceUrl.map(url => Paths.get(url.toURI))
}

// Example usage:
val csvFilePath: Option[Path] = getCsvFilePath("data.csv")

import org.apache.spark.sql.{DataFrame, SparkSession}

// Reads a JDBC table using connection details supplied via environment variables
def readTable(spark: SparkSession, tableName: String): DataFrame = {
  val url = sys.env.getOrElse("DB_URL", "default")
  val user = sys.env.getOrElse("DB_USERNAME", "default")
  val password = sys.env.getOrElse("DB_PASSWORD", "default")
  spark.read
    .format("jdbc")
    .option("url", url)
    .option("dbtable", tableName)
    .option("user", user)
    .option("password", password)
    .load()
}

# Look up the built-in role definition ID for the AKS cluster admin role
az role definition list --name "Azure Kubernetes Service Cluster Admin Role" --query "[].id" -o tsv
# Assign that role to a principal, scoped to the AKS cluster
az role assignment create --assignee-object-id <principal_id> --assignee-principal-type <principal_type> --scope <aks_cluster_id> --role "Azure Kubernetes Service Cluster Admin Role"
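# Sketch (resource group and cluster names are assumptions): the <aks_cluster_id> scope above is the
# cluster's full resource ID, which can be retrieved with:
az aks show --resource-group myResourceGroup --name myAKSCluster --query id -o tsv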

import yfinance as yf
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks

def get_stock_data(ticker, start_date, end_date):
    stock = yf.Ticker(ticker)
    data = stock.history(start=start_date, end=end_date)
    return data['Close']
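
# Usage sketch (ticker and date range are assumptions): fetch closing prices, mark local peaks, and plot
prices = get_stock_data("AAPL", "2023-01-01", "2023-12-31")
peaks, _ = find_peaks(prices.values)
plt.plot(prices.index, prices.values, label="Close")
plt.plot(prices.index[peaks], prices.values[peaks], "x", label="Peaks")
plt.legend()
plt.show()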