Test how long it takes to refresh data in the SQL Endpoint
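The notebook code below resolves the current lakehouse's SQL analytics endpoint, opens a token-authenticated ODBC connection to it, and then polls until the row count returned by the SQL endpoint matches the row count Spark sees for the same table, printing a timestamp before and after the wait so the sync delay can be read off.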
import pandas as pd
import struct
import sqlalchemy
import pyodbc
import notebookutils
import sempy.fabric as fabric
import time
from datetime import datetime
def create_engine(connection_string: str):
    # Fetch an Entra ID token for the Power BI / Fabric audience and pack it
    # into the structure the ODBC driver expects for token authentication.
    token = notebookutils.credentials.getToken('https://analysis.windows.net/powerbi/api').encode("UTF-16-LE")
    token_struct = struct.pack(f'<I{len(token)}s', len(token), token)
    SQL_COPT_SS_ACCESS_TOKEN = 1256  # connection attribute for passing an access token (msodbcsql.h)
    return sqlalchemy.create_engine(
        "mssql+pyodbc://",
        creator=lambda: pyodbc.connect(connection_string, attrs_before={SQL_COPT_SS_ACCESS_TOKEN: token_struct}),
    )
# Notebook context: identify the current workspace and default lakehouse.
tenant_id = spark.conf.get("trident.tenant.id")
workspace_id = spark.conf.get("trident.workspace.id")
lakehouse_id = spark.conf.get("trident.lakehouse.id")
lakehouse_name = spark.conf.get("trident.lakehouse.name")

# Look up the SQL analytics endpoint connection string for this lakehouse.
sql_endpoint = fabric.FabricRestClient().get(f"/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}").json()['properties']['sqlEndpointProperties']['connectionString']

connection_string = f"Driver={{ODBC Driver 18 for SQL Server}};Server={sql_endpoint},1433;Encrypt=Yes;TrustServerCertificate=No"
print(f"connection_string={connection_string}")
engine = create_engine(connection_string)

# Optional smoke test:
# df = pd.read_sql_query("SELECT DB_NAME() AS [Current Database];", engine)
# display(df)
lh = "demolake" | |
tablename = "part2" | |
schema = "dbo" | |
sparkrc = 0 | |
sqlrc= 1 | |
i=1 | |
now = datetime.now() | |
print("now =", now) | |
# Poll until the row count reported by the SQL endpoint matches the row
# count reported by Spark, i.e. until the endpoint's metadata sync has
# caught up with the lakehouse.
while sparkrc != sqlrc:
    print(i)  # current iteration
    i += 1
    try:
        # Quick query to confirm the endpoint connection is responsive (result unused).
        dfs = pd.read_sql_query("SELECT DB_NAME() AS [Current Database];", engine)
        sparkquery = f"select count(*) as row_count from {lh}.{tablename}"
        sqlquery = f"select count(*) as row_count from {schema}.{tablename}"
        df = spark.sql(sparkquery)
        dfx = pd.read_sql_query(sqlquery, engine)
        df_pandas = df.toPandas()
        sparkrc = df_pandas['row_count'].values[0]
        sqlrc = dfx['row_count'].values[0]
        print(f"Spark table count {sparkrc} SQL count {sqlrc}")
    except Exception as e:
        # Surface transient connection/query errors and keep polling.
        print("An error occurred:", str(e))
    time.sleep(5)

now = datetime.now()
print("now =", now)