Lab1-2.kql
// https://aka.ms/adxinaday
///////////
// Lab 1 //
///////////
.create table logsRaw(
    Timestamp:datetime,
    Source:string,
    Node:string,
    Level:string,
    Component:string,
    ClientRequestId:string,
    Message:string,
    Properties:dynamic
)
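// Optional check (not part of the original lab): confirm the table was created with the expected schema.
.show table logsRaw cslschema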
logsRaw
| count
// T-SQL equivalent of the count above; ADX also accepts T-SQL queries.
SELECT COUNT(*) FROM logsRaw
// explain translates a T-SQL query into the equivalent KQL.
explain SELECT MAX(Timestamp) AS MaxTimestamp FROM logsRaw WHERE Level='Error'
logsRaw
| where Level == "Error"
| summarize max(Timestamp)
logsRaw
| where Level=="Error"
| take 10
logsRaw
| summarize count() // or: count
logsRaw
| summarize avg(Timestamp), max(Timestamp)
logsRaw
| where isnotempty(Properties)
| take 10
logsRaw
| where Component == "DOWNLOADER"
| take 100
| extend originalSize=tolong(Properties.OriginalSize), compressedSize=tolong(Properties.compressedSize), toguid(ClientRequestId)
| getschema
logsRaw
| project Timestamp, ClientRequestId, Level
| where Timestamp >= datetime(2014-03-08 01:00) and Timestamp <= datetime(2014-03-08 10:00)
| count
logsRaw
| where Component =='INGESTOR_EXECUTER'
| count
logsRaw
| summarize count() by Component
| sort by count_
// As part of the incident investigation, you want to extract format and rowCount from the INGESTOR_EXECUTER [sic] component.
// Rename the calculated fields to fileFormat and rowCount respectively.
// Also, make sure Timestamp, fileFormat, and rowCount are the first 3 columns.
logsRaw
| where Component =='INGESTOR_EXECUTER'
| project Timestamp, fileFormat=tostring(Properties.format), rowCount=tolong(Properties.rowCount), Properties.size
| summarize count() by fileFormat
| render piechart
// | distinct fileFormat
logsRaw
| where Component =='DATAACCESS'
| count
logsRaw
| where Message contains "ingestion"
| summarize count() by Level
logsRaw
| summarize count() by Level
| render piechart
logsRaw
| where Timestamp >= datetime(2014-03-08 09:50) and Timestamp <= datetime(2014-03-08 10:00)
| summarize count() by bin(Timestamp,1ms)
| render timechart
.create function ManiputatelogsRaw() {
    logsRaw
    | where Component in (
        'INGESTOR_EXECUTER',
        'INGESTOR_GATEWAY',
        'INTEGRATIONDATABASE',
        'INTEGRATIONSERVICEFLOWS',
        'INTEGRATIONSERVICETRACE',
        'DOWNLOADER')
}
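// Optional sanity check (not part of the original lab): run the update-policy function directly
// to preview the rows it will route into ingestionLogs.
ManiputatelogsRaw()
| take 10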
.create table ingestionLogs (
    Timestamp: datetime,
    Source: string,
    Node: string,
    Level: string,
    Component: string,
    ClientRequestId: string,
    Message: string,
    Properties: dynamic)
.alter table ingestionLogs policy update @'[{ "IsEnabled": true, "Source": "logsRaw", "Query": "ManiputatelogsRaw()", "IsTransactional": true, "PropagateIngestionProperties": false}]'
.show tables details
.show table ingestionLogs policy update
// Note: execute the commands below one at a time. Use the OperationId returned by each command
// to check its status, and run the next command only after the previous one has completed.
.show operations
.show journal
.show commands-and-queries
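// A minimal status check (assumption, not part of the original lab): review recent operations
// and only continue once the State of the previous command shows it completed.
.show operations
| where StartedOn > ago(1h)
| project OperationId, Operation, StartedOn, State, Status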
// in-query ingest
.ingest into table logsRaw (
h'https://logsbenchmark00.blob.core.windows.net/logsbenchmark-onegb/2014/03/08/00/data.csv.gz?sp=rl&st=2022-08-18T00:00:00Z&se=2030-01-01T00:00:00Z&spr=https&sv=2021-06-08&sr=c&sig=5pjOow5An3%2BTs5mZ%2FyosJBPtDvV7%2FXfDO8pLEeeylVc%3D')
with (format='csv',
creationTime='2014-03-08T00:00:00Z');
.ingest async into table logsRaw (
h'https://logsbenchmark00.blob.core.windows.net/logsbenchmark-onegb/2014/03/08/01/data.csv.gz?sp=rl&st=2022-08-18T00:00:00Z&se=2030-01-01T00:00:00Z&spr=https&sv=2021-06-08&sr=c&sig=5pjOow5An3%2BTs5mZ%2FyosJBPtDvV7%2FXfDO8pLEeeylVc%3D')
with (format='csv',
creationTime='2014-03-08T01:00:00Z');
.ingest async into table logsRaw (
h'https://logsbenchmark00.blob.core.windows.net/logsbenchmark-onegb/2014/03/08/02/data.csv.gz?sp=rl&st=2022-08-18T00:00:00Z&se=2030-01-01T00:00:00Z&spr=https&sv=2021-06-08&sr=c&sig=5pjOow5An3%2BTs5mZ%2FyosJBPtDvV7%2FXfDO8pLEeeylVc%3D')
with (format='csv',
creationTime='2014-03-08T02:00:00Z');
.ingest async into table logsRaw (
h'https://logsbenchmark00.blob.core.windows.net/logsbenchmark-onegb/2014/03/08/03/data.csv.gz?sp=rl&st=2022-08-18T00:00:00Z&se=2030-01-01T00:00:00Z&spr=https&sv=2021-06-08&sr=c&sig=5pjOow5An3%2BTs5mZ%2FyosJBPtDvV7%2FXfDO8pLEeeylVc%3D')
with (format='csv',
creationTime='2014-03-08T03:00:00Z');
.ingest async into table logsRaw (
h'https://logsbenchmark00.blob.core.windows.net/logsbenchmark-onegb/2014/03/08/04/data.csv.gz?sp=rl&st=2022-08-18T00:00:00Z&se=2030-01-01T00:00:00Z&spr=https&sv=2021-06-08&sr=c&sig=5pjOow5An3%2BTs5mZ%2FyosJBPtDvV7%2FXfDO8pLEeeylVc%3D')
with (format='csv',
creationTime='2014-03-08T04:00:00Z');
.show operations
| where OperationId == 'f265ff45-bb4a-4ab1-90c7-16ff0aaefdba'
ingestionLogs
| count
// ratio: ingestionLogs count / logsRaw count
ingestionLogs
| count //93648
logsRaw
| where ingestion_time() >= ago(1h)
| count //1442786
print ratio=todouble(93648)/todouble(1442786)
let x=ingestionLogs
| count
| extend y=1;
logsRaw
| where ingestion_time() >= ago(1h)
| count
| extend y=1
| join kind=leftouter x on $left.y==$right.y
| extend ratio=todouble(Count1)/todouble(Count)
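// Alternative sketch (assumption, not part of the original lab): compute the same ratio
// without a join by capturing the raw-table count in a scalar via toscalar().
let rawCount = toscalar(logsRaw | where ingestion_time() >= ago(1h) | count);
ingestionLogs
| count
| extend ratio = todouble(Count) / todouble(rawCount)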
///////////
// Lab 2 //
///////////
.alter table ingestionLogs policy retention
```
{
  "SoftDeletePeriod": "10:12:00:00",
  "Recoverability": "Enabled"
}
```
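// Optional check (not part of the original lab): confirm the retention policy now in effect on the table.
.show table ingestionLogs policy retention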
.show queries
.show journal | where * has 'ManiputatelogsRaw'
| order by EventTimestamp
.show commands
| where User =='[email protected]'
| project CommandType, Text, AuthScheme = tostring(ClientRequestProperties.AuthorizationScheme)
| distinct AuthScheme
.show table ingestionLogs details
| extend CachingPolicy=todynamic(CachingPolicy)
| project CachingPolicy.DataHotSpan
let LogType = 'Warning';
let TimeBucket = 1m;
ingestionLogs
| where Level == LogType
| summarize count() by bin(Timestamp, TimeBucket)
| where Timestamp == datetime(2014-03-08 00:00:00.0000)
search 'Exception=System.Timeout'
search Message:"Exception" or Message:"Timeout"
search in (logsRaw, ingestionLogs) "Exception=System.Timeout"
print str="ThreadId:458745723, Machine:Node001, Text: The service is up, Level: Info"
| parse-kv str as (Text: string, ThreadId:long, Machine: string) with (pair_delimiter=',', kv_delimiter=':')
| project-away str
print str="$IngestionCommand table=scaleEvents format=json"
| parse-kv str as (Text: string, table:string , format: string) with (pair_delimiter=' ', kv_delimiter='=')
| project-away str
ingestionLogs
| where Component == 'INGESTOR_GATEWAY'
| parse-kv Message as (Text: string, table:string , format: string) with (pair_delimiter=' ', kv_delimiter='=')
| summarize count() by format
let TimeBuckets = 1m;
ingestionLogs
| extend Size = tolong(Properties.size)
| make-series ActualSize=round(avg(Size),2) on Timestamp step TimeBuckets
| extend anomaly = series_decompose_anomalies(ActualSize, 0.5)
// | render anomalychart with(anomalycolumns=anomaly, title='Ingestion Anomalies')
| mv-expand anomaly to typeof(int)
| where anomaly <> 0
// 2014-03-08 04:24:00.000?
let TimeBuckets = 1m;
ingestionLogs
| extend Size = tolong(Properties.size)
| make-series ActualSize=round(avg(Size),2) on Timestamp step TimeBuckets
| extend anomaly = series_decompose_anomalies(ActualSize)
| render anomalychart
let _startTime='2014-03-08T00:00:00';
let _endTime='2014-03-08T10:00:00';
let TimeBuckets=1m;
ingestionLogs
| where Timestamp between (todatetime(_startTime) .. todatetime(_endTime))
// | summarize count() by bin(Timestamp, 10m), Component
| make-series MySeries=count() on Timestamp step TimeBuckets
| extend anomaly = series_decompose_anomalies(MySeries)
| render anomalychart
ingestionLogs
| where Timestamp between (datetime(2014-03-08T00:00:00) .. datetime(2014-03-08T10:00:00))
| summarize count() by Level
| render piechart