% diff --ignore-space-change ~/Downloads/hive_metastore.thrift-313 ~/Downloads/hive_metastore.thrift-400
31a32,35
> const byte ACCESSTYPE_NONE = 1;
> const byte ACCESSTYPE_READONLY = 2;
> const byte ACCESSTYPE_WRITEONLY = 4;
> const byte ACCESSTYPE_READWRITE = 8;
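
The four ACCESSTYPE constants introduced in 4.0.0 are bit flags (1, 2, 4, 8), so a client can test the new optional Table.accessType field (added further down in this diff) with a bitwise AND. A minimal Java sketch, assuming the standard Thrift-generated bindings (constants land in the generated hive_metastoreConstants class):

    import org.apache.hadoop.hive.metastore.api.Table;
    import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.*;

    static boolean isWritable(Table t) {
        if (!t.isSetAccessType()) {
            return true; // assumption: a server that never sets accessType imposes no restriction
        }
        return (t.getAccessType() & (ACCESSTYPE_WRITEONLY | ACCESSTYPE_READWRITE)) != 0;
    }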
43a48,55
> // Key-value store to be used with selected
> // Metastore APIs (create, alter methods).
> // The client can pass environment properties / configs that can be
> // accessed in hooks.
> struct EnvironmentContext {
> 1: map<string, string> properties
> }
>
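
As the comment in the hunk above says, EnvironmentContext is a free-form property bag that create/alter calls carry through to metastore hooks. A minimal sketch of populating one from Java (putToProperties is the accessor Thrift generates for the map field; the key shown is hypothetical):

    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    EnvironmentContext ctx = new EnvironmentContext();
    // Arbitrary key/value pairs; hooks on the metastore side decide what to read.
    ctx.putToProperties("hive.query.id", "query-42"); // hypothetical key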
120a133,141
> struct SQLAllTableConstraints {
> 1: optional list<SQLPrimaryKey> primaryKeys,
> 2: optional list<SQLForeignKey> foreignKeys,
> 3: optional list<SQLUniqueConstraint> uniqueConstraints,
> 4: optional list<SQLNotNullConstraint> notNullConstraints,
> 5: optional list<SQLDefaultConstraint> defaultConstraints,
> 6: optional list<SQLCheckConstraint> checkConstraints
> }
>
133a155
> DATACONNECTOR = 6,
144a167,168
> const string HIVE_FILTER_FIELD_TABLE_NAME = "hive_filter_field_tableName__"
> const string HIVE_FILTER_FIELD_TABLE_TYPE = "hive_filter_field_tableType__"
145a170,185
> struct PropertySetRequest {
> 1: required string nameSpace;
> 2: map<string, string> propertyMap;
> }
>
> struct PropertyGetRequest {
> 1: required string nameSpace;
> 2: string mapPrefix;
> 3: optional string mapPredicate;
> 4: optional list<string> mapSelection;
> }
>
> struct PropertyGetResponse {
> 1: map<string, map<string , string>> properties;
> }
>
173a214
> EXCL_WRITE = 4,
178a220,221
> REBALANCE = 3,
> ABORT_TXN_CLEANUP = 4,
235a279,283
> enum DatabaseType {
> NATIVE = 1,
> REMOTE = 2
> }
>
280a329,340
> struct TruncateTableRequest {
> 1: required string dbName,
> 2: required string tableName,
> 3: optional list<string> partNames,
> 4: optional i64 writeId=-1,
> 5: optional string validWriteIdList,
> 6: optional EnvironmentContext environmentContext
> }
>
> struct TruncateTableResponse {
> }
>
332c392
< 3: string locationUri // default storage location. When databases are created in
---
> 3: string locationUri, // default storage location. When databases are created in
334a395
> 4: optional i32 createTime // creation time of catalog in seconds since epoch
371c432,437
< 8: optional string catalogName
---
> 8: optional string catalogName,
> 9: optional i32 createTime, // creation time of database in seconds since epoch
> 10: optional string managedLocationUri, // directory for managed tables
> 11: optional DatabaseType type,
> 12: optional string connector_name,
> 13: optional string remote_dbname
414,433c480,487
< // table information
< struct Table {
< 1: string tableName, // name of the table
< 2: string dbName, // database name ('default')
< 3: string owner, // owner of this table
< 4: i32 createTime, // creation time of the table
< 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on)
< 6: i32 retention, // retention time
< 7: StorageDescriptor sd, // storage descriptor of the table
< 8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
< 9: map<string, string> parameters, // to store comments or any other user level parameters
< 10: string viewOriginalText, // original view text, null for non-view
< 11: string viewExpandedText, // expanded view text, null for non-view
< 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
< 13: optional PrincipalPrivilegeSet privileges,
< 14: optional bool temporary=false,
< 15: optional bool rewriteEnabled, // rewrite enabled or not
< 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation
< 17: optional string catName, // Name of the catalog the table is in
< 18: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility)
---
> struct CreationMetadata {
> 1: required string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: required set<string> tablesUsed,
> 5: optional string validTxnList,
> 6: optional i64 materializationTime,
> 7: optional list<SourceTable> sourceTables
436,474d489
< struct Partition {
< 1: list<string> values // string value is converted to appropriate partition key type
< 2: string dbName,
< 3: string tableName,
< 4: i32 createTime,
< 5: i32 lastAccessTime,
< 6: StorageDescriptor sd,
< 7: map<string, string> parameters,
< 8: optional PrincipalPrivilegeSet privileges,
< 9: optional string catName
< }
<
< struct PartitionWithoutSD {
< 1: list<string> values // string value is converted to appropriate partition key type
< 2: i32 createTime,
< 3: i32 lastAccessTime,
< 4: string relativePath,
< 5: map<string, string> parameters,
< 6: optional PrincipalPrivilegeSet privileges
< }
<
< struct PartitionSpecWithSharedSD {
< 1: list<PartitionWithoutSD> partitions,
< 2: StorageDescriptor sd,
< }
<
< struct PartitionListComposingSpec {
< 1: list<Partition> partitions
< }
<
< struct PartitionSpec {
< 1: string dbName,
< 2: string tableName,
< 3: string rootPath,
< 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
< 5: optional PartitionListComposingSpec partitionList,
< 6: optional string catName
< }
<
488c503,504
< 5: optional binary bitVectors
---
> 5: optional binary bitVectors,
> 6: optional binary histogram
496c512,513
< 5: optional binary bitVectors
---
> 5: optional binary bitVectors,
> 6: optional binary histogram
525c542,543
< 5: optional binary bitVectors
---
> 5: optional binary bitVectors,
> 6: optional binary histogram
537c555,556
< 5: optional binary bitVectors
---
> 5: optional binary bitVectors,
> 6: optional binary histogram
539a559,571
> struct Timestamp {
> 1: required i64 secondsSinceEpoch
> }
>
> struct TimestampColumnStatsData {
> 1: optional Timestamp lowValue,
> 2: optional Timestamp highValue,
> 3: required i64 numNulls,
> 4: required i64 numDVs,
> 5: optional binary bitVectors,
> 6: optional binary histogram
> }
>
547c579,580
< 7: DateColumnStatsData dateStats
---
> 7: DateColumnStatsData dateStats,
> 8: TimestampColumnStatsData timestampStats
567c600,603
< 2: required list<ColumnStatisticsObj> statsObj;
---
> 2: required list<ColumnStatisticsObj> statsObj,
> 3: optional bool isStatsCompliant, // Are the stats isolation-level-compliant with the
> // the calling query?
> 4: optional string engine = "hive"
569a606,716
> // FileMetadata represents the table-level (in case of unpartitioned) or partition-level
> // file metadata. Each partition could have more than 1 files and hence the list of
> // binary data field. Each value in data field corresponds to metadata for one file.
> struct FileMetadata {
> // current supported type mappings are
> // 1 -> IMPALA
> 1: byte type = 1
> 2: byte version = 1
> 3: list<binary> data
> }
>
> // this field can be used to store repeatitive information
> // (like network addresses in filemetadata). Instead of
> // sending the same object repeatedly, we can send the indices
> // corresponding to the object in this list.
> struct ObjectDictionary {
> // the key can be used to determine the object type
> // the value is the list of the objects which can be accessed
> // using their indices. These indices can be used to send instead of
> // full object which can reduce the payload significantly in case of
> // repetitive objects.
> 1: required map<string, list<binary>> values
> }
>
> // table information
> struct Table {
> 1: string tableName, // name of the table
> 2: string dbName, // database name ('default')
> 3: string owner, // owner of this table
> 4: i32 createTime, // creation time of the table
> 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on)
> 6: i32 retention, // retention time
> 7: StorageDescriptor sd, // storage descriptor of the table
> 8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
> 9: map<string, string> parameters, // to store comments or any other user level parameters
> 10: string viewOriginalText, // original view text, null for non-view
> 11: string viewExpandedText, // expanded view text, null for non-view
> 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
> 13: optional PrincipalPrivilegeSet privileges,
> 14: optional bool temporary=false,
> 15: optional bool rewriteEnabled, // rewrite enabled or not
> 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation
> 17: optional string catName, // Name of the catalog the table is in
> 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
> 19: optional i64 writeId=-1,
> 20: optional bool isStatsCompliant,
> 21: optional ColumnStatistics colStats, // column statistics for table
> 22: optional byte accessType,
> 23: optional list<string> requiredReadCapabilities,
> 24: optional list<string> requiredWriteCapabilities
> 25: optional i64 id, // id of the table. It will be ignored if set. It's only for
> // read purposes
> 26: optional FileMetadata fileMetadata, // optional serialized file-metadata for this table
> // for certain execution engines
> 27: optional ObjectDictionary dictionary,
> 28: optional i64 txnId, // txnId associated with the table creation
> }
>
> struct SourceTable {
> 1: required Table table,
> 2: required i64 insertedCount,
> 3: required i64 updatedCount,
> 4: required i64 deletedCount
> }
>
> struct Partition {
> 1: list<string> values // string value is converted to appropriate partition key type
> 2: string dbName,
> 3: string tableName,
> 4: i32 createTime,
> 5: i32 lastAccessTime,
> 6: StorageDescriptor sd,
> 7: map<string, string> parameters,
> 8: optional PrincipalPrivilegeSet privileges,
> 9: optional string catName,
> 10: optional i64 writeId=-1,
> 11: optional bool isStatsCompliant,
> 12: optional ColumnStatistics colStats, // column statistics for partition
> 13: optional FileMetadata fileMetadata // optional serialized file-metadata useful for certain execution engines
> }
>
> struct PartitionWithoutSD {
> 1: list<string> values // string value is converted to appropriate partition key type
> 2: i32 createTime,
> 3: i32 lastAccessTime,
> 4: string relativePath,
> 5: map<string, string> parameters,
> 6: optional PrincipalPrivilegeSet privileges
> }
>
> struct PartitionSpecWithSharedSD {
> 1: list<PartitionWithoutSD> partitions,
> 2: StorageDescriptor sd,
> }
>
>
> struct PartitionListComposingSpec {
> 1: list<Partition> partitions
> }
>
> struct PartitionSpec {
> 1: string dbName,
> 2: string tableName,
> 3: string rootPath,
> 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
> 5: optional PartitionListComposingSpec partitionList,
> 6: optional string catName,
> 7: optional i64 writeId=-1,
> 8: optional bool isStatsCompliant
> }
>
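
Among the structs moved and extended in the hunk above, ObjectDictionary implements a simple payload-deduplication scheme: objects that repeat across partitions (the comment cites network addresses in file metadata) are stored once per type key and referenced by index. A hedged sketch of the lookup side; the type key is invented for illustration:

    import java.nio.ByteBuffer;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.ObjectDictionary;

    // Resolve the idx-th shared object for a type key instead of shipping
    // the full object with every partition (values is map<string, list<binary>>).
    static ByteBuffer lookup(ObjectDictionary dict, String typeKey, int idx) {
        List<ByteBuffer> objects = dict.getValues().get(typeKey); // e.g. "impala.network-addresses" (hypothetical)
        return objects == null ? null : objects.get(idx);
    }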
572c719,720
< 2: required i64 partsFound // number of partitions for which stats were found
---
> 2: required i64 partsFound, // number of partitions for which stats were found
> 3: optional bool isStatsCompliant
577c725,728
< 2: optional bool needMerge //stats need to be merged with the existing stats
---
> 2: optional bool needMerge, //stats need to be merged with the existing stats
> 3: optional i64 writeId=-1, // writeId for the current query that updates the stats
> 4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
> 5: optional string engine = "hive" //engine creating the current request
579a731,734
> struct SetPartitionsStatsResponse {
> 1: required bool result;
> }
>
587,594d741
< // Key-value store to be used with selected
< // Metastore APIs (create, alter methods).
< // The client can pass environment properties / configs that can be
< // accessed in hooks.
< struct EnvironmentContext {
< 1: map<string, string> properties
< }
<
598c745,747
< 3: optional string catName
---
> 3: optional string catName,
> 4: optional string validWriteIdList,
> 5: optional i64 tableId=-1
609,610c758,761
< 4: string foreign_tbl_name
< 5: optional string catName // No cross catalog constraints
---
> 4: string foreign_tbl_name,
> 5: optional string catName, // No cross catalog constraints
> 6: optional string validWriteIdList,
> 7: optional i64 tableId=-1
620a772,773
> 4: optional string validWriteIdList,
> 5: optional i64 tableId=-1
630a784,785
> 4: optional string validWriteIdList,
> 5: optional i64 tableId=-1
640c795,797
< 3: required string tbl_name
---
> 3: required string tbl_name,
> 4: optional string validWriteIdList,
> 5: optional i64 tableId=-1
650c807,809
< 3: required string tbl_name
---
> 3: required string tbl_name,
> 4: optional string validWriteIdList,
> 5: optional i64 tableId=-1
656a816,822
> struct AllTableConstraintsRequest {
> 1: required string dbName,
> 2: required string tblName,
> 3: required string catName,
> 4: optional string validWriteIdList,
> 5: optional i64 tableId=-1
> }
657a824,827
> struct AllTableConstraintsResponse {
> 1: required SQLAllTableConstraints allTableConstraints
> }
>
695a866,872
> // Return type for get_partitions_spec_by_expr
> struct PartitionsSpecByExprResult {
> 1: required list<PartitionSpec> partitionsSpec,
> // Whether the results has any (currently, all) partitions which may or may not match
> 2: required bool hasUnknownPartitions
> }
>
701,702c878,885
< 5: optional i16 maxParts=-1
< 6: optional string catName
---
> 5: optional i16 maxParts=-1,
> 6: optional string catName,
> 7: optional string order
> 8: optional string validWriteIdList,
> 9: optional i64 id=-1, // table id
> 10: optional bool skipColumnSchemaForPartition,
> 11: optional string includeParamKeyPattern,
> 12: optional string excludeParamKeyPattern
706c889,890
< 1: required list<ColumnStatisticsObj> tableStats
---
> 1: required list<ColumnStatisticsObj> tableStats,
> 2: optional bool isStatsCompliant
710c894,895
< 1: required map<string, list<ColumnStatisticsObj>> partStats
---
> 1: required map<string, list<ColumnStatisticsObj>> partStats,
> 2: optional bool isStatsCompliant
717c902,905
< 4: optional string catName
---
> 4: optional string catName,
> 5: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
> 6: optional string engine = "hive", //engine creating the current request
> 7: optional i64 id=-1 // table id
725c913,915
< 5: optional string catName
---
> 5: optional string catName,
> 6: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
> 7: optional string engine = "hive" //engine creating the current request
730a921,922
> 2: optional bool isStatsCompliant,
> 3: optional list<FieldSchema> partitionColSchema
740c932,935
< 6: optional string catName
---
> 6: optional string catName,
> 7: optional string validWriteIdList,
> 8: optional bool skipColumnSchemaForPartition,
> 9: optional list<FieldSchema> partitionColSchema
769c964,965
< 9: optional string catName
---
> 9: optional string catName,
> 10: optional bool skipColumnSchemaForPartition
781c977,978
< 9: optional string catName
---
> 9: optional string catName,
> 10: optional string validWriteIdList
791a989,1020
> struct GetPartitionsByNamesRequest {
> 1: required string db_name,
> 2: required string tbl_name,
> 3: optional list<string> names,
> 4: optional bool get_col_stats,
> 5: optional list<string> processorCapabilities,
> 6: optional string processorIdentifier,
> 7: optional string engine = "hive",
> 8: optional string validWriteIdList,
> 9: optional bool getFileMetadata,
> 10: optional i64 id=-1, // table id
> 11: optional bool skipColumnSchemaForPartition,
> 12: optional string includeParamKeyPattern,
> 13: optional string excludeParamKeyPattern
> }
>
> struct GetPartitionsByNamesResult {
> 1: required list<Partition> partitions
> 2: optional ObjectDictionary dictionary
> }
>
> struct DataConnector {
> 1: string name,
> 2: string type,
> 3: string url,
> 4: optional string description,
> 5: optional map<string,string> parameters,
> 6: optional string ownerName,
> 7: optional PrincipalType ownerType,
> 8: optional i32 createTime
> }
>
801a1031,1047
> enum TxnType {
> DEFAULT = 0,
> REPL_CREATED = 1,
> READ_ONLY = 2,
> COMPACTION = 3,
> MATER_VIEW_REBUILD = 4,
> SOFT_DELETE = 5,
> REBALANCE_COMPACTION = 6
> }
>
> // specifies which info to return with GetTablesExtRequest
> enum GetTablesExtRequestFields {
> ACCESS_TYPE = 1, // return accessType
> PROCESSOR_CAPABILITIES = 2, // return ALL Capabilities for each Tables
> ALL = 2147483647
> }
>
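
GetTablesExtRequest.requestedFields (defined later in this diff) takes these GetTablesExtRequestFields values ORed together. A small sketch, assuming Thrift's generated Java enums with getValue():

    import org.apache.hadoop.hive.metastore.api.GetTablesExtRequestFields;

    // Ask for both accessType and the capability lists in one call;
    // ALL (2147483647) sets every bit and subsumes any combination.
    int requestedFields = GetTablesExtRequestFields.ACCESS_TYPE.getValue()
                        | GetTablesExtRequestFields.PROCESSOR_CAPABILITIES.getValue();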
851a1098
> 7: optional TxnType txn_type = TxnType.DEFAULT,
860a1108,1109
> 3: optional TxnType txn_type,
> 4: optional i64 errorCode,
864a1114
> 2: optional i64 errorCode,
866a1117,1147
> struct CommitTxnKeyValue {
> 1: required i64 tableId,
> 2: required string key,
> 3: required string value,
> }
>
> struct WriteEventInfo {
> 1: required i64 writeId,
> 2: required string database,
> 3: required string table,
> 4: required string files,
> 5: optional string partition,
> 6: optional string tableObj, // repl txn task does not need table object for commit
> 7: optional string partitionObj,
> }
>
> struct ReplLastIdInfo {
> 1: required string database,
> 2: required i64 lastReplId,
> 3: optional string table,
> 4: optional string catalog,
> 5: optional list<string> partitionList,
> }
>
> struct UpdateTransactionalStatsRequest {
> 1: required i64 tableId,
> 2: required i64 insertCount,
> 3: required i64 updatedCount,
> 4: required i64 deletedCount,
> }
>
869a1151,1158
> // Information related to write operations done in this transaction.
> 3: optional list<WriteEventInfo> writeEventInfos,
> // Information to update the last repl id of table/partition along with commit txn (replication from 2.6 to 3.0)
> 4: optional ReplLastIdInfo replLastIdInfo,
> // An optional key/value to store atomically with the transaction
> 5: optional CommitTxnKeyValue keyValue,
> 6: optional bool exclWriteEnabled = true,
> 7: optional TxnType txn_type,
884c1173,1174
< 2: required string validTxnList, // Valid txn list string wrt the current txn of the caller
---
> 2: optional string validTxnList, // Valid txn list string wrt the current txn of the caller
> 3: optional i64 writeId, //write id to be used to get the current txn id
900a1191,1196
> // Map for allocated write id against the txn for which it is allocated
> struct TxnToWriteId {
> 1: required i64 txnId,
> 2: required i64 writeId,
> }
>
910a1207,1209
> // If false, reuse previously allocate writeIds for txnIds. If true, remove older txnId to writeIds mappings
> // and regenerate (this is useful during re-compilation when we need to ensure writeIds are regenerated)
> 6: optional bool reallocate = false;
913,918d1211
< // Map for allocated write id against the txn for which it is allocated
< struct TxnToWriteId {
< 1: required i64 txnId,
< 2: required i64 writeId,
< }
<
922a1216,1231
> struct MaxAllocatedTableWriteIdRequest {
> 1: required string dbName,
> 2: required string tableName,
> }
> struct MaxAllocatedTableWriteIdResponse {
> 1: required i64 maxWriteId,
> }
> struct SeedTableWriteIdsRequest {
> 1: required string dbName,
> 2: required string tableName,
> 3: required i64 seedWriteId,
> }
> struct SeedTxnIdRequest {
> 1: required i64 seedTxnId,
> }
>
939a1249,1251
> 6: optional bool zeroWaitReadEnabled = false,
> 7: optional bool exclusiveCTAS = false,
> 8: optional bool locklessReadsEnabled = false
944a1257
> 3: optional string errorMessage
961a1275
> 5: optional i64 txnid,
1003,1007c1317,1321
< 1: required string dbname,
< 2: required string tablename,
< 3: optional string partitionname,
< 4: required CompactionType type,
< 5: optional string runas,
---
> 1: required string dbname
> 2: required string tablename
> 3: optional string partitionname
> 4: required CompactionType type
> 5: optional string runas
1008a1323,1327
> 7: optional string initiatorId
> 8: optional string initiatorVersion
> 9: optional string poolName
> 10: optional i32 numberOfBuckets
> 11: optional string orderByClause;
1010a1330,1382
> struct CompactionInfoStruct {
> 1: required i64 id,
> 2: required string dbname,
> 3: required string tablename,
> 4: optional string partitionname,
> 5: required CompactionType type,
> 6: optional string runas,
> 7: optional string properties,
> 8: optional bool toomanyaborts,
> 9: optional string state,
> 10: optional string workerId,
> 11: optional i64 start,
> 12: optional i64 highestWriteId,
> 13: optional string errorMessage,
> 14: optional bool hasoldabort,
> 15: optional i64 enqueueTime,
> 16: optional i64 retryRetention,
> 17: optional string poolname
> 18: optional i32 numberOfBuckets
> 19: optional string orderByClause;
> }
>
> struct OptionalCompactionInfoStruct {
> 1: optional CompactionInfoStruct ci,
> }
>
> enum CompactionMetricsMetricType {
> NUM_OBSOLETE_DELTAS,
> NUM_DELTAS,
> NUM_SMALL_DELTAS,
> }
>
> struct CompactionMetricsDataStruct {
> 1: required string dbname
> 2: required string tblname
> 3: optional string partitionname
> 4: required CompactionMetricsMetricType type
> 5: required i32 metricvalue
> 6: required i32 version
> 7: required i32 threshold
> }
>
> struct CompactionMetricsDataResponse {
> 1: optional CompactionMetricsDataStruct data
> }
>
> struct CompactionMetricsDataRequest {
> 1: required string dbName,
> 2: required string tblName,
> 3: optional string partitionName
> 4: required CompactionMetricsMetricType type
> }
>
1014c1386,1387
< 3: required bool accepted
---
> 3: required bool accepted,
> 4: optional string errormessage
1017a1391,1399
> 1: optional i64 id,
> 2: optional string poolName,
> 3: optional string dbName,
> 4: optional string tbName,
> 5: optional string partName,
> 6: optional CompactionType type,
> 7: optional string state,
> 8: optional i64 limit,
> 9: optional string order
1033a1416,1427
> 14: optional string errorMessage,
> 15: optional i64 enqueueTime,
> 16: optional string workerVersion,
> 17: optional string initiatorId,
> 18: optional string initiatorVersion,
> 19: optional i64 cleanerStart,
> 20: optional string poolName,
> 21: optional i64 nextTxnId,
> 22: optional i64 txnId,
> 23: optional i64 commitTime,
> 24: optional i64 hightestWriteId
>
1039a1434,1466
> struct AbortCompactionRequest {
> 1: required list<i64> compactionIds,
> 2: optional string type,
> 3: optional string poolName
> }
>
> struct AbortCompactionResponseElement {
> 1: required i64 compactionId,
> 2: optional string status,
> 3: optional string message
> }
>
> struct AbortCompactResponse {
> 1: required map<i64, AbortCompactionResponseElement> abortedcompacts,
> }
>
> struct GetLatestCommittedCompactionInfoRequest {
> 1: required string dbname,
> 2: required string tablename,
> 3: optional list<string> partitionnames,
> 4: optional i64 lastCompactionId,
> }
>
> struct GetLatestCommittedCompactionInfoResponse {
> 1: required list<CompactionInfoStruct> compactions,
> }
>
> struct FindNextCompactRequest {
> 1: optional string workerId,
> 2: optional string workerVersion,
> 3: optional string poolName
> }
>
1058,1065d1484
< struct CreationMetadata {
< 1: required string catName
< 2: required string dbName,
< 3: required string tblName,
< 4: required set<string> tablesUsed,
< 5: optional string validTxnList,
< 6: optional i64 materializationTime
< }
1069a1489,1492
> 3: optional list<string> eventTypeSkipList,
> 4: optional string catName,
> 5: optional string dbName,
> 6: optional list<string> tableNames
1094c1517,1520
< 3: optional string catName
---
> 3: optional string catName,
> 4: optional i64 toEventId,
> 5: optional i64 limit,
> 6: optional list<string> tableNames
1105a1532,1535
> // Used by acid operation to create the sub directory
> 4: optional list<string> subDirectoryList,
> // partition value which was inserted (used in case of bulk insert events)
> 5: optional list<string> partitionVal
1109c1539,1543
< 1: InsertEventRequestData insertData
---
> 1: optional InsertEventRequestData insertData,
> // used to fire insert events on multiple partitions
> 2: optional list<InsertEventRequestData> insertDatas,
> // Identify if it is a refresh or invalidate event
> 3: optional bool refreshEvent
1118a1553
> // ignored if event request data contains multiple insert event datas
1120a1556
> 7: optional map<string, string> tblParams,
1123a1560,1572
> 1: list<i64> eventIds
> }
>
> struct WriteNotificationLogRequest {
> 1: required i64 txnId,
> 2: required i64 writeId,
> 3: required string db,
> 4: required string table,
> 5: required InsertEventRequestData fileInfo,
> 6: optional list<string> partitionVals,
> }
>
> struct WriteNotificationLogResponse {
1126a1576,1586
> struct WriteNotificationLogBatchRequest {
> 1: required string catalog,
> 2: required string db,
> 3: required string table,
> 4: required list<WriteNotificationLogRequest> requestList,
> }
>
> struct WriteNotificationLogBatchResponse {
> // NOP for now, this is just a place holder for future responses
> }
>
1204d1663
<
1208a1668,1703
> /*
> * Generic request API, providing different kinds of filtering and controlling output.
> *
> * The API entry point is get_partitions_with_specs() and getTables, which is based on a single
> * request/response object model.
> *
> * The request defines any filtering that should be done for partitions as well as the list of fields that should be
> * returned (this is called ProjectionSpec). Projection is simply a list of dot separated strings which represent
> * the fields which that be returned. Projection may also include whitelist or blacklist of parameters to include in
> * the partition. When both blacklist and whitelist are present, the blacklist supersedes the
> * whitelist in case of conflicts.
> *
> * Filter spec is the generalization of various types of partition and table filtering. Partitions and tables can be
> * filtered by names, by values or by partition expressions.
> */
>
> struct GetProjectionsSpec {
> // fieldList is a list of dot separated strings which represent the fields which must be returned.
> // Any other field which is not in the fieldList may be unset in the returned partitions (it
> // is up to the implementation to decide whether it chooses to include or exclude such fields).
> // E.g. setting the field list to sd.location, serdeInfo.name, sd.cols.name, sd.cols.type will
> // return partitions which will have location field set in the storage descriptor. Also the serdeInfo
> // in the returned storage descriptor will only have name field set. This applies to multi-valued
> // fields as well like sd.cols, so in the example above only name and type fields will be set for sd.cols.
> // If the fieldList is empty or not present, all the fields will be set
> 1: list<string> fieldList;
> // SQL-92 compliant regex pattern for param keys to be included
> // _ or % wildcards are supported. '_' represent one character and '%' represents 0 or more characters
> // Currently this is unsupported when fetching tables.
> 2: string includeParamKeyPattern;
> // SQL-92 compliant regex pattern for param keys to be excluded
> // _ or % wildcards are supported. '_' represent one character and '%' represents 0 or more characters
> // Current this is unsupported when fetching tables.
> 3: string excludeParamKeyPattern;
> }
>
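
Spelling out the fieldList example from the comment above as client code against the generated class (a sketch, not authoritative; the exclude pattern is hypothetical):

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.GetProjectionsSpec;

    GetProjectionsSpec projection = new GetProjectionsSpec();
    // Only these dot-separated fields are guaranteed to be set on returned partitions.
    projection.setFieldList(Arrays.asList("sd.location", "serdeInfo.name", "sd.cols.name", "sd.cols.type"));
    projection.setExcludeParamKeyPattern("impala%"); // SQL-92 LIKE pattern: % = zero or more chars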
1213c1708,1714
< 4: optional string catName
---
> 4: optional string catName,
> 6: optional string validWriteIdList,
> 7: optional bool getColumnStats,
> 8: optional list<string> processorCapabilities,
> 9: optional string processorIdentifier,
> 10: optional string engine = "hive",
> 11: optional i64 id=-1 // table id
1217c1718,1719
< 1: required Table table
---
> 1: required Table table,
> 2: optional bool isStatsCompliant
1224c1726,1730
< 4: optional string catName
---
> 4: optional string catName,
> 5: optional list<string> processorCapabilities,
> 6: optional string processorIdentifier,
> 7: optional GetProjectionsSpec projectionSpec,
> 8: optional string tablesPattern
1230a1737,1772
> struct GetTablesExtRequest {
> 1: required string catalog,
> 2: required string database,
> 3: required string tableNamePattern, // table name matching pattern
> 4: required i32 requestedFields, // ORed GetTablesExtRequestFields
> 5: optional i32 limit, // maximum number of tables returned (0=all)
> 6: optional list<string> processorCapabilities, // list of capabilities “possessed” by the client
> 7: optional string processorIdentifier
> }
>
> // response to GetTablesExtRequest call
> struct ExtendedTableInfo {
> 1: required string tblName, // always returned
> 2: optional i32 accessType, // if AccessType set
> 3: optional list<string> requiredReadCapabilities // capabilities required for read access
> 4: optional list<string> requiredWriteCapabilities // capabilities required for write access
> }
>
> struct GetDatabaseRequest {
> 1: optional string name,
> 2: optional string catalogName,
> 3: optional list<string> processorCapabilities,
> 4: optional string processorIdentifier
> }
>
> struct DropDatabaseRequest {
> 1: required string name,
> 2: optional string catalogName,
> 3: required bool ignoreUnknownDb,
> 4: required bool deleteData,
> 5: required bool cascade,
> 6: optional bool softDelete=false,
> 7: optional i64 txnId=0,
> 8: optional bool deleteManagedDir=true
> }
>
1246a1789,1790
> 6: optional string ownerName;
> 7: optional PrincipalType ownerType;
1250a1795
> 2: required bool sourceTablesCompacted;
1270a1816
> 5: optional string ns;
1279a1826
> 8: optional string ns;
1287a1835
> 6: optional string ns;
1297a1846
> 7: optional string ns;
1305a1855
> 6: optional string ns;
1313a1864
> 6: optional string ns;
1318a1870
> 3: optional string ns;
1339a1892
> 1: optional string ns;
1347a1901
> 2: optional string ns;
1354a1909
> 1: optional string ns;
1366a1922
> 6: optional string ns;
1374a1931
> 2: optional string ns;
1383a1941
> 2: optional string ns;
1405a1964
> 3: optional string ns;
1412a1972
> 2: optional string ns;
1436a1997
> 3: optional string ns;
1461a2023
> 5: optional string ns;
1544a2107,2453
> struct CreateTableRequest {
> 1: required Table table,
> 2: optional EnvironmentContext envContext,
> 3: optional list<SQLPrimaryKey> primaryKeys,
> 4: optional list<SQLForeignKey> foreignKeys,
> 5: optional list<SQLUniqueConstraint> uniqueConstraints,
> 6: optional list<SQLNotNullConstraint> notNullConstraints,
> 7: optional list<SQLDefaultConstraint> defaultConstraints,
> 8: optional list<SQLCheckConstraint> checkConstraints,
> 9: optional list<string> processorCapabilities,
> 10: optional string processorIdentifier
> }
>
> struct CreateDatabaseRequest {
> 1: required string databaseName,
> 2: optional string description,
> 3: optional string locationUri,
> 4: optional map<string, string> parameters,
> 5: optional PrincipalPrivilegeSet privileges,
> 6: optional string ownerName,
> 7: optional PrincipalType ownerType,
> 8: optional string catalogName,
> 9: optional i32 createTime,
> 10: optional string managedLocationUri,
> 11: optional string type,
> 12: optional string dataConnectorName
> }
>
> struct CreateDataConnectorRequest {
> 1: DataConnector connector
> }
>
> struct GetDataConnectorRequest {
> 1: required string connectorName
> }
>
> struct ScheduledQueryPollRequest {
> 1: required string clusterNamespace
> }
>
> struct ScheduledQueryKey {
> 1: required string scheduleName,
> 2: required string clusterNamespace,
> }
>
> struct ScheduledQueryPollResponse {
> 1: optional ScheduledQueryKey scheduleKey,
> 2: optional i64 executionId,
> 3: optional string query,
> 4: optional string user,
> }
>
> struct ScheduledQuery {
> 1: required ScheduledQueryKey scheduleKey,
> 2: optional bool enabled,
> 4: optional string schedule,
> 5: optional string user,
> 6: optional string query,
> 7: optional i32 nextExecution,
> }
>
> enum ScheduledQueryMaintenanceRequestType {
> CREATE = 1,
> ALTER = 2,
> DROP = 3,
> }
>
> struct ScheduledQueryMaintenanceRequest {
> 1: required ScheduledQueryMaintenanceRequestType type,
> 2: required ScheduledQuery scheduledQuery,
> }
>
> enum QueryState {
> INITED,
> EXECUTING,
> FAILED,
> FINISHED,
> TIMED_OUT,
> AUTO_DISABLED,
> }
>
> struct ScheduledQueryProgressInfo{
> 1: required i64 scheduledExecutionId,
> 2: required QueryState state,
> 3: required string executorQueryId,
> 4: optional string errorMessage,
> }
>
> struct AlterPartitionsRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tableName,
> 4: required list<Partition> partitions,
> 5: optional EnvironmentContext environmentContext,
> 6: optional i64 writeId=-1,
> 7: optional string validWriteIdList,
> 8: optional bool skipColumnSchemaForPartition,
> 9: optional list<FieldSchema> partitionColSchema
> }
>
> struct AlterPartitionsResponse {
> }
>
> struct RenamePartitionRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tableName,
> 4: required list<string> partVals,
> 5: required Partition newPart,
> 6: optional string validWriteIdList,
> 7: optional i64 txnId, // txnId associated with the rename operation
> 8: optional bool clonePart // non-blocking rename
> }
>
> struct RenamePartitionResponse {
> }
>
> struct AlterTableRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tableName,
> 4: required Table table,
> 5: optional EnvironmentContext environmentContext,
> 6: optional i64 writeId=-1,
> 7: optional string validWriteIdList
> 8: optional list<string> processorCapabilities,
> 9: optional string processorIdentifier,
> 10: optional string expectedParameterKey,
> 11: optional string expectedParameterValue
> // TODO: also add cascade here, out of envCtx
> }
>
> struct AlterTableResponse {
> }
>
> enum PartitionFilterMode {
> BY_NAMES, // filter by names
> BY_VALUES, // filter by values
> BY_EXPR // filter by expression
> }
>
> struct GetPartitionsFilterSpec {
> 7: optional PartitionFilterMode filterMode,
> 8: optional list<string> filters //used as list of partitionNames or list of values or expressions depending on mode
> }
>
> struct GetPartitionsResponse {
> 1: list<PartitionSpec> partitionSpec
> }
>
> struct GetPartitionsRequest {
> 1: optional string catName,
> 2: string dbName,
> 3: string tblName,
> 4: optional bool withAuth,
> 5: optional string user,
> 6: optional list<string> groupNames,
> 7: GetProjectionsSpec projectionSpec
> 8: GetPartitionsFilterSpec filterSpec,
> 9: optional list<string> processorCapabilities,
> 10: optional string processorIdentifier,
> 11: optional string validWriteIdList
> }
>
> struct GetFieldsRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: optional EnvironmentContext envContext,
> 5: optional string validWriteIdList,
> 6: optional i64 id=-1 // table id
> }
>
> struct GetFieldsResponse {
> 1: required list<FieldSchema> fields
> }
>
> struct GetSchemaRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: optional EnvironmentContext envContext,
> 5: optional string validWriteIdList,
> 6: optional i64 id=-1 // table id
> }
>
> struct GetSchemaResponse {
> 1: required list<FieldSchema> fields
> }
>
> struct GetPartitionRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: required list<string> partVals,
> 5: optional string validWriteIdList,
> 6: optional i64 id=-1 // table id
> }
>
> struct GetPartitionResponse {
> 1: required Partition partition
> }
>
> struct PartitionsRequest { // Not using Get prefix as that name is already used for a different method
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: optional i16 maxParts=-1,
> 5: optional string validWriteIdList,
> 6: optional i64 id=-1, // table id
> 7: optional bool skipColumnSchemaForPartition,
> 8: optional string includeParamKeyPattern,
> 9: optional string excludeParamKeyPattern
> }
>
> struct PartitionsResponse { // Not using Get prefix as that name is already used for a different method
> 1: required list<Partition> partitions
> }
>
> struct GetPartitionsByFilterRequest {
> 1: optional string catName,
> 2: string dbName,
> 3: string tblName,
> 4: string filter,
> 5: optional i16 maxParts=-1,
> 6: optional bool skipColumnSchemaForPartition,
> 7: optional string includeParamKeyPattern,
> 8: optional string excludeParamKeyPattern
> }
>
> struct GetPartitionNamesPsRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: optional list<string> partValues,
> 5: optional i16 maxParts=-1,
> 6: optional string validWriteIdList,
> 7: optional i64 id=-1 // table id
> }
>
> struct GetPartitionNamesPsResponse {
> 1: required list<string> names
> }
>
> struct GetPartitionsPsWithAuthRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: optional list<string> partVals,
> 5: optional i16 maxParts=-1,
> 6: optional string userName,
> 7: optional list<string> groupNames,
> 8: optional string validWriteIdList,
> 9: optional i64 id=-1 // table id
> 10: optional bool skipColumnSchemaForPartition,
> 11: optional string includeParamKeyPattern,
> 12: optional string excludeParamKeyPattern,
> 13: optional list<string> partNames;
> }
>
> struct GetPartitionsPsWithAuthResponse {
> 1: required list<Partition> partitions
> }
>
> struct ReplicationMetrics{
> 1: required i64 scheduledExecutionId,
> 2: required string policy,
> 3: required i64 dumpExecutionId,
> 4: optional string metadata,
> 5: optional string progress,
> 6: optional string messageFormat
> }
>
> struct ReplicationMetricList{
> 1: required list<ReplicationMetrics> replicationMetricList,
> }
>
> struct GetReplicationMetricsRequest {
> 1: optional i64 scheduledExecutionId,
> 2: optional string policy,
> 3: optional i64 dumpExecutionId
> }
>
> struct GetOpenTxnsRequest {
> 1: optional list<TxnType> excludeTxnTypes;
> }
>
> struct StoredProcedureRequest {
> 1: required string catName,
> 2: required string dbName,
> 3: required string procName
> }
>
> struct ListStoredProcedureRequest {
> 1: required string catName
> 2: optional string dbName
> }
>
> struct StoredProcedure {
> 1: string name,
> 2: string dbName,
> 3: string catName,
> 4: string ownerName,
> 5: string source
> }
>
> struct AddPackageRequest {
> 1: string catName,
> 2: string dbName,
> 3: string packageName
> 4: string ownerName,
> 5: string header,
> 6: string body
> }
>
> struct GetPackageRequest {
> 1: required string catName,
> 2: required string dbName,
> 3: required string packageName
> }
>
> struct DropPackageRequest {
> 1: required string catName,
> 2: required string dbName,
> 3: required string packageName
> }
>
> struct ListPackageRequest {
> 1: required string catName
> 2: optional string dbName
> }
>
> struct Package {
> 1: string catName,
> 2: string dbName,
> 3: string packageName
> 4: string ownerName,
> 5: string header,
> 6: string body
> }
>
> struct GetAllWriteEventInfoRequest {
> 1: required i64 txnId,
> 2: optional string dbName,
> 3: optional string tableName
> }
>
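
Tying this hunk together: get_partitions_with_specs (declared further down) takes a GetPartitionsRequest that combines a GetProjectionsSpec with a GetPartitionsFilterSpec. A hedged end-to-end sketch; the database, table, and partition names are invented:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.*;

    static List<PartitionSpec> partitionsByName(ThriftHiveMetastore.Iface client) throws Exception {
        GetPartitionsFilterSpec filter = new GetPartitionsFilterSpec();
        filter.setFilterMode(PartitionFilterMode.BY_NAMES); // names, values, or expressions
        filter.setFilters(Arrays.asList("ds=2025-02-08"));  // hypothetical partition name
        GetPartitionsRequest req = new GetPartitionsRequest();
        req.setDbName("default");
        req.setTblName("web_logs");                         // hypothetical table
        req.setFilterSpec(filter);
        return client.get_partitions_with_specs(req).getPartitionSpec();
    }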
1607a2517,2523
> exception CompactionAbortedException {
> 1: string message
> }
>
> exception NoSuchCompactionException {
> 1: string message
> }
1612a2529
> AbortCompactResponse abort_Compactions(1: AbortCompactionRequest rqst)
1623a2541
> Database get_database_req(1:GetDatabaseRequest request) throws(1:NoSuchObjectException o1, 2:MetaException o2)
1624a2543
> void drop_database_req(1:DropDatabaseRequest req) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
1628a2548,2553
> void create_dataconnector(1:DataConnector connector) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
> DataConnector get_dataconnector_req(1:GetDataConnectorRequest request) throws(1:NoSuchObjectException o1, 2:MetaException o2)
> void drop_dataconnector(1:string name, 2:bool ifNotExists, 3:bool checkReferences) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
> list<string> get_dataconnectors() throws(1:MetaException o1)
> void alter_dataconnector(1:string name, 2:DataConnector connector) throws(1:MetaException o1, 2:NoSuchObjectException o2)
>
1637,1638c2562,2563
< list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
< list<FieldSchema> get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
---
> list<FieldSchema> get_fields(1:string db_name, 2:string table_name) throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
> list<FieldSchema> get_fields_with_environment_context(1:string db_name, 2:string table_name, 3:EnvironmentContext environment_context) throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
1639a2565,2567
> GetFieldsResponse get_fields_req(1: GetFieldsRequest req)
> throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
>
1641,1642c2569,2570
< list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
< list<FieldSchema> get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
---
> list<FieldSchema> get_schema(1:string db_name, 2:string table_name) throws(1:MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
> list<FieldSchema> get_schema_with_environment_context(1:string db_name, 2:string table_name, 3:EnvironmentContext environment_context) throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
1643a2572,2574
> GetSchemaResponse get_schema_req(1: GetSchemaRequest req)
> throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
>
1664a2596,2598
> void create_table_req(1:CreateTableRequest request) throws (1:AlreadyExistsException o1,
> 2:InvalidObjectException o2, 3:MetaException o3,
> 4:NoSuchObjectException o4)
1679c2613,2614
<
---
> Table translate_table_dryrun(1:CreateTableRequest request)
> throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
1688a2624
> TruncateTableResponse truncate_table_req(1:TruncateTableRequest req) throws(1:MetaException o1)
1690a2627
> list<Table> get_all_materialized_view_objects_for_rewriting() throws (1:MetaException o1)
1698a2636
> list<ExtendedTableInfo> get_tables_ext(1: GetTablesExtRequest req) throws (1: MetaException o1)
1753a2692,2696
> AlterTableResponse alter_table_req(1:AlterTableRequest req)
> throws (1:InvalidOperationException o1, 2:MetaException o2)
>
>
>
1792a2736,2737
> GetPartitionResponse get_partition_req(1: GetPartitionRequest req)
> throws(1:MetaException o1, 2:NoSuchObjectException o2)
1812a2758,2759
> PartitionsResponse get_partitions_req(1:PartitionsRequest req)
> throws(1:NoSuchObjectException o1, 2:MetaException o2)
1834c2781,2782
< list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
---
> list<Partition> get_partitions_ps_with_auth(1:string db_name,
> 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
1835a2784,2785
> GetPartitionsPsWithAuthResponse get_partitions_ps_with_auth_req(1:GetPartitionsPsWithAuthRequest req)
> throws(1:MetaException o1, 2:NoSuchObjectException o2)
1839a2790,2791
> GetPartitionNamesPsResponse get_partition_names_ps_req(1:GetPartitionNamesPsRequest req)
> throws(1:MetaException o1, 2:NoSuchObjectException o2)
1840a2793,2795
> list<string> get_partition_names_req(1:PartitionsByExprRequest req)
> throws(1:MetaException o1, 2:NoSuchObjectException o2)
>
1845a2801,2803
> list<Partition> get_partitions_by_filter_req(1:GetPartitionsByFilterRequest req)
> throws(1:MetaException o1, 2:NoSuchObjectException o2)
>
1857a2816,2821
> // unlike get_partitions_by_expr, this returns PartitionSpec which contains deduplicated
> // storage descriptor
> PartitionsSpecByExprResult get_partitions_spec_by_expr(1:PartitionsByExprRequest req)
> throws(1:MetaException o1, 2:NoSuchObjectException o2)
>
> // get the partitions matching the given partition filter
1863c2827,2829
< throws(1:MetaException o1, 2:NoSuchObjectException o2)
---
> throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3)
> GetPartitionsByNamesResult get_partitions_by_names_req(1:GetPartitionsByNamesRequest req)
> throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3)
1864a2831,2835
> // retrieve properties
> PropertyGetResponse get_properties(1:PropertyGetRequest req) throws(1:MetaException e1, 2:NoSuchObjectException e2);
> // set properties
> bool set_properties(1:PropertySetRequest req) throws(1:MetaException e1, 2:NoSuchObjectException e2);
>
1874a2846
>
1876a2849,2851
> AlterPartitionsResponse alter_partitions_req(1:AlterPartitionsRequest req)
> throws (1:InvalidOperationException o1, 2:MetaException o2)
>
1887a2863,2865
> RenamePartitionResponse rename_partition_req(1:RenamePartitionRequest req)
> throws (1:InvalidOperationException o1, 2:MetaException o2)
>
1930a2909,2911
> // All table constrains
> AllTableConstraintsResponse get_all_table_constraints(1:AllTableConstraintsRequest request)
> throws(1:MetaException o1, 2:NoSuchObjectException o2)
1942a2924,2931
> SetPartitionsStatsResponse update_table_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1,
> 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
> SetPartitionsStatsResponse update_partition_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1,
> 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
>
> void update_transaction_statistics(1:UpdateTransactionalStatsRequest req) throws (1:MetaException o1)
>
>
1965c2954
< bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws
---
> bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name, 5:string engine) throws
1968c2957
< bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
---
> bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name, 4:string engine) throws
2074a3064
> // Deprecated use get_open_txns_req
2081a3072
> i64 get_latest_txnid_in_conflict(1:i64 txnId) throws (1:MetaException o1)
2084a3076
> void add_write_ids_to_min_history(1:i64 txnId, 2: map<string, i64> writeIds) throws (1:MetaException o2)
2086a3079,3083
> MaxAllocatedTableWriteIdResponse get_max_allocated_table_write_id(1:MaxAllocatedTableWriteIdRequest rqst)
> throws (1:MetaException o1)
> void seed_write_id(1:SeedTableWriteIdsRequest rqst)
> throws (1:MetaException o1)
> void seed_txn_id(1:SeedTxnIdRequest rqst) throws (1:MetaException o1)
2096a3094
> bool submit_for_cleanup(1:CompactionRequest o1, 2:i64 o2, 3:i64 o3) throws (1:MetaException o1)
2097a3096,3108
> // Deprecated, use find_next_compact2()
> OptionalCompactionInfoStruct find_next_compact(1: string workerId) throws(1:MetaException o1)
> OptionalCompactionInfoStruct find_next_compact2(1: FindNextCompactRequest rqst) throws(1:MetaException o1)
> void update_compactor_state(1: CompactionInfoStruct cr, 2: i64 txn_id)
> list<string> find_columns_with_stats(1: CompactionInfoStruct cr)
> void mark_cleaned(1:CompactionInfoStruct cr) throws(1:MetaException o1)
> void mark_compacted(1: CompactionInfoStruct cr) throws(1:MetaException o1)
> void mark_failed(1: CompactionInfoStruct cr) throws(1:MetaException o1)
> void mark_refused(1: CompactionInfoStruct cr) throws(1:MetaException o1)
> bool update_compaction_metrics_data(1: CompactionMetricsDataStruct data) throws(1:MetaException o1)
> void remove_compaction_metrics_data(1: CompactionMetricsDataRequest request) throws(1:MetaException o1)
> void set_hadoop_jobid(1: string jobId, 2: i64 cq_id)
> GetLatestCommittedCompactionInfoResponse get_latest_committed_compaction_info(1:GetLatestCommittedCompactionInfoRequest rqst)
2104a3116,3117
> WriteNotificationLogResponse add_write_notification_log(1:WriteNotificationLogRequest rqst)
> WriteNotificationLogBatchResponse add_write_notification_log_in_batch(1:WriteNotificationLogBatchRequest rqst)
2172c3185
< NoSuchObjectException o2, 3:MetaException o3)
---
> 2: NoSuchObjectException o2, 3:MetaException o3)
2205a3219,3241
>
> // get_partitions with filter and projectspec
> GetPartitionsResponse get_partitions_with_specs(1: GetPartitionsRequest request) throws(1:MetaException o1)
>
> ScheduledQueryPollResponse scheduled_query_poll(1: ScheduledQueryPollRequest request) throws(1:MetaException o1)
> void scheduled_query_maintenance(1: ScheduledQueryMaintenanceRequest request) throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:AlreadyExistsException o3, 4:InvalidInputException o4)
> void scheduled_query_progress(1: ScheduledQueryProgressInfo info) throws(1:MetaException o1, 2: InvalidOperationException o2)
> ScheduledQuery get_scheduled_query(1: ScheduledQueryKey scheduleKey) throws(1:MetaException o1, 2:NoSuchObjectException o2)
>
> void add_replication_metrics(1: ReplicationMetricList replicationMetricList) throws(1:MetaException o1)
> ReplicationMetricList get_replication_metrics(1: GetReplicationMetricsRequest rqst) throws(1:MetaException o1)
> GetOpenTxnsResponse get_open_txns_req(1: GetOpenTxnsRequest getOpenTxnsRequest)
>
> void create_stored_procedure(1: StoredProcedure proc) throws(1:NoSuchObjectException o1, 2:MetaException o2)
> StoredProcedure get_stored_procedure(1: StoredProcedureRequest request) throws (1:MetaException o1, 2:NoSuchObjectException o2)
> void drop_stored_procedure(1: StoredProcedureRequest request) throws (1:MetaException o1)
> list<string> get_all_stored_procedures(1: ListStoredProcedureRequest request) throws (1:MetaException o1)
>
> Package find_package(1: GetPackageRequest request) throws (1:MetaException o1, 2:NoSuchObjectException o2)
> void add_package(1: AddPackageRequest request) throws (1:MetaException o1)
> list<string> get_all_packages(1: ListPackageRequest request) throws (1:MetaException o1)
> void drop_package(1: DropPackageRequest request) throws (1:MetaException o1)
> list<WriteEventInfo> get_all_write_event_info(1: GetAllWriteEventInfoRequest request) throws (1:MetaException o1)
2231a3268
> const string IF_PURGE = "ifPurge",
2242c3279
< const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction",
---
> const string NO_AUTO_COMPACT = "no_auto_compaction",
2244a3282,3289
> const string DRUID_CONFIG_PREFIX = "druid.",
> const string JDBC_CONFIG_PREFIX = "hive.sql.",
> const string TABLE_IS_CTAS = "created_with_ctas",
> const string TABLE_IS_CTLT = "created_with_ctlt",
> const string PARTITION_TRANSFORM_SPEC = "partition_transform_spec",
> const string NO_CLEANUP = "no_cleanup",
> const string CTAS_LEGACY_CONFIG = "create_table_as_external",
> const string DEFAULT_TABLE_TYPE = "defaultTableType",
2245a3291,3297
> // ACID
> const string TXN_ID = "txnId",
> const string WRITE_ID = "writeId",
>
> // Keys for alter table environment context parameters
> const string EXPECTED_PARAMETER_KEY = "expected_parameter_key",
> const string EXPECTED_PARAMETER_VALUE = "expected_parameter_value",
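
The EXPECTED_PARAMETER_KEY/VALUE constants mirror the expectedParameterKey/expectedParameterValue fields added to AlterTableRequest earlier in this diff, which look like a compare-and-set guard for concurrent alters. A speculative sketch; the guard semantics are an assumption, not something this diff documents:

    import org.apache.hadoop.hive.metastore.api.*;

    static void guardedAlter(ThriftHiveMetastore.Iface client, Table newTable) throws Exception {
        AlterTableRequest alter = new AlterTableRequest();
        alter.setDbName("default");
        alter.setTableName("web_logs");            // hypothetical table
        alter.setTable(newTable);
        // Assumed behaviour: apply only if the stored table still carries this parameter value.
        alter.setExpectedParameterKey("transient_lastDdlTime");
        alter.setExpectedParameterValue("1700000000");
        client.alter_table_req(alter);
    }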
% diff --ignore-space-change ~/Downloads/hive_metastore.thrift-400 ~/Downloads/hive_metastore.thrift-401
935c935,936
< 9: optional list<FieldSchema> partitionColSchema
---
> 9: optional list<FieldSchema> partitionColSchema,
> 10: optional EnvironmentContext environmentContext
967a969,978
> struct DropPartitionRequest {
> 1: optional string catName,
> 2: required string dbName,
> 3: required string tblName,
> 4: optional string partName,
> 5: optional list<string> partVals,
> 6: optional bool deleteData,
> 7: optional EnvironmentContext environmentContext
> }
>
1754a1766,1774
> struct DropTableRequest {
> 1: optional string catalogName,
> 2: required string dbName,
> 3: required string tableName,
> 4: optional bool deleteData,
> 5: optional EnvironmentContext envContext,
> 6: optional bool dropPartitions
> }
>
1761a1782,1786
> struct AlterDatabaseRequest {
> 1: required string oldDbName,
> 2: required Database newDb
> }
>
2131,2132c2156,2158
< 11: optional string type,
< 12: optional string dataConnectorName
---
> 11: optional DatabaseType type,
> 12: optional string dataConnectorName,
> 13: optional string remote_dbname
2136c2162
< 1: DataConnector connector
---
> 1: required DataConnector connector
2142a2169,2179
> struct AlterDataConnectorRequest {
> 1: required string connectorName,
> 2: required DataConnector newConnector
> }
>
> struct DropDataConnectorRequest {
> 1: required string connectorName,
> 2: optional bool ifNotExists,
> 3: optional bool checkReferences
> }
>
2206a2244,2252
> struct AppendPartitionsRequest {
> 1: optional string catalogName,
> 2: required string dbName,
> 3: required string tableName,
> 4: optional string name,
> 5: optional list<string> partVals,
> 6: optional EnvironmentContext environmentContext
> }
>
2539a2586
> void create_database_req(1:CreateDatabaseRequest createDatabaseRequest) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
2546a2594
> void alter_database_req(1:AlterDatabaseRequest alterDbReq) throws(1:MetaException o1, 2:NoSuchObjectException o2)
2548c2596
< void create_dataconnector(1:DataConnector connector) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
---
> void create_dataconnector_req(1:CreateDataConnectorRequest connectorReq) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
2550c2598
< void drop_dataconnector(1:string name, 2:bool ifNotExists, 3:bool checkReferences) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
---
> void drop_dataconnector_req(1:DropDataConnectorRequest dropDcReq) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
2552c2600
< void alter_dataconnector(1:string name, 2:DataConnector connector) throws(1:MetaException o1, 2:NoSuchObjectException o2)
---
> void alter_dataconnector_req(1:AlterDataConnectorRequest alterReq) throws(1:MetaException o1, 2:NoSuchObjectException o2)
2621a2670,2671
> void drop_table_req(1:DropTableRequest dropTableReq)
> throws(1:NoSuchObjectException o1, 2:MetaException o3)
2633,2635d2682
< Table get_table(1:string dbname, 2:string tbl_name)
< throws (1:MetaException o1, 2:NoSuchObjectException o2)
< list<Table> get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names)
2715a2763,2764
> Partition append_partition_req(1:AppendPartitionsRequest appendPartitionsReq)
> throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
2725a2775,2776
> bool drop_partition_req(1:DropPartitionRequest dropPartitionReq)
> throws(1:NoSuchObjectException o1, 2:MetaException o2)
2768c2819,2820
<
---
> list<string> fetch_partition_names_req(1:PartitionsRequest partitionReq)
> throws(1:NoSuchObjectException o1, 2:MetaException o2)
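
The 4.0.0 to 4.0.1 delta above mostly wraps older positional calls in request objects (drop_database_req, drop_partition_req, append_partition_req, drop_table_req, ...). A sketch of the drop-table variant; the flags set are the struct's own optional fields and the table name is hypothetical:

    import org.apache.hadoop.hive.metastore.api.DropTableRequest;

    DropTableRequest drop = new DropTableRequest();
    drop.setDbName("default");
    drop.setTableName("web_logs");   // hypothetical table
    drop.setDeleteData(true);        // remove the data, not just the metadata
    drop.setDropPartitions(true);
    // client.drop_table_req(drop);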
% diff --ignore-space-change src/main/thrift/hive_metastore.thrift ~/Downloads/hive_metastore.thrift-313
1c1,3
< /*
---
> #!/usr/local/bin/thrift -java
>
> /**
19c21,23
< namespace java io.trino.hive.thrift.metastore
---
> #
> # Thrift Service that the MetaStore is built on
> #
20a25,30
> include "share/fb303/if/fb303.thrift"
>
> namespace java org.apache.hadoop.hive.metastore.api
> namespace php metastore
> namespace cpp Apache.Hadoop.Hive
>
423,424c433
< 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
< 19: optional i64 writeId=-1
---
> 18: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility)
436,437c445
< 9: optional string catName,
< 10: optional i64 writeId=-1
---
> 9: optional string catName
567,579d574
< struct AlterPartitionsRequest {
< 1: optional string catName,
< 2: required string dbName,
< 3: required string tableName,
< 4: required list<Partition> partitions,
< 5: optional EnvironmentContext environmentContext,
< 6: optional i64 writeId=-1,
< 7: optional string validWriteIdList
< }
<
< struct AlterPartitionsResponse {
< }
<
583,584d577
< 3: optional i64 writeId=-1, // writeId for the current query that updates the stats
< 4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
587,590d579
< struct SetPartitionsStatsResponse {
< 1: required bool result;
< }
<
1242,1257d1230
< struct AlterTableRequest {
< 1: optional string catName,
< 2: required string dbName,
< 3: required string tableName,
< 4: required Table table,
< 5: optional EnvironmentContext environmentContext,
< 6: optional i64 writeId=-1,
< 7: optional string validWriteIdList
< 8: optional list<string> processorCapabilities,
< 9: optional string processorIdentifier
< // TODO: also add cascade here, out of envCtx
< }
<
< struct AlterTableResponse {
< }
<
1638c1611
< service ThriftHiveMetastore
---
> service ThriftHiveMetastore extends fb303.FacebookService
1781,1783d1753
< AlterTableResponse alter_table_req(1:AlterTableRequest req)
< throws (1:InvalidOperationException o1, 2:MetaException o2)
<
1912,1914d1881
< AlterPartitionsResponse alter_partitions_req(1:AlterPartitionsRequest req)
< throws (1:InvalidOperationException o1, 2:MetaException o2)
<
1976,1980d1942
< SetPartitionsStatsResponse update_table_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1,
< 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
< SetPartitionsStatsResponse update_partition_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1,
< 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
<
2210c2172
< 2: NoSuchObjectException o2, 3:MetaException o3)
---
> NoSuchObjectException o2, 3:MetaException o3)