Flatten Spark data frame fields structure, via SQL in Java
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

class Toto
{
    private SQLContext sqlContext; // provided elsewhere

    public void Main()
    {
        final DataFrame source = GetDataFrame();
        final String querySelectSQL = flattenSchema(source.schema(), null);

        source.registerTempTable("source");

        final DataFrame flattenData = sqlContext.sql("SELECT " + querySelectSQL + " FROM source");
    }

    /**
     * Generate SQL to select columns as flat.
     */
    public String flattenSchema(StructType schema, String prefix)
    {
        final StringBuilder selectSQLQuery = new StringBuilder();

        for (StructField field : schema.fields())
        {
            final String fieldName = field.name();

            // Skip metadata fields such as "@timestamp"
            if (fieldName.startsWith("@"))
            {
                continue;
            }

            String colName = prefix == null ? fieldName : (prefix + "." + fieldName);
            String colNameTarget = colName.replace(".", "_");

            if (field.dataType().getClass().equals(StructType.class))
            {
                // Nested struct: recurse with the dotted path as prefix
                selectSQLQuery.append(flattenSchema((StructType) field.dataType(), colName));
            }
            else
            {
                // Leaf column: alias "a.b.c" as "a_b_c"
                selectSQLQuery.append(colName);
                selectSQLQuery.append(" as ");
                selectSQLQuery.append(colNameTarget);
            }

            selectSQLQuery.append(",");
        }

        // Drop the trailing comma
        if (selectSQLQuery.length() > 0)
        {
            selectSQLQuery.deleteCharAt(selectSQLQuery.length() - 1);
        }

        return selectSQLQuery.toString();
    }
}
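For illustration (this sample schema and its column names are made up, not from the gist), given a simple nested schema, flattenSchema would generate the following SELECT clause:

    // Hypothetical input schema:
    // root
    //  |-- id: integer
    //  |-- address: struct
    //  |    |-- city: string
    //  |    |-- zip: string
    //
    // flattenSchema(source.schema(), null) returns:
    //   "id as id,address.city as address_city,address.zip as address_zip"
    //
    // so the final query becomes:
    //   SELECT id as id,address.city as address_city,address.zip as address_zip FROM source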
Here is a slightly more succinct Java version that flattens a schema and returns the field names as a List:
private List<String> flattenSchema(StructType schema) {
    List<String> allFields = new ArrayList<>();
    flattenSchemaHelper(schema, null, allFields);
    return allFields;
}

private void flattenSchemaHelper(StructType schema, String prefix, List<String> fields) {
    for (StructField field : schema.fields()) {
        String colName = prefix == null ? field.name() : (prefix + "." + field.name());
        if (field.dataType().getClass().equals(StructType.class)) {
            // Nested struct: recurse with the dotted path as prefix
            flattenSchemaHelper((StructType) field.dataType(), colName, fields);
        } else {
            fields.add(colName);
        }
    }
}
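One possible way to consume the returned list, assuming the same Spark 1.x DataFrame API as above (selectExpr is a real Spark method; the variable names are only illustrative):

    // Illustrative usage of the List-based variant
    DataFrame source = GetDataFrame();
    List<String> flatFields = flattenSchema(source.schema());

    // selectExpr understands dotted paths such as "address.city";
    // note the resulting columns keep only the leaf name unless you add aliases yourself
    DataFrame flattened = source.selectExpr(flatFields.toArray(new String[0]));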
If I have an array field, like below, then the above code doesn't expand the fields. What do I need to change?
This is my schema:
root
|-- personalInfo: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- studentId: integer (nullable = true)
| | |-- studentName: string (nullable = true)
| | |-- studentAddress: array (nullable = true)
| | | |-- element: struct (containsNull = true)
| | | | |-- streetName: string (nullable = true)
| | | | |-- city: string (nullable = true)
| | |-- studentContactNo: integer (nullable = true)
| | |-- studentBranch: string (nullable = true)
|-- gradeInfo: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- studentId: integer (nullable = true)
| | |-- studentSemester: string (nullable = true)
| | |-- studentCPI: double (nullable = true)
The output of the above code is:
personalInfo as personalInfo,gradeInfo as gradeInfo
+--------------------+---------------+
| personalInfo| gradeInfo|
+--------------------+---------------+
|[[1, dhara, [[TP-...|[[1, 3rd, 8.0]]|
|[[1, dhara, [[TP-...|[[1, 3rd, 8.7]]|
|[[2, Ankita, [[TP...|[[2, 3rd, 7.0]]|
|[[2, Ankita, [[TP...|[[2, 3rd, 7.7]]|
|[[3, Shreya, [[TP...|[[3, 3rd, 6.0]]|
|[[3, Shreya, [[TP...|[[3, 3rd, 6.7]]|
+--------------------+---------------+
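Not from the original gist, just a sketch: the flattening code above only descends into StructType fields, so ArrayType columns such as personalInfo and gradeInfo are passed through unchanged. One common approach is to explode the arrays into rows first and then flatten the resulting structs (df here stands for the DataFrame with the schema shown above):

    import static org.apache.spark.sql.functions.col;
    import static org.apache.spark.sql.functions.explode;

    // Turn each array element into its own row; personalInfo and gradeInfo then become
    // StructType columns that flattenSchema can expand. Beware that exploding two arrays
    // on the same row produces their cross product.
    DataFrame exploded = df
        .withColumn("personalInfo", explode(col("personalInfo")))
        .withColumn("gradeInfo", explode(col("gradeInfo")));

    // studentAddress inside personalInfo is itself an array and would need a further explode.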