# coding: utf-8
# 2017/04/13 Jin Katagi
import pandas as pd
import subprocess
import re

# In[2]:
def get_scene(id, category, lat, lon):
    """Return the scenes that contain a validation point.
    input:
        id (int): id of the validation point.
        category: category of the validation point.
        lat (float): latitude of the validation point.
        lon (float): longitude of the validation point.
    return:
        tmp_df (DataFrame): scenes that contain the validation point.
    """
    # make an empty dataframe.
    tmp_df = pd.DataFrame(columns=['id', 'category', 'lon', 'lat', 'scene'])
    # get the names of the scenes whose bounding box contains the validation point.
    tmp_df["scene"] = df[(df["ll_lat"] < lat) & (lat < df["ul_lat"]) & (df["ll_lon"] < lon) & (lon < df["ul_lon"])]["scene"]
    # append id, category, lat, lon
    tmp_df["id"] = id
    tmp_df["category"] = category
    tmp_df["lat"] = lat
    tmp_df["lon"] = lon
    return tmp_df
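
# Illustration only (never called by the script): the bounding-box test in
# get_scene() above assumes the scene list exposes per-scene bounds in columns
# named 'll_lat', 'ul_lat', 'll_lon', 'ul_lon' plus a 'scene' name column.
# The helper name, scene names, category, and coordinates below are made up
# for demonstration and are not taken from AV2PAN_list_decimal_sort.txt.
def _demo_get_scene():
    global df
    df = pd.DataFrame({
        'scene':  ['SCENE_A', 'SCENE_B'],
        'll_lat': [35.0, 39.0],    # southern bound
        'ul_lat': [36.0, 40.0],    # northern bound
        'll_lon': [139.0, 140.0],  # western bound
        'ul_lon': [140.0, 141.0],  # eastern bound
    })
    # the point (35.68, 139.77) falls only inside SCENE_A's box
    print(get_scene(1, 'urban', 35.68, 139.77))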
# In[12]:
def run_gdal(file_path, file_name):
    """Run gdalinfo on the given file and return the process."""
    gdal_command = ["gdalinfo", file_path + "/" + file_name]
    proc = subprocess.Popen(
        gdal_command,
        stdout=subprocess.PIPE)
    return proc


def run_grep(input_stdin):
    """Grep the piped stdin for lines containing 'UTM'."""
    grep_command = ['grep', 'UTM']
    proc = subprocess.Popen(
        grep_command,
        stdin=input_stdin,
        stdout=subprocess.PIPE)
    return proc
# In[ ]:
def make_scene_zone(file_path, file_name, scene_zone_dict):
    """Get the UTM zone and record the scene -> UTM zone number pair in scene_zone_dict."""
    # run gdalinfo
    gdal_proc = run_gdal(file_path, file_name)
    # grep UTM
    grep_proc = run_grep(gdal_proc.stdout)
    out, err = grep_proc.communicate()
    string = out.decode('utf-8')
    pattern = r'.*(\d\d)N.*$'
    matchOB = re.match(pattern, string)
    if matchOB:
        scene_zone_dict[file_name] = matchOB.group(1)
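
# Alternative sketch (not used below): the same gdalinfo -> UTM-zone extraction
# can be done without a separate grep process by filtering the gdalinfo output
# in Python. get_utm_zone is a hypothetical helper; like the functions above,
# it assumes gdalinfo is available on PATH.
def get_utm_zone(file_path, file_name):
    out = subprocess.check_output(["gdalinfo", file_path + "/" + file_name])
    for line in out.decode('utf-8').splitlines():
        if 'UTM' in line:
            match = re.match(r'.*(\d\d)N.*$', line)
            if match:
                return match.group(1)
    return None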
# In[169]:
### Create dataframe df[id, category, lon, lat, scene] ###
# read the validation csv file
validation_df = pd.read_csv("./50points_coordinates_category.csv", header=None)
# read the scene list.
AV2PAN_list_name = '../AV2PAN/AV2PAN_list_decimal_sort.txt'
df = pd.read_csv(AV2PAN_list_name)

# create dataframe df[id, category, lon, lat, scene]
scene_df = pd.DataFrame(columns=['id', 'category', 'lon', 'lat', 'scene'])
for i, validation in validation_df.iterrows():
    # get the matching scene names and append them to the existing dataframe
    scene_df = scene_df.append(get_scene(validation[0], validation[1], validation[3], validation[2]))

### create dict {scene: UTM_zone} ###
scene_zone_dict = {}
file_path = "/GEODATA/AV2PAN/PAN"
for file_name in scene_df['scene']:
    make_scene_zone(file_path, file_name, scene_zone_dict)

# convert to dataframe df[scene, UTM_zone]
df = pd.DataFrame()
df['scene'] = list(scene_zone_dict.keys())
df['UTM_zone'] = list(scene_zone_dict.values())

# merge df[id, category, lon, lat, scene] and df[scene, UTM_zone]
final_df = pd.merge(scene_df, df, how='inner')

### save as csv ###
final_df.to_csv(path_or_buf='./AV2PAN_UTM_zone.csv', sep=',', index=False)