Import the library:
# Import the main fetcher:
from argopy import DataFetcher as ArgoDataFetcher

Define what you want to fetch:
# a region:
ArgoSet = ArgoDataFetcher().region([-85, -45, 10., 20., 0, 1000.])

| import urllib.request | |
| import json | |
| import numpy as np | |
| import pandas as pd | |
| def get_float_config(wmo, cyc=None): | |
| def id_mission(missionCycles, a_cyc): | |
| this_mission = None | |
| for im, mission in enumerate(missionCycles): |
Import the library:
# Import the main fetcher:
from argopy import DataFetcher as ArgoDataFetcher

Define what you want to fetch:
# a region:
ArgoSet = ArgoDataFetcher().region([-85, -45, 10., 20., 0, 1000.])

To get a list of WMO,CYC pairs matching some position QC, you can do something like:
https://erddap.ifremer.fr/erddap/tabledap/ArgoFloats.htmlTable?platform_number,cycle_number,position_qc&platform_number=~%226903075|6903076%22&position_qc=~%221%22&distinct()&orderBy(%22platform_number,cycle_number%22)
This is an example where I select profiles from floats 6903075 and 6903076 having a QC position of 1
If you want to allow more QC values, list them separated by a | in the request:
https://erddap.ifremer.fr/erddap/tabledap/ArgoFloats.htmlTable?platform_number,cycle_number,position_qc&platform_number=~%226903075|6903076%22&position_qc=~%220|1%22&distinct()&orderBy(%22platform_number,cycle_number%22)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Example: fetch the full set of Argo variables for float 5900446 as CSV
# from the Ifremer ERDDAP server, and report how long the request took.
import requests
import time

# Request full data:
t0 = time.time()
url = 'http://www.ifremer.fr/erddap/tabledap/ArgoFloats.csv?data_mode,latitude,longitude,position_qc,time,time_qc,direction,platform_number,cycle_number,pres,temp,psal,pres_qc,temp_qc,psal_qc,pres_adjusted,temp_adjusted,psal_adjusted,pres_adjusted_qc,temp_adjusted_qc,psal_adjusted_qc,pres_adjusted_error,temp_adjusted_error,psal_adjusted_error&platform_number=~"5900446"&distinct()&orderBy("time,pres")'
response = requests.get(url)
# Fail loudly on HTTP errors instead of silently discarding the response:
response.raise_for_status()
# The original started a timer (t0) but never reported it; close the loop here.
print("Downloaded %d bytes in %0.1f s" % (len(response.content), time.time() - t0))
| #!/usr/bin/env python | |
| # coding: utf-8 | |
| # | |
| # $ time ./Parallel_images.py | |
| # Use 8 processes | |
| # 107.249u 2.444s 0:17.10 641.4% 0+0k 0+0io 1056pf+0w | |
| # | |
| import os | |
| import numpy as np |
| #!/usr/bin/env bash | |
| # | |
| # Generate mp4 videos from a collection of image files | |
| # | |
| # Video files are saved into ./videos | |
| # | |
| # Folder with image files: | |
| src="/home/datawork-lops-oh/somovar/WP1/data/dashboard/img/monthly" # This is an example |
def base_fct(**kwargs):
    """Return a dict of plotting options: built-in defaults overridden by caller keywords."""
    options = dict(sharey='row', dpi=80, facecolor='w', edgecolor='k')
    options.update(kwargs)  # caller-supplied keywords win over defaults
    return options


def fct(**kwargs):
    """Same contract as base_fct, but with 'sharey' defaulting to 'cols' instead of 'row'."""
    merged = dict(sharey='cols')
    merged.update(kwargs)
    return base_fct(**merged)


print("Default base options:\n", base_fct())
| #!/usr/bin/env python | |
| # | |
| # Useful functions for xarray time series analysis | |
| # (c) G. Maze, Ifremer | |
| # | |
| import numpy as np | |
| import xarray as xr | |
| import pandas as pd | |
| from statsmodels.tsa.seasonal import seasonal_decompose |
To run data mining algorithms on large ocean datasets, we need to optimise access to datasets with up to 6 dimensions.
A generalised 6-dimensional dataset is [X,Y,Z,T,V,E] where:
Running data mining algorithms on this dataset mostly implies re-arranging the 6 dimensions into 2-dimensional arrays with, following statistics vocabulary, a "sampling" and a "features" dimension. The sampling dimension runs along rows, the features along columns. A large dataset can have billions of rows and hundreds of columns.
Eg: