Skip to content

Instantly share code, notes, and snippets.

@abevieiramota
Created July 6, 2018 10:50
Show Gist options
  • Save abevieiramota/d63ec1b62a0c5295f19e931f68b1776d to your computer and use it in GitHub Desktop.
import pandas as pd
from io import StringIO

# Toy data: each row is an interval with a start (hora_ini) and end (hora_fim).
s = """
hora_ini,hora_fim
2018-07-01 00:01:00,2018-07-01 00:03:00
2018-07-01 00:00:00,2018-07-01 00:04:00
2018-07-01 00:00:00,2018-07-01 00:01:00
2018-07-01 00:00:01,2018-07-01 00:01:00
"""
df = pd.read_csv(StringIO(s), parse_dates=[0, 1])

# +1 per interval start and -1 per interval end, bucketed into 1-minute bins.
# NOTE: the 'T' minute alias is deprecated since pandas 2.2 — use 'min'.
si = pd.Series(index=df.hora_ini, data=1).resample('min').sum()
so = pd.Series(index=df.hora_fim, data=-1).resample('min').sum()
# The two series cover different minute ranges, so the concat has NaNs where
# one side has no bin; cumsum skips NaNs, ffill then carries the running
# total forward, and the row-wise sum yields the number of intervals open
# during each minute.
rf = pd.concat([si, so], axis=1)
rf.cumsum().ffill().sum(axis=1)
"""
hora_ini
2018-07-01 00:00:00 3.0
2018-07-01 00:01:00 2.0
2018-07-01 00:02:00 2.0
2018-07-01 00:03:00 1.0
2018-07-01 00:04:00 0.0
Freq: min, dtype: float64
"""
@rogeriomgatto
Copy link

import numpy as np
import pandas as pd

# DataFrame of random requests: each row has an arrival and a departure time.
#
#                      arrival                  departure
# 0 2018-07-01 00:00:04.641644 2018-07-01 00:00:09.227074
# 1 2018-07-01 00:00:06.509443 2018-07-01 00:00:07.261495
# 2 2018-07-01 00:00:10.035496 2018-07-01 00:00:10.908116
# ...

nsamples = 1000
# Seeded local generator: reproducible demo, no global RNG side effects.
rng = np.random.default_rng(0)

# Inter-arrival gaps ~ |N(2, 2)| seconds; service times ~ Exp(scale=2.5) s.
# NOTE: the `box=False` keyword of pd.to_datetime/pd.to_timedelta was removed
# in pandas 1.0 — use .to_numpy() to get raw numpy datetime64/timedelta64.
gaps = pd.to_timedelta(np.abs(rng.normal(size=nsamples, loc=2, scale=2)), unit='s')
arrival = np.datetime64('2018-07-01') + gaps.to_numpy().cumsum()
departure = arrival + pd.to_timedelta(rng.exponential(size=nsamples, scale=2.5), unit='s').to_numpy()
df = pd.DataFrame({'arrival': arrival, 'departure': departure})


# Build in/out deltas: +1 at every arrival, -1 at every departure.
#
# 2018-07-01 00:00:04.641644    1
# 2018-07-01 00:00:06.509443    1
# ...
# 2018-07-01 00:38:33.719072975   -1

requests = pd.concat([
    pd.Series(1, index=df['arrival']),
    pd.Series(-1, index=df['departure']),
])


# Guarantee entries on the 1-minute boundaries, then take the cumulative sum:
# 'n' is the number of requests in flight at each event timestamp.
# ('T'/'1T' minute aliases are deprecated since pandas 2.2 — use 'min'.)
#
# 2018-07-01 00:00:00.000000    0
# 2018-07-01 00:00:04.641644    1
# 2018-07-01 00:00:06.509443    2
# ...

requests = pd.concat([
    requests,
    pd.Series(0, index=pd.date_range(requests.index.min().floor('1min'),
                                     requests.index.max().ceil('1min'),
                                     freq='1min'))
]).sort_index().cumsum().rename('n')


# For how long did each value stay in effect?  'dt' is the gap (in seconds)
# to the next event; the final row has no successor and is dropped.
#
#                             n        dt
# 2018-07-01 00:00:00.000000  0  4.641644
# 2018-07-01 00:00:04.641644  1  1.867799
# ...

requests = pd.concat([
    requests,
    requests.index.to_series().diff().shift(-1).rename('dt').dt.total_seconds()
], axis=1).dropna()


# Time-weighted mean concurrency per minute.
# NOTE: `requests.resample(...).apply(lambda group: ...)` relied on the old
# "fall back to frame-wise apply" behavior that modern pandas removed (the
# callable is aggregated per column, so group['n'] would raise) — compute the
# weighted mean with vectorized per-minute sums instead.
#
# 2018-07-01 00:00:00    1.315696
# 2018-07-01 00:01:00    0.965842
# ...

mean_per_minute = ((requests['n'] * requests['dt']).resample('1min').sum()
                   / requests['dt'].resample('1min').sum())


# Peak concurrency per minute.
#
# 2018-07-01 00:00:00    4
# 2018-07-01 00:01:00    3
# ...

peak_per_minute = requests.resample('1min')['n'].max()

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment