API details.

class TSeries[source]

TSeries(x, **kwargs) :: TensorBase

no_emp_dim[source]

no_emp_dim(x)

show_graph[source]

show_graph(array, ax=None, figsize=None, title=None, ctx=None, tx=None, **kwargs)

Show an array on ax.

test_graph_exists[source]

test_graph_exists(ax)

Test there is a graph displayed in ax

show_graph can show an array...

a = np.arange(10)
ax = show_graph(a)
a = tensor(np.arange(10)+np.random.randn(2,10))[None,:]
ax = show_graph(a)

class TensorSeq[source]

TensorSeq(x, **kwargs) :: TensorBase

ax = TensorSeq(np.arange(10)).show()

class TSTensorSeq[source]

TSTensorSeq(x, **kwargs) :: TensorSeq

class TSTensorSeqy[source]

TSTensorSeqy(x, **kwargs) :: TensorSeq

ts = TSTensorSeq(np.arange(10))
ctx = ts.show()

y = TSTensorSeqy(np.arange(12))
y.show(ctx=ctx)
<matplotlib.axes._subplots.AxesSubplot at 0x7f19a8f2c410>

show_graphs[source]

show_graphs(arrays, rows=None, cols=None, figsize=None, titles=None, nrows=1, ncols=1, imsize=3, add_vert=0, sharex=False, sharey=False, squeeze=True, subplot_kw=None, gridspec_kw=None)

Show all arrays as subplots, arranged in rows, labeled with titles

y_hat = TSTensorSeqy(np.array([10.3,10.8,12.2]), x_len=10, m='*r', label='pred')

show_graphs(((a, y, y_hat), (a*.7, y*.7, y_hat*.7)), titles=('y','y * .7'), figsize=(10,10))
[<matplotlib.axes._subplots.AxesSubplot at 0x7f19a90a05d0>,
 <matplotlib.axes._subplots.AxesSubplot at 0x7f19a8c98610>]

Utils

first_item[source]

first_item(lst)

l = L(1,2,3)
test_eq(first_item(l),1)
l = [1,2,3]
test_eq(first_item(l),1)
test_eq(1,1)

concat_dct[source]

concat_dct(new_dct, expand_dct)

Concatenates torch.tensor's in new_dct to the same key in expand_dct.

dct = defaultdict(dict)
new_d = {'foo':{'bar':torch.randn(4,4)}}
dct = concat_dct(new_d, dct)
print(dct['foo']['bar'].shape)
new_d = {'foo':{'bar':torch.randn(4,4)+10}}
dct = concat_dct(new_d, dct)
print(dct['foo']['bar'].shape)
new_d = {'foo':{'bar':torch.randn(8,4)+10}}
dct = concat_dct(new_d, dct)
print(dct['foo']['bar'].shape)
torch.Size([4, 4])
torch.Size([8, 4])
torch.Size([16, 4])

pad_zeros[source]

pad_zeros(X, lenght)

Skip[source]

Skip(percentage_remove)

Helper for pd.read_csv that will randomly skip loading percentage_remove% of the rows of the whole dataset

l = []
skip = Skip(.9)
for i in range(10000):
    if not skip(i):
        l.append(1)
    else:
        l.append(0)    
test_eq(l[0],1)
test_close(np.mean(l),.9,.01)
from fastseq.data.all import *
from fastai2.basics import *
path = untar_data(URLs.m4_daily)
pd.read_csv(path/'val.csv',skiprows = Skip(.9))
V1 V2 V3 V4 V5 V6 V7 V8 V9 V10 V11 V12 V13 V14 V15
0 D1 2039.20 2035.00 2051.80 2061.8 2063.50 2069.5 2054.00 2057.00 2062.80 2066.40 2067.40 2071.40 2083.80 2080.600
1 D2 2986.00 3001.20 2975.90 2996.1 2981.90 2985.5 2975.80 2956.20 2964.70 2989.00 2991.40 3024.90 3070.80 3076.900
2 D3 1120.70 1117.90 1115.10 1112.3 1109.50 1106.7 1103.90 1101.10 1098.30 1095.50 1092.70 1089.90 1087.10 1084.300
3 D4 1190.00 1162.00 1134.00 1106.0 1078.00 1050.0 1022.00 994.00 966.00 938.00 910.00 1428.00 1400.00 1372.000
4 D5 5904.67 5917.05 5922.58 5928.8 5935.29 6002.8 6009.47 6014.82 6020.19 6072.49 6077.72 6080.23 6082.75 6108.070
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3808 D4220 4263.00 4257.00 4267.00 4267.0 4174.00 4175.0 4176.00 4195.00 4207.00 4218.00 4219.00 4214.00 4204.00 4203.000
3809 D4221 1969.50 1978.00 1973.50 1981.5 1971.50 1962.0 1967.50 1993.50 2016.00 2015.00 1994.50 1999.00 2003.00 1999.763
3810 D4222 2203.00 2201.00 2229.00 2239.0 2193.00 2189.0 2204.00 2207.00 2221.00 2244.00 2227.00 2222.00 2247.00 2248.000
3811 D4224 3719.00 3754.00 3754.00 3721.0 3730.00 3727.0 3700.00 3700.00 3708.00 3708.00 3708.00 3708.00 3708.00 3708.000
3812 D4227 15454.80 15455.70 15576.30 15391.7 15483.60 15596.0 15389.60 15330.50 15285.40 14614.60 14285.90 14574.70 14800.70 14619.300

3813 rows × 15 columns

get_ts_files[source]

get_ts_files(path, recurse=True, folders=None, **kwargs)

Get time-series files in path recursively, only in folders, if specified.

# train, val = get_ts_files(path, nrows=1000)

IndexsSplitter[source]

IndexsSplitter(train_idx, val_idx=None, test=None)

Split items from 0 to train_idx in the training set, from train_idx to val_idx (or the end) in the validation set.

Optionally, if test is set, it will also make a test set from val_idx to the end.

items = list(range(10))
splitter = IndexsSplitter(5,8)
test_eq(splitter(items),(L(0,1,2,3,4),L(5,6,7)))
/home/tako/dev/env37/lib/python3.7/site-packages/ipykernel_launcher.py:20: UserWarning: You lose data
splitter = IndexsSplitter(5,8,True)
test_eq(splitter(items),(L(0,1,2,3,4),L(5,6,7),L(8,9)))

Preprocess functions

M4

ts_lists[source]

ts_lists(ts:ndarray)

Transforms a np.ndarray of shape (timeseries, max_time) to a list of timeseries with shape (1,time).

where:

max_time = the length of the longest time series

time = the length of the non-nan values of that specific time series

ts
array([[ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9.],
       [ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8., nan]])
ts_lists(ts)
(#2) [array([[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]]),array([[0., 1., 2., 3., 4., 5., 6., 7., 8.]])]