A basic architecture for time series forecasting.

linspace[source]

linspace(lookback, horizon, device=None)
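Returns the time axes of a forecasting window: a lookback-length tensor counting up to 0 and a horizon-length tensor starting at 1, placed on device (see the example below).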

print(linspace(10, 5))
(tensor([-9., -8., -7., -6., -5., -4., -3., -2., -1.,  0.], device='cuda:0'), tensor([1., 2., 3., 4., 5.], device='cuda:0'))

make_base[source]

make_base(u_in, layers, use_bn, ps)
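There is no docstring here; judging from the signature (in fastai style, ps is a list of dropout probabilities), make_base builds the fully connected trunk shared by the blocks below, starting from u_in input features. A minimal sketch, assuming a plain Linear/BatchNorm/ReLU/Dropout stack per entry in layers:

import torch.nn as nn

def make_base_sketch(u_in, layers, use_bn, ps):
    # Hypothetical re-implementation for illustration only; the library's
    # make_base may order or configure these layers differently.
    ps = ps if ps is not None else [0.0] * len(layers)
    mods, n_in = [], u_in
    for n_out, p in zip(layers, ps):
        mods.append(nn.Linear(n_in, n_out))
        if use_bn: mods.append(nn.BatchNorm1d(n_out))
        mods.append(nn.ReLU(inplace=True))
        if p: mods.append(nn.Dropout(p))
        n_in = n_out
    return nn.Sequential(*mods)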

class Block[source]

Block(fnc_f, fnc_b=None, base=None, **kwargs) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
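A Block ties together a trunk (base) that encodes the lookback window into coefficients theta, and two basis functions that decode them: fnc_b produces the backcast b (a reconstruction of the input window) and fnc_f the forecast f. That is the dict structure the tests further down check. A minimal sketch of the flow, with plain linear layers standing in for the basis functions (illustrative only; the real Block also supports extras such as attention and output scaling):

import torch
import torch.nn as nn

class BlockSketch(nn.Module):
    """Illustrative only: the backcast/forecast split behind Block."""
    def __init__(self, lookback, horizon, thetas_dim, hidden=256):
        super().__init__()
        self.base = nn.Sequential(nn.Linear(lookback, hidden), nn.ReLU(),
                                  nn.Linear(hidden, 2 * thetas_dim))
        self.fnc_b = nn.Linear(thetas_dim, lookback)  # stands in for a basis
        self.fnc_f = nn.Linear(thetas_dim, horizon)
    def forward(self, x):
        theta = self.base(x)
        theta_b, theta_f = theta.chunk(2, dim=-1)
        return {'b': self.fnc_b(theta_b), 'f': self.fnc_f(theta_f), 'theta': theta}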

class SeasonalityBlock[source]

SeasonalityBlock(layers:L, thetas_dim:int, device, lookback=10, horizon=5, use_bn=True, season=None, bn_final=False, ps:L=None, share_thetas=True, y_range=[-0.5, 0.5], att=True, scale_exp=4, stand_alone=False, base=None, **kwargs) :: Block

Same as nn.Module, but no need for subclasses to call super().__init__
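SeasonalityBlock adds a season argument on top of Block; it appears to set the period handed to SeasonalityModel below. The examples in this section pass season=lookback + horizon, so the slowest Fourier component spans one full window.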

class SeasonalityModel[source]

SeasonalityModel(period=None)

Returns a basis function; period sets the period of its lowest-frequency (least frequent) component.
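In spirit this is the Fourier basis from the N-BEATS paper: the coefficients in theta weight cosines and sines whose slowest frequency is 1/period. A minimal sketch, assuming an even split of theta between cosines and sines (the actual basis may differ, e.g. in how an odd number of coefficients is handled), which the demo below exercises:

import math
import torch

def seasonality_model_sketch(period):
    def fnc(theta, t):
        # theta: (batch, 2*H) coefficients, t: (T,) time steps
        H = theta.shape[-1] // 2
        freqs = torch.arange(1, H + 1, device=t.device, dtype=t.dtype) / period
        angles = 2 * math.pi * freqs[:, None] * t[None, :]         # (H, T)
        basis = torch.cat([torch.cos(angles), torch.sin(angles)])  # (2H, T)
        return theta[:, :2 * H] @ basis                            # (batch, T)
    return fnc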

start, end = -10, 5
theta = torch.ones(11, device='cuda:0')[None, :]
for period in [2, 10, 20]:
    fnc = SeasonalityModel(period)
    t = torch.arange(start, end, .01, device='cuda:0')
    # offset each curve by its period so the three lines don't overlap
    plt.plot(t.cpu(), period + fnc(theta, t).cpu().T, label=str(period))
plt.legend()

f = plt.figure()
T = .01  # sample spacing
for period in [3, 17, 37]:
    fnc = SeasonalityModel(period)
    t = torch.arange(start, end, T, device='cuda:0')
    N = len(t)
    res = fnc(theta, t)[:, :, None]
    res = torch.cat([res, torch.zeros_like(res)], -1)  # append a zero imaginary part
    ft = torch.fft(res, 1, True)[:, :, 0].cpu()        # pre-1.8 torch.fft API
    # offset each spectrum vertically by 2*period for readability
    plt.plot(np.linspace(0.0, 1.0 / (2.0 * T), N // 2), 2 * period + abs(ft[0, :N // 2]), label=str(period))
    plt.axis([0, 15, 0, 100])
f.suptitle('Fast Fourier Transform')
plt.legend()
horizon, lookback = 7, 25
items = dummy_data_generator(50, 7, nrows=10, norm=True, noise=0)
data = TSDataLoaders.from_items(items, horizon=horizon, lookback=lookback, step=5,
                                after_batch=NormalizeTS(), valid_pct=.5, bs=32)

mdl = SeasonalityBlock([256, 256], thetas_dim=10, device=data.train.device, horizon=horizon,
                       lookback=lookback, stand_alone=True, season=lookback + horizon)
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func=Adam)

learn.fit(10, .01)
learn.recorder.plot_loss()
(1, 57)
Train:20; Valid: 10; Test 10
epoch train_loss valid_loss time
0 1.921567 1.161635 00:00
1 1.703814 0.968443 00:00
2 1.548923 0.854076 00:00
3 1.431560 0.799598 00:00
4 1.343658 0.777966 00:00
5 1.274377 0.761676 00:00
6 1.219248 0.741369 00:00
7 1.174307 0.717252 00:00
8 1.136541 0.702864 00:00
9 1.105499 0.692837 00:00
learn.show_results()

class TrendBlock[source]

TrendBlock(layers:L, device, thetas_dim, lookback=10, horizon=5, use_bn=True, bn_final=False, ps:L=None, share_thetas=True, y_range=[-0.1, 0.1], att=True, scale_exp=10, stand_alone=False, base=None, **kwargs) :: Block

Same as nn.Module, but no need for subclasses to call super().__init__
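For the trend block, thetas_dim is the number of basis coefficients, which (judging from trend_model below) effectively caps the degree of the polynomial trend the block can fit.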

trend_model[source]

trend_model(thetas, t)
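The N-BEATS trend basis is polynomial in time; a minimal sketch under that assumption (the library's version may additionally split theta into backcast and forecast halves, which is why the demo below doubles theta's width):

import torch

def trend_model_sketch(theta, t):
    # theta: (batch, P) polynomial coefficients, t: (T,) time steps
    P = theta.shape[-1]
    basis = torch.stack([t ** i for i in range(P)])  # (P, T) Vandermonde rows
    return theta @ basis                             # (batch, T)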

t = tensor([-14., -13., -12., -11., -10., -9., -8., -7., -6., -5., -4., -3.,
            -2., -1., 0., 1., 2., 3., 4., 5.], device='cuda:0')
dims = 4
# damp the higher-degree coefficients so the sampled curves stay readable
scaler = 10**-(torch.arange(float(dims)))
scaler[0] = 10
# scaler = torch.ones(dims)

for i in range(10):
    theta = torch.randn(64, dims, device='cuda:0') * scaler.to(device='cuda:0')
    # widen theta to 2*dims, the shape seen in the tests below
    theta = torch.cat([theta, 5 * torch.randn(64, dims).to(device='cuda:0')], -1)
    plt.plot(t.cpu(), trend_model(theta, t)[0, :].cpu().T)
horizon, lookback = 7, 25
items = dummy_data_generator(50, 7, nrows=10, norm=True, signal_type='trend', noise=.1)
data = TSDataLoaders.from_items(items, horizon=horizon, lookback=lookback, step=5,
                                after_batch=NormalizeTS(), valid_pct=.5, bs=32)

mdl = TrendBlock([256, 256], thetas_dim=4, device=data.train.device, horizon=horizon,
                 lookback=lookback, stand_alone=True, y_range=[-.5, .5])
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func=Adam)

learn.fit(1, .01)
learn.recorder.plot_loss()
(1, 57)
Train:20; Valid: 10; Test 10
epoch train_loss valid_loss time
0 1.436112 0.882117 00:00
learn.show_results()
mdl = TrendBlock([256], thetas_dim=4, device=device, lookback=100, horizon=10, att=False)
dct = mdl(torch.randn(64, 100).to(device))
test_eq(dct['b'].shape, (64, 100))
test_eq(dct['f'].shape, (64, 10))
test_eq(dct['theta'].shape, (64, 8))
test_eq('attention' not in dct, True)


mdl = TrendBlock([256], thetas_dim=4, device=device, lookback=100, horizon=10, stand_alone=True)
bf = mdl(torch.randn(64, 1, 100).to(device))
test_eq(bf.shape, (64, 1, 110))
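Note the two modes: with att=False the output dict holds only b, f and theta, while stand_alone=True returns backcast and forecast concatenated along the time axis (here 100 + 10 = 110), so the block can be trained directly as a Learner model, as in the examples above.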

class NBeatsNet[source]

NBeatsNet(device, stack_types=('trend', 'seasonality'), nb_blocks_per_stack=3, horizon=5, lookback=10, thetas_dim=None, share_weights_in_layers=True, layers=[1024, 512], **kwargs) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
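NBeatsNet chains its blocks in the doubly residual fashion of the N-BEATS paper (Oreshkin et al., 2019): each block's backcast is subtracted from what is left of the input, and the block forecasts are summed into the final prediction. A sketch of that loop (illustrative only; the actual forward pass may also track theta and attention per block):

def nbeats_forward_sketch(blocks, x):
    # blocks: modules returning {'b': backcast, 'f': forecast} as sketched above
    residual, forecast = x, 0
    for block in blocks:
        out = block(residual)
        residual = residual - out['b']   # remove what this block already explains
        forecast = forecast + out['f']   # accumulate the partial forecasts
    return forecast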

select_block[source]

select_block(o)
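select_block maps the strings accepted in stack_types to the corresponding block class, presumably along these lines:

def select_block_sketch(o):
    # hypothetical dispatch from a stack-type name to a Block subclass
    return {'trend': TrendBlock, 'seasonality': SeasonalityBlock}[o]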

Examples

horizon, lookback = 7, 25
items = dummy_data_generator(50, 7, nrows=10, norm=True)
data = TSDataLoaders.from_items(items, horizon=horizon, lookback=lookback, step=2, after_batch=NormalizeTS())

data.show_batch()
(1, 57)
Train:10; Valid: 60; Test 10
mdl = NBeatsNet(device=data.train.device, stack_types=('trend', 'seasonality'), horizon=horizon,
                lookback=lookback, season=lookback + horizon)
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func=Adam)

learn.fit(10, .01)
learn.recorder.plot_loss()
epoch train_loss valid_loss time
0 1.329673 1.065632 00:00
1 1.307293 0.925020 00:00
2 1.237234 0.849307 00:00
3 1.109022 0.779169 00:00
4 1.013001 0.769686 00:00
5 0.936485 0.852710 00:00
6 0.862965 0.871295 00:00
7 0.824713 0.941237 00:00
8 0.783660 1.003032 00:01
9 0.741602 1.023556 00:00
learn.show_results(0)
learn.show_results(1)

Another example

df = pd.read_csv('/home/tako/dev/fastseq/data/airline-passengers.csv')
print(df.head())
items = L([np.array(df['Passengers'].values)])
items
     Month  Passengers
0  1949-01         112
1  1949-02         118
2  1949-03         132
3  1949-04         129
4  1949-05         121
(#1) [array([112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118, 115,
       126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140, 145, 150,
       178, 163, 172, 178, 199, 199, 184, 162, 146, 166, 171, 180, 193,
       181, 183, 218, 230, 242, 209, 191, 172, 194, 196, 196, 236, 235,
       229, 243, 264, 272, 237, 211, 180, 201, 204, 188, 235, 227, 234,
       264, 302, 293, 259, 229, 203, 229, 242, 233, 267, 269, 270, 315,
       364, 347, 312, 274, 237, 278, 284, 277, 317, 313, 318, 374, 413,
       405, 355, 306, 271, 306, 315, 301, 356, 348, 355, 422, 465, 467,
       404, 347, 305, 336, 340, 318, 362, 348, 363, 435, 491, 505, 404,
       359, 310, 337, 360, 342, 406, 396, 420, 472, 548, 559, 463, 407,
       362, 405, 417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390,
       432])]
horizon, lookback = 12, 24
data = TSDataLoaders.from_items(items, lookback=lookback, horizon=horizon, step=1, bs=64)
data.show_batch()
(1, 144)
Train:67; Valid: 19; Test 1
mdl = NBeatsNet(device=data.train.device, stack_types=('trend', 'seasonality'), horizon=horizon,
                lookback=lookback, season=24)
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func=Adam)

learn.fit(10, .01)
learn.recorder.plot_loss()
epoch train_loss valid_loss time
0 2.442768 2.071324 00:00
1 2.303048 0.994528 00:00
2 1.949394 0.842307 00:00
3 1.736230 0.854912 00:00
4 1.602172 0.841866 00:00
5 1.500812 0.751410 00:00
6 1.416816 0.690435 00:00
7 1.326123 0.624953 00:00
8 1.250510 0.599351 00:01
9 1.185108 0.587219 00:00
learn.show_results(0)
# class LinearD(nn.Linear):
#     """Linear layer whose forward ignores any extra args."""
#     def forward(self, x, *args, **kwargs):
#         return super().forward(x)
        
# class GenericBlock(Block):
#     def __init__(
#         self, layers:L, thetas_dim:int, device, lookback=10, horizon=5, use_bn=True,
#         bn_final=False, ps:L=None, share_thetas=True, y_range=[-.05,.05]
#     ):
#         store_attr(self,"y_range,device,layers,thetas_dim,use_bn,ps,lookback,horizon,bn_final,share_thetas" ) 
#         self.scale = torch.ones(self.thetas_dim,device=self.device)
#         super().__init__(LinearD(self.thetas_dim, self.horizon),LinearD(self.thetas_dim, self.lookback))        
#         self.to(device)
        
# mdl = GenericBlock([256], thetas_dim=4, device=device, lookback=100, horizon=10)
# dct = mdl(torch.randn(64,100).to(device))
# test_eq(dct['b'].shape, (64,100))
# test_eq(dct['f'].shape, (64,10))