# A basic architecture for time series forecasting.
# Demo: plot the output of SeasonalityModel for several periods over
# t in [start, end), offsetting each curve by `period` so they don't overlap.
# NOTE(review): original line was `print(linspace(10, 5))` — `linspace` is not
# defined at module level; fixed to the NumPy call it presumably meant.
print(np.linspace(10, 5))
start, end = -10, 5
# 11 flat basis weights, batch dimension added via [None, :]
theta = torch.ones(11, device='cuda:0')[None, :]
for period in [2, 10, 20]:
    fnc = SeasonalityModel(period)
    t = torch.arange(start, end, .01, device='cuda:0')
    plt.plot(t.cpu(), period + fnc(theta, t).cpu().T, label=str(period))
plt.legend()
# Demo: verify each SeasonalityModel period shows up as a spectral peak.
f = plt.figure()
# sample spacing
T = .01
for period in [3, 17, 37]:
    fnc = SeasonalityModel(period)
    t = torch.arange(start, end, T, device='cuda:0')
    N = int(len(t))
    res = fnc(theta, t)[:, :, None]
    # append a zero imaginary channel so the signal is (batch, N, 2)
    res = torch.cat([res, torch.zeros_like(res)], -1)
    # NOTE(review): original called the removed functional `torch.fft(res, 1, True)`
    # (signal_ndim=1, normalized=True) and took `[:, :, 0]` (the real part);
    # this is the equivalent modern torch.fft API call.
    ft = torch.fft.fft(torch.view_as_complex(res), norm='ortho').real.cpu()
    # plot the positive-frequency half, offset by 2*period for visibility
    plt.plot(np.linspace(0.0, 1.0/(2.0*T), int(N/2)), 2*period + abs(ft[0, :N//2]),
             label=str(period))
plt.axis([0, 15, 0, 100])
f.suptitle('FastFourierTransform')
plt.legend()
# Train a stand-alone SeasonalityBlock on synthetic, noiseless seasonal data.
horizon, lookback = 7,25
# 50-point series with period 7, 10 rows, normalized, no noise
items = dummy_data_generator(50, 7, nrows=10, norm=True, noise=0 )
data = TSDataLoaders.from_items(items, horizon = horizon, lookback=lookback, step=5, after_batch = NormalizeTS(), valid_pct=.5, bs = 32)
# season = lookback+horizon so the seasonal basis spans the whole window
mdl = SeasonalityBlock([256,256], thetas_dim=10, device = data.train.device, horizon=horizon, lookback=lookback,stand_alone=True, season = lookback+horizon)
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func= Adam,)
learn.fit(10, .01)
learn.recorder.plot_loss()
learn.show_results()
# Demo: sample random polynomial coefficients `theta` at per-degree scales
# and plot the curves `trend_model` produces over t = -14 .. 5.
t = torch.arange(-14., 6., device='cuda:0')
dims = 4
# decreasing magnitude per degree: [1, .1, .01, .001] ...
scaler = 1 * 10 ** -(torch.arange(float(dims)))
# ... with the constant term bumped up
scaler[0] = 10
# scaler = torch.ones(dims)
for i in range(10):
    theta = torch.randn(64, dims, device='cuda:0') * scaler.to(device='cuda:0')
    # second half of theta: larger, unscaled coefficients
    theta = torch.cat([theta, 5 * torch.randn(64, dims).to(device='cuda:0')], -1)
    plt.plot(t.cpu(), trend_model(theta, t)[0, :].cpu().T)
# Train a stand-alone TrendBlock on synthetic trend data with mild noise.
horizon, lookback = 7,25
items = dummy_data_generator(50, 7, nrows=10, norm=True, signal_type='trend', noise = .1)
data = TSDataLoaders.from_items(items, horizon = horizon, lookback=lookback, step=5, after_batch = NormalizeTS(), valid_pct=.5, bs = 32)
# y_range bounds the block's output to [-.5, .5]
mdl = TrendBlock([256,256], thetas_dim=4, device = data.train.device, horizon=horizon, lookback=lookback, stand_alone=True, y_range=[-.5, .5])
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func= Adam, )
learn.fit(1, .01)
learn.recorder.plot_loss()
learn.show_results()
# Shape checks for TrendBlock in its two output modes.
# Dict mode (default): backcast 'b' (bs, lookback), forecast 'f' (bs, horizon),
# 'theta' of width 8 (presumably 2*thetas_dim — TODO confirm in Block impl),
# and no 'attention' entry when att=False.
mdl = TrendBlock([256], thetas_dim=4, device=device, lookback=100, horizon=10, att=False)
dct = mdl(torch.randn(64,100).to(device))
test_eq(dct['b'].shape, (64,100))
test_eq(dct['f'].shape, (64,10))
test_eq(dct['theta'].shape,(64,8))
test_eq('attention' not in dct, True)
# Stand-alone mode: a single tensor of width lookback+horizon (100+10=110).
mdl = TrendBlock([256], thetas_dim=4, device=device, lookback=100, horizon=10, stand_alone=True)
bf = mdl(torch.randn(64,1,100).to(device))
test_eq(bf.shape, (64,1,110))
# Train a full N-Beats network (trend + seasonality stacks) on synthetic data.
horizon, lookback = 7,25
items = dummy_data_generator(50, 7, nrows=10, norm=True)
data = TSDataLoaders.from_items(items, horizon = horizon, lookback=lookback, step=2, after_batch = NormalizeTS())
data.show_batch()
mdl = NBeatsNet(device = data.train.device, stack_types=('trend','seasonality'), horizon=horizon, lookback=lookback, season=lookback+horizon)
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func= Adam, )
learn.fit(10, .01)
learn.recorder.plot_loss()
# show results for both dataloaders (presumably 0=train, 1=valid — TODO confirm)
learn.show_results(0)
learn.show_results(1)
# Fit N-Beats on the classic airline-passengers monthly series.
# TODO(review): hard-coded absolute path — make relative or configurable.
df = pd.read_csv('/home/tako/dev/fastseq/data/airline-passengers.csv')
print(df.head())
# One series only: wrap the passenger counts as a single numpy array.
# (Original built `np.array(list(df['Passengers'].values))` and evaluated a
# bare `items` repr; simplified to the direct pandas conversion.)
items = L([df['Passengers'].to_numpy()])
horizon, lookback = 12, 24
data = TSDataLoaders.from_items(items, lookback=lookback, horizon=horizon, step=1, bs=64)
data.show_batch()
# season=24: seasonal basis spanning 24 steps of the monthly data
mdl = NBeatsNet(device=data.train.device, stack_types=('trend', 'seasonality'), horizon=horizon, lookback=lookback, season=24)
learn = Learner(data, mdl, loss_func=F.mse_loss, opt_func=Adam)
learn.fit(10, .01)
learn.recorder.plot_loss()
learn.show_results(0)
# class LinearD(nn.Linear):
# """"""
# def forward(self, x, *args, **kwargs):
# return super().forward(x)
# class GenericBlock(Block):
# def __init__(
# self, layers:L, thetas_dim:int, device, lookback=10, horizon=5, use_bn=True,
# bn_final=False, ps:L=None, share_thetas=True, y_range=[-.05,.05]
# ):
# store_attr(self,"y_range,device,layers,thetas_dim,use_bn,ps,lookback,horizon,bn_final,share_thetas" )
# self.scale = torch.ones(self.thetas_dim,device=self.device)
# super().__init__(LinearD(self.thetas_dim, self.horizon),LinearD(self.thetas_dim, self.lookback))
# self.to(device)
# mdl = GenericBlock([256], thetas_dim=4, device=device, lookback=100, horizon=10)
# dct = mdl(torch.randn(64,100).to(device))
# test_eq(dct['b'].shape, (64,100))
# test_eq(dct['f'].shape, (64,10))