import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from huggingface_hub import PyTorchModelHubMixin
import sys
from tqdm import trange

sys.path.append("../")
from model.module import *  # TransformerBlock, BSQuantizer, RMSNorm, HierarchicalEmbedding, TemporalEmbedding, DependencyAwareLayer, DualHead


class KronosTokenizer(nn.Module, PyTorchModelHubMixin):
    """
    KronosTokenizer module for tokenizing input data using a hybrid quantization approach.

    This tokenizer combines encoder and decoder Transformer blocks with Binary Spherical
    Quantization (BSQuantizer) to compress and decompress input data.

    Args:
        d_in (int): Input dimension.
        d_model (int): Model dimension.
        n_heads (int): Number of attention heads.
        ff_dim (int): Feed-forward dimension.
        n_enc_layers (int): Number of encoder layers.
        n_dec_layers (int): Number of decoder layers.
        ffn_dropout_p (float): Dropout probability for feed-forward networks.
        attn_dropout_p (float): Dropout probability for attention mechanisms.
        resid_dropout_p (float): Dropout probability for residual connections.
        s1_bits (int): Number of bits for the pre token in BSQuantizer.
        s2_bits (int): Number of bits for the post token in BSQuantizer.
        beta (float): Beta parameter for BSQuantizer.
        gamma0 (float): Gamma0 parameter for BSQuantizer.
        gamma (float): Gamma parameter for BSQuantizer.
        zeta (float): Zeta parameter for BSQuantizer.
        group_size (int): Group size parameter for BSQuantizer.
    """

    def __init__(self, d_in, d_model, n_heads, ff_dim, n_enc_layers, n_dec_layers,
                 ffn_dropout_p, attn_dropout_p, resid_dropout_p,
                 s1_bits, s2_bits, beta, gamma0, gamma, zeta, group_size):
        super().__init__()
        self.d_in = d_in
        self.d_model = d_model
        self.n_heads = n_heads
        self.ff_dim = ff_dim
        self.enc_layers = n_enc_layers
        self.dec_layers = n_dec_layers
        self.ffn_dropout_p = ffn_dropout_p
        self.attn_dropout_p = attn_dropout_p
        self.resid_dropout_p = resid_dropout_p

        self.s1_bits = s1_bits
        self.s2_bits = s2_bits
        self.codebook_dim = s1_bits + s2_bits  # Total dimension of the codebook after quantization

        self.embed = nn.Linear(self.d_in, self.d_model)
        self.head = nn.Linear(self.d_model, self.d_in)

        # Encoder Transformer blocks
        self.encoder = nn.ModuleList([
            TransformerBlock(self.d_model, self.n_heads, self.ff_dim,
                             self.ffn_dropout_p, self.attn_dropout_p, self.resid_dropout_p)
            for _ in range(self.enc_layers - 1)
        ])
        # Decoder Transformer blocks
        self.decoder = nn.ModuleList([
            TransformerBlock(self.d_model, self.n_heads, self.ff_dim,
                             self.ffn_dropout_p, self.attn_dropout_p, self.resid_dropout_p)
            for _ in range(self.dec_layers - 1)
        ])

        self.quant_embed = nn.Linear(in_features=self.d_model, out_features=self.codebook_dim)  # Linear layer before quantization
        self.post_quant_embed_pre = nn.Linear(in_features=self.s1_bits, out_features=self.d_model)  # Linear layer after quantization (pre part - s1 bits)
        self.post_quant_embed = nn.Linear(in_features=self.codebook_dim, out_features=self.d_model)  # Linear layer after quantization (full codebook)
        self.tokenizer = BSQuantizer(self.s1_bits, self.s2_bits, beta, gamma0, gamma, zeta, group_size)  # BSQuantizer module

    def forward(self, x):
        """
        Forward pass of the KronosTokenizer.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, seq_len, d_in).

        Returns:
            tuple: A tuple containing:
                - tuple: (z_pre, z) - Reconstructed outputs from the decoder using the
                  s1_bits part and the full codebook respectively, both of shape
                  (batch_size, seq_len, d_in).
                - torch.Tensor: bsq_loss - Loss from the BSQuantizer.
                - torch.Tensor: quantized - Quantized representation from the BSQuantizer.
                - torch.Tensor: z_indices - Indices from the BSQuantizer.
        """
        z = self.embed(x)
        for layer in self.encoder:
            z = layer(z)
        z = self.quant_embed(z)  # (B, T, codebook_dim)

        bsq_loss, quantized, z_indices = self.tokenizer(z)

        quantized_pre = quantized[:, :, :self.s1_bits]  # First part of the quantized representation (s1_bits)
        z_pre = self.post_quant_embed_pre(quantized_pre)
        z = self.post_quant_embed(quantized)

        # Decoder pass for the pre part (s1 bits)
        for layer in self.decoder:
            z_pre = layer(z_pre)
        z_pre = self.head(z_pre)

        # Decoder pass for the full codebook
        for layer in self.decoder:
            z = layer(z)
        z = self.head(z)

        return (z_pre, z), bsq_loss, quantized, z_indices

    def indices_to_bits(self, x, half=False):
        """
        Converts indices to bit representations and scales them.

        Args:
            x (torch.Tensor): Indices tensor.
            half (bool, optional): Whether to process only half of the codebook dimension. Defaults to False.

        Returns:
            torch.Tensor: Bit representation tensor.
        """
        if half:
            # When half=True, x is expected to be a pair of index tensors.
            x1 = x[0]
            x2 = x[1]
            mask = 2 ** torch.arange(self.codebook_dim // 2, device=x1.device, dtype=torch.long)  # Bit-extraction mask
            x1 = (x1.unsqueeze(-1) & mask) != 0  # Extract bits for the first half
            x2 = (x2.unsqueeze(-1) & mask) != 0  # Extract bits for the second half
            x = torch.cat([x1, x2], dim=-1)  # Concatenate the bit representations
        else:
            mask = 2 ** torch.arange(self.codebook_dim, device=x.device, dtype=torch.long)  # Bit-extraction mask
            x = (x.unsqueeze(-1) & mask) != 0  # Extract bits
        x = x.float() * 2 - 1  # Convert boolean to bipolar (-1, 1)
        q_scale = 1. / (self.codebook_dim ** 0.5)  # Scaling factor
        x = x * q_scale
        return x
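
    # Worked example of the bit conversion above (a sketch, assuming codebook_dim = 4):
    #   mask = 2 ** arange(4) = [1, 2, 4, 8]
    #   index 5: 5 & mask = [1, 0, 4, 0]  ->  bits [1, 0, 1, 0]  (LSB first)
    #   bipolar: [1, -1, 1, -1]; scaled by 1/sqrt(4): [0.5, -0.5, 0.5, -0.5]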

    def encode(self, x, half=False):
        """
        Encodes the input data into quantized indices.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, seq_len, d_in).
            half (bool, optional): Whether to use half quantization in BSQuantizer. Defaults to False.

        Returns:
            torch.Tensor: Quantized indices from BSQuantizer.
        """
        print("🔍 [KronosTokenizer.encode] input debug:")
        print(f"    x shape: {x.shape}")
        print(f"    x device: {x.device}")
        print(f"    half: {half}")
        print(f"    self.d_in: {self.d_in}")
        z = self.embed(x)
        print(f"🔍 z shape after embed: {z.shape}")
        for i, layer in enumerate(self.encoder):
            z = layer(z)
            print(f"🔍 z shape after encoder layer {i}: {z.shape}")
        z = self.quant_embed(z)
        print(f"🔍 z shape after quant_embed: {z.shape}")
        print("🔍 running BSQuantizer...")
        bsq_loss, quantized, z_indices = self.tokenizer(z, half)
        print("🔍 BSQuantizer done:")
        print(f"    bsq_loss: {bsq_loss}")
        print(f"    quantized shape: {quantized.shape}")
        print(f"    z_indices type: {type(z_indices)}")
        if isinstance(z_indices, (list, tuple)):
            for i, idx in enumerate(z_indices):
                print(f"    z_indices[{i}] shape: {idx.shape}")
        else:
            print(f"    z_indices shape: {z_indices.shape}")
        return z_indices

    def decode(self, x, half=False):
        """
        Decodes quantized indices back to the input data space.

        Args:
            x (torch.Tensor): Quantized indices tensor.
            half (bool, optional): Whether the indices were generated with half quantization. Defaults to False.

        Returns:
            torch.Tensor: Reconstructed output tensor of shape (batch_size, seq_len, d_in).
        """
        quantized = self.indices_to_bits(x, half)
        z = self.post_quant_embed(quantized)
        for layer in self.decoder:
            z = layer(z)
        z = self.head(z)
        return z
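

# Minimal encode/decode round-trip sketch for the tokenizer. The hyperparameters are
# illustrative only (not a released configuration), and `half=True` is assumed to yield
# a pair of index streams, as the inference code further below expects:
#
#   tok = KronosTokenizer(d_in=6, d_model=64, n_heads=4, ff_dim=128,
#                         n_enc_layers=3, n_dec_layers=3, ffn_dropout_p=0.0,
#                         attn_dropout_p=0.0, resid_dropout_p=0.0,
#                         s1_bits=4, s2_bits=4, beta=0.9, gamma0=1.0, gamma=1.0,
#                         zeta=1.0, group_size=1)
#   x = torch.randn(2, 32, 6)                        # (batch, seq_len, d_in)
#   s1_ids, s2_ids = tok.encode(x, half=True)        # two token-index streams
#   x_hat = tok.decode([s1_ids, s2_ids], half=True)  # reconstruction, (2, 32, 6)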


class Kronos(nn.Module, PyTorchModelHubMixin):
    """
    Kronos model.

    Args:
        s1_bits (int): Number of bits for pre tokens.
        s2_bits (int): Number of bits for post tokens.
        n_layers (int): Number of Transformer blocks.
        d_model (int): Dimension of the model's embeddings and hidden states.
        n_heads (int): Number of attention heads in the MultiheadAttention layers.
        ff_dim (int): Dimension of the feed-forward network in the Transformer blocks.
        ffn_dropout_p (float): Dropout probability for the feed-forward network.
        attn_dropout_p (float): Dropout probability for the attention layers.
        resid_dropout_p (float): Dropout probability for residual connections.
        token_dropout_p (float): Dropout probability for token embeddings.
        learn_te (bool): Whether to use learnable temporal embeddings.
    """

    def __init__(self, s1_bits, s2_bits, n_layers, d_model, n_heads, ff_dim,
                 ffn_dropout_p, attn_dropout_p, resid_dropout_p, token_dropout_p, learn_te):
        super().__init__()
        self.s1_bits = s1_bits
        self.s2_bits = s2_bits
        self.n_layers = n_layers
        self.d_model = d_model
        self.n_heads = n_heads
        self.learn_te = learn_te
        self.ff_dim = ff_dim
        self.ffn_dropout_p = ffn_dropout_p
        self.attn_dropout_p = attn_dropout_p
        self.resid_dropout_p = resid_dropout_p
        self.token_dropout_p = token_dropout_p
        self.s1_vocab_size = 2 ** self.s1_bits

        self.token_drop = nn.Dropout(self.token_dropout_p)
        self.embedding = HierarchicalEmbedding(self.s1_bits, self.s2_bits, self.d_model)
        self.time_emb = TemporalEmbedding(self.d_model, self.learn_te)
        self.transformer = nn.ModuleList([
            TransformerBlock(self.d_model, self.n_heads, self.ff_dim,
                             self.ffn_dropout_p, self.attn_dropout_p, self.resid_dropout_p)
            for _ in range(self.n_layers)
        ])
        self.norm = RMSNorm(self.d_model)
        self.dep_layer = DependencyAwareLayer(self.d_model)
        self.head = DualHead(self.s1_bits, self.s2_bits, self.d_model)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            nn.init.xavier_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0, std=self.embedding.d_model ** -0.5)
        elif isinstance(module, nn.LayerNorm):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)
        elif isinstance(module, RMSNorm):
            nn.init.ones_(module.weight)

    def forward(self, s1_ids, s2_ids, stamp=None, padding_mask=None, use_teacher_forcing=False, s1_targets=None):
        """
        Args:
            s1_ids (torch.Tensor): Input tensor of s1 token IDs. Shape: [batch_size, seq_len]
            s2_ids (torch.Tensor): Input tensor of s2 token IDs. Shape: [batch_size, seq_len]
            stamp (torch.Tensor, optional): Temporal stamp tensor. Shape: [batch_size, seq_len]. Defaults to None.
            padding_mask (torch.Tensor, optional): Mask for padding tokens. Shape: [batch_size, seq_len]. Defaults to None.
            use_teacher_forcing (bool, optional): Whether to use teacher forcing for s1 decoding. Defaults to False.
            s1_targets (torch.Tensor, optional): Target s1 token IDs for teacher forcing. Shape: [batch_size, seq_len]. Defaults to None.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                - s1_logits: Logits for s1 token predictions. Shape: [batch_size, seq_len, s1_vocab_size]
                - s2_logits: Logits for s2 token predictions, conditioned on s1. Shape: [batch_size, seq_len, s2_vocab_size]
        """
        x = self.embedding([s1_ids, s2_ids])
        if stamp is not None:
            time_embedding = self.time_emb(stamp)
            x = x + time_embedding
        x = self.token_drop(x)
        for layer in self.transformer:
            x = layer(x, key_padding_mask=padding_mask)
        x = self.norm(x)
        s1_logits = self.head(x)
        if use_teacher_forcing:
            sibling_embed = self.embedding.emb_s1(s1_targets)
        else:
            s1_probs = F.softmax(s1_logits.detach(), dim=-1)
            sample_s1_ids = torch.multinomial(s1_probs.view(-1, self.s1_vocab_size), 1).view(s1_ids.shape)
            sibling_embed = self.embedding.emb_s1(sample_s1_ids)
        x2 = self.dep_layer(x, sibling_embed, key_padding_mask=padding_mask)  # Dependency-aware layer: condition on s1 embeddings
        s2_logits = self.head.cond_forward(x2)
        return s1_logits, s2_logits

    def decode_s1(self, s1_ids, s2_ids, stamp=None, padding_mask=None):
        """
        Decodes only the s1 tokens.

        This method performs a forward pass to predict only s1 tokens. It returns the s1 logits
        and the context representation from the Transformer, which can be used for subsequent s2 decoding.

        Args:
            s1_ids (torch.Tensor): Input tensor of s1 token IDs. Shape: [batch_size, seq_len]
            s2_ids (torch.Tensor): Input tensor of s2 token IDs. Shape: [batch_size, seq_len]
            stamp (torch.Tensor, optional): Temporal stamp tensor. Shape: [batch_size, seq_len]. Defaults to None.
            padding_mask (torch.Tensor, optional): Mask for padding tokens. Shape: [batch_size, seq_len]. Defaults to None.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]:
                - s1_logits: Logits for s1 token predictions. Shape: [batch_size, seq_len, s1_vocab_size]
                - context: Context representation from the Transformer. Shape: [batch_size, seq_len, d_model]
        """
        x = self.embedding([s1_ids, s2_ids])
        if stamp is not None:
            time_embedding = self.time_emb(stamp)
            x = x + time_embedding
        x = self.token_drop(x)
        for layer in self.transformer:
            x = layer(x, key_padding_mask=padding_mask)
        x = self.norm(x)
        s1_logits = self.head(x)
        return s1_logits, x

    def decode_s2(self, context, s1_ids, padding_mask=None):
        """
        Decodes the s2 tokens, conditioned on the context and s1 tokens.

        This method decodes s2 tokens based on a pre-computed context representation (typically from `decode_s1`)
        and the s1 token IDs. It uses the dependency-aware layer and the conditional s2 head to predict s2 tokens.

        Args:
            context (torch.Tensor): Context representation from the Transformer (output of decode_s1).
                Shape: [batch_size, seq_len, d_model]
            s1_ids (torch.Tensor): Input tensor of s1 token IDs. Shape: [batch_size, seq_len]
            padding_mask (torch.Tensor, optional): Mask for padding tokens. Shape: [batch_size, seq_len]. Defaults to None.

        Returns:
            torch.Tensor: s2 logits. Shape: [batch_size, seq_len, s2_vocab_size]
        """
        sibling_embed = self.embedding.emb_s1(s1_ids)
        x2 = self.dep_layer(context, sibling_embed, key_padding_mask=padding_mask)
        return self.head.cond_forward(x2)
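

# Sketch of one autoregressive step with the two-stage heads (the same flow that
# auto_regressive_inference implements below; `model`, `s1_ids`, `s2_ids`, and
# `stamp` are assumed to already exist):
#
#   s1_logits, context = model.decode_s1(s1_ids, s2_ids, stamp)
#   s1_next = sample_from_logits(s1_logits[:, -1, :], temperature=1.0, top_k=0, top_p=0.99)
#   s2_logits = model.decode_s2(context, s1_next)
#   s2_next = sample_from_logits(s2_logits[:, -1, :], temperature=1.0, top_k=0, top_p=0.99)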


def top_k_top_p_filtering(
    logits,
    top_k: int = 0,
    top_p: float = 1.0,
    filter_value: float = -float("Inf"),
    min_tokens_to_keep: int = 1,
):
    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering.

    Args:
        logits: logits distribution of shape (batch size, vocabulary size).
        top_k: if > 0, keep only the top k tokens with highest probability (top-k filtering).
        top_p: if < 1.0, keep the top tokens with cumulative probability >= top_p (nucleus filtering).
            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751).
        min_tokens_to_keep: make sure we keep at least this many tokens per batch example.
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    if top_k > 0:
        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold (tokens with 0 are kept)
        sorted_indices_to_remove = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
        # Shift the indices to the right to also keep the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        # Scatter the sorted mask back to the original indexing
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        logits[indices_to_remove] = filter_value
    return logits
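

# Quick sanity example for the filter (values chosen by hand):
#
#   logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
#   top_k_top_p_filtering(logits, top_k=2)
#   # -> tensor([[2., 1., -inf, -inf]]); only the two highest logits survive.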


def sample_from_logits(logits, temperature=1.0, top_k=None, top_p=None, sample_logits=True):
    logits = logits / temperature
    if top_k is not None or top_p is not None:
        if top_k > 0 or top_p < 1.0:
            logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
    probs = F.softmax(logits, dim=-1)
    if not sample_logits:
        # Greedy decoding: take the most probable token instead of sampling.
        _, x = torch.topk(probs, k=1, dim=-1)
    else:
        x = torch.multinomial(probs, num_samples=1)
    return x


def auto_regressive_inference(tokenizer, model, x, x_stamp, y_stamp, max_context, pred_len,
                              clip=5, T=1.0, top_k=0, top_p=0.99, sample_count=5, verbose=False):
    # Debug information
    print("🔍 [auto_regressive_inference] input debug:")
    print(f"    x shape: {x.shape}")
    print(f"    x_stamp shape: {x_stamp.shape}")
    print(f"    y_stamp shape: {y_stamp.shape}")
    print(f"    max_context: {max_context}")
    print(f"    pred_len: {pred_len}")
    with torch.no_grad():
        batch_size = x.size(0)
        initial_seq_len = x.size(1)
        x = torch.clip(x, -clip, clip)
        device = x.device

        # Replicate each series sample_count times so samples can be drawn in parallel.
        print(f"🔍 x shape before replication: {x.shape}")
        x = x.unsqueeze(1).repeat(1, sample_count, 1, 1).reshape(-1, x.size(1), x.size(2)).to(device)
        x_stamp = x_stamp.unsqueeze(1).repeat(1, sample_count, 1, 1).reshape(-1, x_stamp.size(1), x_stamp.size(2)).to(device)
        y_stamp = y_stamp.unsqueeze(1).repeat(1, sample_count, 1, 1).reshape(-1, y_stamp.size(1), y_stamp.size(2)).to(device)
        print(f"🔍 x shape after replication: {x.shape}")
        print(f"🔍 x_stamp shape after replication: {x_stamp.shape}")
        print(f"🔍 y_stamp shape after replication: {y_stamp.shape}")

        # Tokenizer encoding, with debug output
        print("🔍 running tokenizer.encode...")
        x_token = tokenizer.encode(x, half=True)
        print(f"🔍 tokenizer.encode done, x_token type: {type(x_token)}")
        if isinstance(x_token, (list, tuple)):
            print(f"🔍 x_token length: {len(x_token)}")
            for i, token in enumerate(x_token):
                print(f"🔍 x_token[{i}] shape: {token.shape}")
        else:
            print(f"🔍 x_token shape: {x_token.shape}")

        def get_dynamic_stamp(x_stamp, y_stamp, current_seq_len, pred_step):
            if current_seq_len <= max_context - pred_step:
                return torch.cat([x_stamp, y_stamp[:, :pred_step, :]], dim=1)
            else:
                start_idx = max_context - pred_step
                return torch.cat([x_stamp[:, -start_idx:, :], y_stamp[:, :pred_step, :]], dim=1)

        if verbose:
            ran = trange
        else:
            ran = range
        for i in ran(pred_len):
            current_seq_len = initial_seq_len + i
            if current_seq_len <= max_context:
                input_tokens = x_token
            else:
                input_tokens = [t[:, -max_context:].contiguous() for t in x_token]
            current_stamp = get_dynamic_stamp(x_stamp, y_stamp, current_seq_len, i)
            s1_logits, context = model.decode_s1(input_tokens[0], input_tokens[1], current_stamp)
            s1_logits = s1_logits[:, -1, :]
            sample_pre = sample_from_logits(s1_logits, temperature=T, top_k=top_k, top_p=top_p, sample_logits=True)
            s2_logits = model.decode_s2(context, sample_pre)
            s2_logits = s2_logits[:, -1, :]
            sample_post = sample_from_logits(s2_logits, temperature=T, top_k=top_k, top_p=top_p, sample_logits=True)
            x_token[0] = torch.cat([x_token[0], sample_pre], dim=1)
            x_token[1] = torch.cat([x_token[1], sample_post], dim=1)
            torch.cuda.empty_cache()

        input_tokens = [t[:, -max_context:].contiguous() for t in x_token]
        z = tokenizer.decode(input_tokens, half=True)
        z = z.reshape(batch_size, sample_count, z.size(1), z.size(2))
        preds = z.cpu().numpy()
        preds = np.mean(preds, axis=1)  # Average over the sample_count dimension
    return preds


def calc_time_stamps(x_timestamp):
    time_df = pd.DataFrame()
    time_df['minute'] = x_timestamp.dt.minute
    time_df['hour'] = x_timestamp.dt.hour
    time_df['weekday'] = x_timestamp.dt.weekday
    time_df['day'] = x_timestamp.dt.day
    time_df['month'] = x_timestamp.dt.month
    return time_df
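

# Example of the time features produced above (note: `x_timestamp` must support the
# pandas `.dt` accessor, i.e. be a Series of datetimes):
#
#   ts = pd.Series(pd.to_datetime(["2024-01-05 09:30:00"]))
#   calc_time_stamps(ts)
#   #    minute  hour  weekday  day  month
#   # 0      30     9        4    5      1    (weekday 4 = Friday)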


class KronosPredictor:
    def __init__(self, model, tokenizer, device="cuda:0", max_context=512, clip=5):
        self.tokenizer = tokenizer
        self.model = model
        self.max_context = max_context
        self.clip = clip
        self.price_cols = ['open', 'high', 'low', 'close']
        self.vol_col = 'volume'
        self.amt_vol = 'amount'
        self.time_cols = ['minute', 'hour', 'weekday', 'day', 'month']
        self.device = device
        self.tokenizer = self.tokenizer.to(self.device)
        self.model = self.model.to(self.device)

    def generate(self, x, x_stamp, y_stamp, pred_len, T, top_k, top_p, sample_count, verbose):
        x_tensor = torch.from_numpy(np.array(x).astype(np.float32)).to(self.device)
        x_stamp_tensor = torch.from_numpy(np.array(x_stamp).astype(np.float32)).to(self.device)
        y_stamp_tensor = torch.from_numpy(np.array(y_stamp).astype(np.float32)).to(self.device)
        preds = auto_regressive_inference(self.tokenizer, self.model, x_tensor, x_stamp_tensor, y_stamp_tensor,
                                          self.max_context, pred_len, self.clip, T, top_k, top_p,
                                          sample_count, verbose)
        preds = preds[:, -pred_len:, :]
        return preds

    def predict(self, df, x_timestamp, y_timestamp, pred_len, T=1.0, top_k=0, top_p=0.9, sample_count=1, verbose=True):
        # Detailed input debugging
        print("🔍 [KronosPredictor.predict] input debug:")
        print(f"    df type: {type(df)}")
        print(f"    df shape: {df.shape}")
        print(f"    df columns: {df.columns.tolist()}")
        print(f"    x_timestamp length: {len(x_timestamp)}")
        print(f"    y_timestamp length: {len(y_timestamp)}")
        if not isinstance(df, pd.DataFrame):
            raise ValueError("Input must be a pandas DataFrame.")
        if not all(col in df.columns for col in self.price_cols):
            raise ValueError(f"Price columns {self.price_cols} not found in DataFrame.")
        df = df.copy()
        if self.vol_col not in df.columns:
            df[self.vol_col] = 0.0  # Fill missing volume with zeros
            df[self.amt_vol] = 0.0  # Fill missing amount with zeros
        if self.amt_vol not in df.columns and self.vol_col in df.columns:
            df[self.amt_vol] = df[self.vol_col] * df[self.price_cols].mean(axis=1)
        print(f"🔍 df shape after preprocessing: {df.shape}")
        print(f"🔍 df columns after preprocessing: {df.columns.tolist()}")
        if df[self.price_cols + [self.vol_col, self.amt_vol]].isnull().values.any():
            raise ValueError("Input DataFrame contains NaN values in price or volume columns.")
        x_time_df = calc_time_stamps(x_timestamp)
        y_time_df = calc_time_stamps(y_timestamp)
        x = df[self.price_cols + [self.vol_col, self.amt_vol]].values.astype(np.float32)
        x_stamp = x_time_df.values.astype(np.float32)
        y_stamp = y_time_df.values.astype(np.float32)
        print(f"🔍 final x shape: {x.shape}")
        print(f"🔍 final x_stamp shape: {x_stamp.shape}")
        print(f"🔍 final y_stamp shape: {y_stamp.shape}")
        # Normalize per feature, clip outliers, and add a batch dimension.
        x_mean, x_std = np.mean(x, axis=0), np.std(x, axis=0)
        x = (x - x_mean) / (x_std + 1e-5)
        x = np.clip(x, -self.clip, self.clip)
        x = x[np.newaxis, :]
        x_stamp = x_stamp[np.newaxis, :]
        y_stamp = y_stamp[np.newaxis, :]
        print(f"🔍 x shape after normalization: {x.shape}")
        preds = self.generate(x, x_stamp, y_stamp, pred_len, T, top_k, top_p, sample_count, verbose)
        preds = preds.squeeze(0)
        # Undo the normalization and assemble the prediction DataFrame.
        preds = preds * (x_std + 1e-5) + x_mean
        pred_df = pd.DataFrame(preds, columns=self.price_cols + [self.vol_col, self.amt_vol], index=y_timestamp)
        return pred_df

    def predict_batch(self, df_list, x_timestamp_list, y_timestamp_list, pred_len, T=1.0, top_k=0, top_p=0.9, sample_count=1, verbose=True):
        """
        Performs parallel (batch) prediction on multiple time series. All series must share
        the same historical length and the same prediction length (pred_len).

        Args:
            df_list (List[pd.DataFrame]): List of input DataFrames, each containing the price columns and optional volume/amount columns.
            x_timestamp_list (List[pd.DatetimeIndex or Series]): Timestamps of the historical data; each entry's length must match the row count of the corresponding DataFrame.
            y_timestamp_list (List[pd.DatetimeIndex or Series]): Future prediction timestamps; each entry's length must equal pred_len.
            pred_len (int): Number of prediction steps.
            T (float): Sampling temperature.
            top_k (int): Top-k filtering threshold.
            top_p (float): Top-p (nucleus sampling) threshold.
            sample_count (int): Number of parallel samples per series; results are averaged internally.
            verbose (bool): Whether to display autoregressive progress.

        Returns:
            List[pd.DataFrame]: Prediction results in the same order as the input; each DataFrame
                contains the `open, high, low, close, volume, amount` columns, indexed by the
                corresponding `y_timestamp`.
        """
        # Basic validation
        if not isinstance(df_list, (list, tuple)) or not isinstance(x_timestamp_list, (list, tuple)) or not isinstance(y_timestamp_list, (list, tuple)):
            raise ValueError("df_list, x_timestamp_list, y_timestamp_list must be list or tuple types.")
        if not (len(df_list) == len(x_timestamp_list) == len(y_timestamp_list)):
            raise ValueError("df_list, x_timestamp_list, y_timestamp_list must have consistent lengths.")

        num_series = len(df_list)
        x_list = []
        x_stamp_list = []
        y_stamp_list = []
        means = []
        stds = []
        seq_lens = []
        y_lens = []
        for i in range(num_series):
            df = df_list[i]
            if not isinstance(df, pd.DataFrame):
                raise ValueError(f"Input at index {i} is not a pandas DataFrame.")
            if not all(col in df.columns for col in self.price_cols):
                raise ValueError(f"DataFrame at index {i} is missing price columns {self.price_cols}.")
            df = df.copy()
            if self.vol_col not in df.columns:
                df[self.vol_col] = 0.0
                df[self.amt_vol] = 0.0
            if self.amt_vol not in df.columns and self.vol_col in df.columns:
                df[self.amt_vol] = df[self.vol_col] * df[self.price_cols].mean(axis=1)
            if df[self.price_cols + [self.vol_col, self.amt_vol]].isnull().values.any():
                raise ValueError(f"DataFrame at index {i} contains NaN values in price or volume columns.")
            x_timestamp = x_timestamp_list[i]
            y_timestamp = y_timestamp_list[i]
            x_time_df = calc_time_stamps(x_timestamp)
            y_time_df = calc_time_stamps(y_timestamp)
            x = df[self.price_cols + [self.vol_col, self.amt_vol]].values.astype(np.float32)
            x_stamp = x_time_df.values.astype(np.float32)
            y_stamp = y_time_df.values.astype(np.float32)
            if x.shape[0] != x_stamp.shape[0]:
                raise ValueError(f"Inconsistent lengths at index {i}: x has {x.shape[0]} vs x_stamp has {x_stamp.shape[0]}.")
            if y_stamp.shape[0] != pred_len:
                raise ValueError(f"y_timestamp length at index {i} should equal pred_len={pred_len}, got {y_stamp.shape[0]}.")
            x_mean, x_std = np.mean(x, axis=0), np.std(x, axis=0)
            x_norm = (x - x_mean) / (x_std + 1e-5)
            x_norm = np.clip(x_norm, -self.clip, self.clip)
            x_list.append(x_norm)
            x_stamp_list.append(x_stamp)
            y_stamp_list.append(y_stamp)
            means.append(x_mean)
            stds.append(x_std)
            seq_lens.append(x_norm.shape[0])
            y_lens.append(y_stamp.shape[0])

        # Batch processing requires all series to share historical and prediction lengths.
        if len(set(seq_lens)) != 1:
            raise ValueError(f"Parallel prediction requires all series to have consistent historical lengths, got: {seq_lens}")
        if len(set(y_lens)) != 1:
            raise ValueError(f"Parallel prediction requires all series to have consistent prediction lengths, got: {y_lens}")

        x_batch = np.stack(x_list, axis=0).astype(np.float32)  # (B, seq_len, feat)
        x_stamp_batch = np.stack(x_stamp_list, axis=0).astype(np.float32)  # (B, seq_len, time_feat)
        y_stamp_batch = np.stack(y_stamp_list, axis=0).astype(np.float32)  # (B, pred_len, time_feat)

        preds = self.generate(x_batch, x_stamp_batch, y_stamp_batch, pred_len, T, top_k, top_p, sample_count, verbose)
        # preds: (B, pred_len, feat)
        pred_dfs = []
        for i in range(num_series):
            # Undo per-series normalization before assembling the output DataFrame.
            preds_i = preds[i] * (stds[i] + 1e-5) + means[i]
            pred_df = pd.DataFrame(preds_i, columns=self.price_cols + [self.vol_col, self.amt_vol], index=y_timestamp_list[i])
            pred_dfs.append(pred_df)
        return pred_dfs
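

# End-to-end usage sketch. The repo ids below are placeholders (PyTorchModelHubMixin
# supplies `from_pretrained`); substitute real checkpoints and data:
#
#   tokenizer = KronosTokenizer.from_pretrained("<tokenizer-repo-id>")
#   model = Kronos.from_pretrained("<model-repo-id>")
#   predictor = KronosPredictor(model, tokenizer, device="cuda:0", max_context=512)
#   # df: DataFrame with open/high/low/close (+ optional volume/amount) history rows;
#   # x_ts / y_ts: pandas Series of datetimes for history and horizon (len(y_ts) == pred_len).
#   pred_df = predictor.predict(df, x_ts, y_ts, pred_len=len(y_ts), T=1.0,
#                               top_k=0, top_p=0.9, sample_count=1)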