tx · BtfC1G7NQ7m5TCp3zTNFdqmD8r2RcD3LmRuMn73n5p6q

3N9ttyLcRwDo7L4EmJkbS3ZFuQJygivupsL:  -0.00500000 Waves

2023.10.23 17:18 [2811367] invoke 3N9ttyLcRwDo7L4EmJkbS3ZFuQJygivupsL > 3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV commitTask()

3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV: checked_out_by_92ovWCy1Zf8CSsTLLLssC74m8yn5yPMqVp9fmVacou97_youtube_CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp: true -> null
3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV: CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_commit_timestamp_youtube: 1698070704021
3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV: CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_commit_height_youtube: 2811367
3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV: CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_result_youtube: "The speaker discusses the process of training BloombergGPT, Bloomberg's 50-billion-parameter language model trained on a massive dataset of tokens, a notable share of which comes from the financial domain. The speaker explains the autoregressive nature of the model, how it predicts the next word in a sequence, and the use of tokens for flexibility. During training, however, issues arose related to gradient norm spikes and a decline in performance. Despite these challenges, after various troubleshooting strategies and adjustments, the model performed well on different tasks, excelling specifically at financial ones. Notwithstanding this success, the speaker questions the need to train large models from scratch and leans towards using open-source models and testing innovations on a smaller scale before exploring larger models. The conversation concludes with a Q&A session addressing specific queries about the model-building process."
3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV: CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_status_youtube: "checked_out" -> "done"
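
Taken together, the state changes above show a simple on-chain task queue: the worker's checkout marker (checked_out_by_<workerPublicKey>_youtube_<taskId>) is deleted, the result text plus commit height and timestamp are written under the task's keys, and the task status flips from "checked_out" to "done". A worker could build an equivalent commitTask() invocation with the @waves/waves-transactions library; the sketch below is illustrative only, with SEED, NODE, and the shortened second argument as placeholders (chain ID 84 is 'T', the Waves testnet, and the fee is denominated in wavelets: 500000 × 10⁻⁸ = 0.005 WAVES).

// Minimal sketch, assuming @waves/waves-transactions and a public testnet
// node; SEED and NODE are hypothetical placeholders, not values from this page.
import { invokeScript, broadcast } from '@waves/waves-transactions';

const SEED = 'insert worker seed phrase here';       // hypothetical worker account
const NODE = 'https://nodes-testnet.wavesnodes.com'; // assumed testnet node URL

const tx = invokeScript(
  {
    dApp: '3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV',
    call: {
      function: 'commitTask',
      args: [
        // Composite task id (two base58 ids joined by '_'), copied from the raw JSON below.
        { type: 'string', value: 'CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp' },
        { type: 'string', value: 'The speaker discusses ...' }, // the summary text to store
      ],
    },
    payment: [],
    fee: 500000, // 0.005 WAVES (1 WAVES = 10^8 wavelets)
    chainId: 84, // 84 == 'T', Waves testnet
  },
  SEED,
);

broadcast(tx, NODE).then((res) => console.log('broadcast tx id:', res.id));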

{ "type": 16, "id": "BtfC1G7NQ7m5TCp3zTNFdqmD8r2RcD3LmRuMn73n5p6q", "fee": 500000, "feeAssetId": null, "timestamp": 1698070753523, "version": 2, "chainId": 84, "sender": "3N9ttyLcRwDo7L4EmJkbS3ZFuQJygivupsL", "senderPublicKey": "92ovWCy1Zf8CSsTLLLssC74m8yn5yPMqVp9fmVacou97", "proofs": [ "3b9xpt9zvWKKZ8osyXdrsEmWLvDormYbt3GS8dLs9CA95qLY9qeehREX6aNvgsG8GCJ5KCpMk7E5h87RdMUY2Lz1" ], "dApp": "3N9tKixzqTYWnEXQxrDQ5pBTGvQd6sFsvmV", "payment": [], "call": { "function": "commitTask", "args": [ { "type": "string", "value": "CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp" }, { "type": "string", "value": "The speaker discusses the process of training Bloomberg's model Blumberg GPT, a 50 billion parameter language model trained on a massive dataset of tokens, which includes a notable amount from the financial domain. The speaker explains the autoregressive nature of the model, how it predicts the next word in a sequence, and the use of tokens for flexibility. However, during the training process, issues arose related to gradient norm spikes and a decline in performance. Despite these challenges, after employing various troubleshooting strategies and adjustments, the model managed to perform well on different tasks, specifically excelling in financial tasks. Notwithstanding the success, the speaker questions the need for training large models from scratch and shows an inclination towards using open-source models and testing innovations on a smaller scale before exploring larger models. The conversation concludes with a Q&A session, addressing specific queries about the model building process." } ] }, "height": 2811367, "applicationStatus": "succeeded", "spentComplexity": 67, "stateChanges": { "data": [ { "key": "CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_status_youtube", "type": "string", "value": "done" }, { "key": "CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_result_youtube", "type": "string", "value": "The speaker discusses the process of training Bloomberg's model Blumberg GPT, a 50 billion parameter language model trained on a massive dataset of tokens, which includes a notable amount from the financial domain. The speaker explains the autoregressive nature of the model, how it predicts the next word in a sequence, and the use of tokens for flexibility. However, during the training process, issues arose related to gradient norm spikes and a decline in performance. Despite these challenges, after employing various troubleshooting strategies and adjustments, the model managed to perform well on different tasks, specifically excelling in financial tasks. Notwithstanding the success, the speaker questions the need for training large models from scratch and shows an inclination towards using open-source models and testing innovations on a smaller scale before exploring larger models. The conversation concludes with a Q&A session, addressing specific queries about the model building process." 
}, { "key": "CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_commit_height_youtube", "type": "integer", "value": 2811367 }, { "key": "CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp_commit_timestamp_youtube", "type": "integer", "value": 1698070704021 }, { "key": "checked_out_by_92ovWCy1Zf8CSsTLLLssC74m8yn5yPMqVp9fmVacou97_youtube_CGuNCdDJJbrzvpKiJKjT5pjey2Vgnqp2cW7BfdGogg8g_D2Eu5TQSVkAwDi7WKia7vFRkw5diyFYkCRhRzsVDmjQp", "value": null } ], "transfers": [], "issues": [], "reissues": [], "burns": [], "sponsorFees": [], "leases": [], "leaseCancels": [], "invokes": [] } }
