tx FPJLJBHCFpWbureEe13BzNSfQbZFGFP6nVQzTvxGcaVB · smart account (set script) · 2024.03.20 12:07 · block 3026144
3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF · 0.00000000 Waves · fee 0.01000000 Waves
{ "type": 13, "id": "FPJLJBHCFpWbureEe13BzNSfQbZFGFP6nVQzTvxGcaVB", "fee": 1000000, "feeAssetId": null, "timestamp": 1710925713061, "version": 2, "chainId": 84, "sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY", "senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc", "proofs": [ "KwxhceLQVShJHbQbaaeqC6cwTwcGv1kk2VcykM2Bmddk1XSBDXySNodfnZUgg3eALj2npSnt7ZRwFx7hXDmUkj8" ], "script": "base64:AAIFAAAAAAAAAAgIAhIECgIBAQAAAAgAAAAADWxheWVyMVdlaWdodHMJAARMAAAAAgkABEwAAAACAAAAAAAASAnZCQAETAAAAAIA//////+zrIUFAAAAA25pbAkABEwAAAACCQAETAAAAAIAAAAAAABfA54JAARMAAAAAgD//////58MgwUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMUJpYXNlcwkABEwAAAACAP//////2YbeCQAETAAAAAIAAAAAAAAzuDoFAAAAA25pbAAAAAANbGF5ZXIyV2VpZ2h0cwkABEwAAAACCQAETAAAAAIAAAAAAAB7v3AJAARMAAAAAgD//////4ylsAUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMkJpYXNlcwkABEwAAAACAAAAAAAANUR+BQAAAANuaWwBAAAAB3NpZ21vaWQAAAACAAAAAXoAAAALZGVidWdQcmVmaXgEAAAAAWUAAAAAAAApekkEAAAABGJhc2UAAAAAAAAPQkAEAAAACW5lZ2F0aXZlWgkAAGgAAAACAP//////////BQAAAAF6BAAAAAdleHBQYXJ0CQAAawAAAAMFAAAAAWUFAAAACW5lZ2F0aXZlWgUAAAAEYmFzZQQAAAAIc2lnVmFsdWUJAABrAAAAAwUAAAAEYmFzZQUAAAAEYmFzZQkAAGQAAAACBQAAAARiYXNlBQAAAAdleHBQYXJ0CQAFFAAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAIJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAACW5lZ2F0aXZlWgUAAAAJbmVnYXRpdmVaCQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACCQABLAAAAAIFAAAAC2RlYnVnUHJlZml4AgAAAAdleHBQYXJ0BQAAAAdleHBQYXJ0BQAAAANuaWwFAAAACHNpZ1ZhbHVlAQAAAApkb3RQcm9kdWN0AAAAAgAAAAFhAAAAAWIEAAAACHByb2R1Y3QwCQAAawAAAAMJAAGRAAAAAgUAAAABYQAAAAAAAAAAAAkAAZEAAAACBQAAAAFiAAAAAAAAAAAAAAAAAAAAD0JABAAAAAhwcm9kdWN0MQkAAGsAAAADCQABkQAAAAIFAAAAAWEAAAAAAAAAAAEJAAGRAAAAAgUAAAABYgAAAAAAAAAAAQAAAAAAAA9CQAkAAGQAAAACBQAAAAhwcm9kdWN0MAUAAAAIcHJvZHVjdDEBAAAAC2ZvcndhcmRQYXNzAAAABAAAAAVpbnB1dAAAAAd3ZWlnaHRzAAAABmJpYXNlcwAAAAVsYXllcgQAAAAEc3VtMAkAAGQAAAACCQEAAAAKZG90UHJvZHVjdAAAAAIFAAAABWlucHV0CQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAJAAGRAAAAAgUAAAAGYmlhc2VzAAAAAAAAAAAABAAAAARzdW0xCQAAZAAAAAIJAQAAAApkb3RQcm9kdWN0AAAAAgUAAAAFaW5wdXQJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQkAAZEAAAACBQAAAAZiaWFzZXMAAAAAAAAAAAEEAAAACyR0MDEwMDUxMDYyCQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTAJAAEsAAAAAgUAAAAFbGF5ZXICAAAABEwxTjEEAAAADXNpZ21vaWREZWJ1ZzAIBQAAAAskdDAxMDA1MTA2MgAAAAJfMQQAAAAEc2lnMAgFAAAACyR0MDEwMDUxMDYyAAAAAl8yBAAAAAskdDAxMDY3MTEyNAkBAAAAB3NpZ21vaWQAAAACBQAAAARzdW0xCQABLAAAAAIFAAAABWxheWVyAgAAAARMMU4yBAAAAA1zaWdtb2lkRGVidWcxCAUAAAALJHQwMTA2NzExMjQAAAACXzEEAAAABHNpZzEIBQAAAAskdDAxMDY3MTEyNAAAAAJfMgkABRQAAAACCQAETAAAAAIFAAAABHNpZzAJAARMAAAAAgUAAAAEc2lnMQkABEwAAAACBQAAAARzdW0wCQAETAAAAAIFAAAABHN1bTEFAAAAA25pbAkABE4AAAACBQAAAA1zaWdtb2lkRGVidWcwBQAAAA1zaWdtb2lkRGVidWcxAQAAABB4b3JOZXVyYWxOZXR3b3JrAAAAAgAAAAZpbnB1dDEAAAAGaW5wdXQyBAAAAAVpbnB1dAkABEwAAAACBQAAAAZpbnB1dDEJAARMAAAAAgUAAAAGaW5wdXQyBQAAAANuaWwEAAAACyR0MDEzMDIxMzk0CQEAAAALZm9yd2FyZFBhc3MAAAAEBQAAAAVpbnB1dAUAAAANbGF5ZXIxV2VpZ2h0cwUAAAAMbGF5ZXIxQmlhc2VzAgAAAAJITAQAAAARaGlkZGVuTGF5ZXJPdXRwdXQIBQAAAAskdDAxMzAyMTM5NAAAAAJfMQQAAAALaGlkZGVuRGVidWcIBQAAAAskdDAxMzAyMTM5NAAAAAJfMgQAAAALJHQwMTM5OTE1MzQJAQAAAAdzaWdtb2lkAAAAAgkAAGQAAAACCQEAAAAKZG90UHJvZHVjdAAAAAIJAARMAAAAAgkAAZEAAAACBQAAABFoaWRkZW5MYXllck91dHB1dAAAAAAAAAAAAAkABEwAAAACCQABkQAAAAIFAAAAEWhpZGRlbkxheWVyT3V0cHV0AAAAAAAAAAABBQAAAANuaWwJAAGRAAAAAgUAAAANbGF5ZXIyV2VpZ2h0cwAAAAAAAAAAAAkAAZEAAAACBQAAAAxsYXllcjJCaWFzZXMAAAAAAAAAAAACAAAAAk9MBAAAAAtvdXRwdXREZWJ1ZwgFAAAACyR0MDEzOTkxNTM0AAAAAl8xBAAAAAZvdXRwdXQIBQAAAAskdDAxMzk5MTUzNAAAAAJfMgkABRQAAAACCQAETAAAAAIFAAAABm91dHB1dAkABEwAAAACCQAAZAAAAAIJAQAAAApkb3RQcm9kdWN0AAAAAgkABEwAAAACCQABkQAAAAIFAAAAEWhpZGRlbkxheWVyT3V0cHV0AAAAAAAAAAAACQAETAAAAAIJAAGRAAAAAgUAAAARaGlkZGVuTGF5ZXJPdXRwdXQ
AAAAAAAAAAAEFAAAAA25pbAkAAZEAAAACBQAAAA1sYXllcjJXZWlnaHRzAAAAAAAAAAAACQABkQAAAAIFAAAADGxheWVyMkJpYXNlcwAAAAAAAAAAAAkABEwAAAACCQABkQAAAAIFAAAAEWhpZGRlbkxheWVyT3V0cHV0AAAAAAAAAAACCQAETAAAAAIJAAGRAAAAAgUAAAARaGlkZGVuTGF5ZXJPdXRwdXQAAAAAAAAAAAMFAAAAA25pbAkABE4AAAACBQAAAAtoaWRkZW5EZWJ1ZwUAAAALb3V0cHV0RGVidWcAAAABAAAAAWkBAAAAB3ByZWRpY3QAAAACAAAABmlucHV0MQAAAAZpbnB1dDIEAAAADHNjYWxlZElucHV0MQMJAAAAAAAAAgUAAAAGaW5wdXQxAAAAAAAAAAABAAAAAAAAD0JAAAAAAAAAAAAABAAAAAxzY2FsZWRJbnB1dDIDCQAAAAAAAAIFAAAABmlucHV0MgAAAAAAAAAAAQAAAAAAAA9CQAAAAAAAAAAAAAQAAAALJHQwMTkxOTIwMDAJAQAAABB4b3JOZXVyYWxOZXR3b3JrAAAAAgUAAAAMc2NhbGVkSW5wdXQxBQAAAAxzY2FsZWRJbnB1dDIEAAAADm5ldHdvcmtPdXRwdXRzCAUAAAALJHQwMTkxOTIwMDAAAAACXzEEAAAADGRlYnVnRW50cmllcwgFAAAACyR0MDE5MTkyMDAwAAAAAl8yCQAETgAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAICAAAABnJlc3VsdAkAAZEAAAACBQAAAA5uZXR3b3JrT3V0cHV0cwAAAAAAAAAAAAkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgIAAAAOb3V0cHV0TGF5ZXJTdW0JAAGRAAAAAgUAAAAObmV0d29ya091dHB1dHMAAAAAAAAAAAEJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAICAAAAFWhpZGRlbkxheWVyT3V0cHV0MVN1bQkAAZEAAAACBQAAAA5uZXR3b3JrT3V0cHV0cwAAAAAAAAAAAgkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgIAAAAVaGlkZGVuTGF5ZXJPdXRwdXQyU3VtCQABkQAAAAIFAAAADm5ldHdvcmtPdXRwdXRzAAAAAAAAAAADBQAAAANuaWwFAAAADGRlYnVnRW50cmllcwAAAAAyd09+", "height": 3026144, "applicationStatus": "succeeded", "spentComplexity": 0 }
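For reference, chainId 84 ('T') marks this as a Testnet transaction, so the JSON above can be re-fetched from any Testnet node exposing the standard REST API. A minimal Python sketch, assuming the public gateway at nodes-testnet.wavesnodes.com is reachable and serves GET /transactions/info/{id}:

    # Hypothetical re-fetch of the SetScript transaction shown above.
    import json
    import urllib.request

    NODE = "https://nodes-testnet.wavesnodes.com"   # assumed public Testnet gateway
    TX_ID = "FPJLJBHCFpWbureEe13BzNSfQbZFGFP6nVQzTvxGcaVB"

    with urllib.request.urlopen(f"{NODE}/transactions/info/{TX_ID}") as resp:
        tx = json.load(resp)

    print(tx["type"], tx["height"], tx["applicationStatus"])  # 13, 3026144, succeeded
    print(tx["script"][:32], "...")  # "base64:AAIF..." is the compiled Ride script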
Diff against the previous script version (columns: old line | new line; '-' = removed, '+' = added):

  Old | New |
    1 |   1 |   {-# STDLIB_VERSION 5 #-}
    2 |   2 |   {-# SCRIPT_TYPE ACCOUNT #-}
    3 |   3 |   {-# CONTENT_TYPE DAPP #-}
    4 |   4 |   let layer1Weights = [[4721113, -5002107], [6226846, -6353789]]
    5 |   5 |
    6 |   6 |   let layer1Biases = [-2521378, 3389498]
    7 |   7 |
    8 |   8 |   let layer2Weights = [[8109936, -7559760]]
    9 |   9 |
   10 |  10 |   let layer2Biases = [3490942]
   11 |  11 |
   12 |     | - func sigmoid (z) = {
      |  12 | + func sigmoid (z,debugPrefix) = {
   13 |  13 |       let e = 2718281
   14 |  14 |       let base = 1000000
   15 |  15 |       let negativeZ = (-1 * z)
   16 |  16 |       let expPart = fraction(e, negativeZ, base)
   17 |     | -     fraction(base, base, (base + expPart))
      |  17 | +     let sigValue = fraction(base, base, (base + expPart))
      |  18 | +     $Tuple2([IntegerEntry((debugPrefix + "negativeZ"), negativeZ), IntegerEntry((debugPrefix + "expPart"), expPart)], sigValue)
   18 |  19 |   }
   19 |  20 |
   20 |  21 |
   21 |  22 |   func dotProduct (a,b) = {
   22 |  23 |       let product0 = fraction(a[0], b[0], 1000000)
   23 |  24 |       let product1 = fraction(a[1], b[1], 1000000)
   24 |  25 |       (product0 + product1)
   25 |  26 |   }
   26 |  27 |
   27 |  28 |
   28 |     | - func forwardPass (input,weights,biases) = {
      |  29 | + func forwardPass (input,weights,biases,layer) = {
   29 |  30 |       let sum0 = (dotProduct(input, weights[0]) + biases[0])
   30 |  31 |       let sum1 = (dotProduct(input, weights[1]) + biases[1])
   31 |     | -     let sig0 = sigmoid(sum0)
   32 |     | -     let sig1 = sigmoid(sum1)
   33 |     | -     [sig0, sig1, sum0, sum1]
      |  32 | +     let $t010051062 = sigmoid(sum0, (layer + "L1N1"))
      |  33 | +     let sigmoidDebug0 = $t010051062._1
      |  34 | +     let sig0 = $t010051062._2
      |  35 | +     let $t010671124 = sigmoid(sum1, (layer + "L1N2"))
      |  36 | +     let sigmoidDebug1 = $t010671124._1
      |  37 | +     let sig1 = $t010671124._2
      |  38 | +     $Tuple2([sig0, sig1, sum0, sum1], (sigmoidDebug0 ++ sigmoidDebug1))
   34 |  39 |   }
   35 |  40 |
   36 |  41 |
   37 |  42 |   func xorNeuralNetwork (input1,input2) = {
   38 |  43 |       let input = [input1, input2]
   39 |     | -     let hiddenLayerOutput = forwardPass(input, layer1Weights, layer1Biases)
   40 |     | -     let outputLayerSum = (dotProduct([hiddenLayerOutput[0], hiddenLayerOutput[1]], layer2Weights[0]) + layer2Biases[0])
   41 |     | -     let output = sigmoid(outputLayerSum)
   42 |     | -     [output, outputLayerSum, hiddenLayerOutput[2], hiddenLayerOutput[3]]
      |  44 | +     let $t013021394 = forwardPass(input, layer1Weights, layer1Biases, "HL")
      |  45 | +     let hiddenLayerOutput = $t013021394._1
      |  46 | +     let hiddenDebug = $t013021394._2
      |  47 | +     let $t013991534 = sigmoid((dotProduct([hiddenLayerOutput[0], hiddenLayerOutput[1]], layer2Weights[0]) + layer2Biases[0]), "OL")
      |  48 | +     let outputDebug = $t013991534._1
      |  49 | +     let output = $t013991534._2
      |  50 | +     $Tuple2([output, (dotProduct([hiddenLayerOutput[0], hiddenLayerOutput[1]], layer2Weights[0]) + layer2Biases[0]), hiddenLayerOutput[2], hiddenLayerOutput[3]], (hiddenDebug ++ outputDebug))
   43 |  51 |   }
   44 |  52 |
   45 |  53 |
   46 |  54 |   @Callable(i)
   47 |  55 |   func predict (input1,input2) = {
   48 |  56 |       let scaledInput1 = if ((input1 == 1))
   49 |  57 |           then 1000000
   50 |  58 |           else 0
   51 |  59 |       let scaledInput2 = if ((input2 == 1))
   52 |  60 |           then 1000000
   53 |  61 |           else 0
   54 |     | -     let networkOutputs = xorNeuralNetwork(scaledInput1, scaledInput2)
   55 |     | -     let result = networkOutputs[0]
   56 |     | -     let outputLayerSum = networkOutputs[1]
   57 |     | -     let hiddenLayerOutput1Sum = networkOutputs[2]
   58 |     | -     let hiddenLayerOutput2Sum = networkOutputs[3]
   59 |     | -     [IntegerEntry("result", result), IntegerEntry("outputLayerSum", outputLayerSum), IntegerEntry("hiddenLayerOutput1Sum", hiddenLayerOutput1Sum), IntegerEntry("hiddenLayerOutput2Sum", hiddenLayerOutput2Sum)]
      |  62 | +     let $t019192000 = xorNeuralNetwork(scaledInput1, scaledInput2)
      |  63 | +     let networkOutputs = $t019192000._1
      |  64 | +     let debugEntries = $t019192000._2
      |  65 | +     ([IntegerEntry("result", networkOutputs[0]), IntegerEntry("outputLayerSum", networkOutputs[1]), IntegerEntry("hiddenLayerOutput1Sum", networkOutputs[2]), IntegerEntry("hiddenLayerOutput2Sum", networkOutputs[3])] ++ debugEntries)
   60 |  66 |   }
   61 |  67 |
   62 |  68 |
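The updated script changes no weights and no arithmetic; it only threads a layer/debugPrefix argument through forwardPass and sigmoid so that each invocation also records the intermediate negativeZ and expPart values as IntegerEntry debug keys (prefixed HLL1N1, HLL1N2 and OL). All numbers are fixed-point integers scaled by 10^6, dotProduct and sigmoid are built on fraction(a, b, c) = a·b/c, and the expPart term is the linear quantity e·(-z)/base rather than a true exponential. The sketch below is a hypothetical off-chain replica of that arithmetic (not part of the contract), useful for predicting the state entries a call to predict should write; it assumes fraction rounds down (floor), so results may differ by a unit if the node truncates toward zero instead.

    # Off-chain replica of the contract's fixed-point arithmetic
    # (assumption: fraction(a, b, c) behaves like floor(a * b / c)).
    BASE = 1_000_000
    E = 2_718_281  # e scaled by 10^6

    LAYER1_WEIGHTS = [[4721113, -5002107], [6226846, -6353789]]
    LAYER1_BIASES = [-2521378, 3389498]
    LAYER2_WEIGHTS = [[8109936, -7559760]]
    LAYER2_BIASES = [3490942]

    def fraction(a, b, c):
        return (a * b) // c  # floor division

    def sigmoid(z, prefix, debug):
        neg_z = -z
        exp_part = fraction(E, neg_z, BASE)      # linear stand-in where e**(-z) would appear
        debug[prefix + "negativeZ"] = neg_z      # mirrors the new debug entries
        debug[prefix + "expPart"] = exp_part
        return fraction(BASE, BASE, BASE + exp_part)

    def dot(a, b):
        return fraction(a[0], b[0], BASE) + fraction(a[1], b[1], BASE)

    def predict(input1, input2):
        debug = {}
        x = [BASE if input1 == 1 else 0, BASE if input2 == 1 else 0]
        sums = [dot(x, LAYER1_WEIGHTS[k]) + LAYER1_BIASES[k] for k in (0, 1)]
        hidden = [sigmoid(sums[k], f"HLL1N{k + 1}", debug) for k in (0, 1)]
        out_sum = dot(hidden, LAYER2_WEIGHTS[0]) + LAYER2_BIASES[0]
        out = sigmoid(out_sum, "OL", debug)
        entries = {"result": out, "outputLayerSum": out_sum,
                   "hiddenLayerOutput1Sum": sums[0], "hiddenLayerOutput2Sum": sums[1]}
        entries.update(debug)
        return entries

    for a in (0, 1):
        for b in (0, 1):
            print(a, b, predict(a, b)["result"])  # compare with the on-chain "result" entries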