tx · 6zJ8QRcPZvRhEXXc7Bhy38mZu4RMyVgBXbvcTVw8gMMR 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY: -0.01000000 Waves 2024.04.28 13:14 [3082576] smart account 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF 0.00000000 Waves
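For reference, the SetScript (type 13) transaction body shown in the JSON below can be re-fetched from any Waves Testnet node (chainId 84). A minimal sketch, assuming the public node at https://nodes-testnet.wavesnodes.com and its GET /transactions/info/{id} endpoint; the helper name is illustrative:

```python
# Hypothetical fetch of this transaction from a public Testnet node; the node
# URL is an assumption and is not part of the explorer page itself.
import json
import urllib.request

NODE = "https://nodes-testnet.wavesnodes.com"           # assumed public Testnet node
TX_ID = "6zJ8QRcPZvRhEXXc7Bhy38mZu4RMyVgBXbvcTVw8gMMR"  # SetScript tx shown below

def fetch_tx(tx_id: str) -> dict:
    """GET /transactions/info/{id} returns the transaction JSON shown below."""
    with urllib.request.urlopen(f"{NODE}/transactions/info/{tx_id}") as resp:
        return json.load(resp)

if __name__ == "__main__":
    tx = fetch_tx(TX_ID)
    print(tx["type"], tx["height"], tx["applicationStatus"])
    print(tx["script"][:40], "...")  # base64-encoded account script
```

The `script` field of the response carries the same `base64:AAIF…` blob that is embedded in the JSON below and decompiled in the diff that follows.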
{ "type": 13, "id": "6zJ8QRcPZvRhEXXc7Bhy38mZu4RMyVgBXbvcTVw8gMMR", "fee": 1000000, "feeAssetId": null, "timestamp": 1714299254677, "version": 2, "chainId": 84, "sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY", "senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc", "proofs": [ "49dYRZjTq5Cc5H1eUmvwxtLoCZqxupCxbEKuLqtc6qvNDyPGPw5TEUYhBbXGmPeC9SLtxU7fDwT7BvCqb8DhboE7" ], "script": "base64:AAIFAAAAAAAAAAgIAhIECgIBAQAAAAkAAAAADWxheWVyMVdlaWdodHMJAARMAAAAAgkABEwAAAACAAAAAAAACSmxCQAETAAAAAIAAAAAAAAJKpwFAAAAA25pbAkABEwAAAACCQAETAAAAAIAAAAAAAAGUfUJAARMAAAAAgAAAAAAAAZSLQUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMUJpYXNlcwkABEwAAAACAP///////AwVCQAETAAAAAIA///////2TQoFAAAAA25pbAAAAAANbGF5ZXIyV2VpZ2h0cwkABEwAAAACCQAETAAAAAIAAAAAAAAMtcYJAARMAAAAAgD///////JPigUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMkJpYXNlcwkABEwAAAACAP//////+i8FBQAAAANuaWwBAAAABmNsYW1wWgAAAAIAAAABegAAAAVsaW1pdAMJAABmAAAAAgUAAAABegUAAAAFbGltaXQFAAAABWxpbWl0AwkAAGYAAAACCQEAAAABLQAAAAEFAAAABWxpbWl0BQAAAAF6CQEAAAABLQAAAAEFAAAABWxpbWl0BQAAAAF6AQAAAApleHBfYXBwcm94AAAAAQAAAAF4BAAAAARiYXNlAAAAAAAAD0JABAAAAAdzY2FsZWRYCQAAaQAAAAIFAAAAAXgAAAAAAAAAJxADCQAAZgAAAAIAAAAAAAAAAAAFAAAAAXgJAABpAAAAAgUAAAAEYmFzZQkAAGQAAAACAAAAAAAAAAABCQAAaAAAAAIFAAAABGJhc2UJAQAAAAEtAAAAAQUAAAAHc2NhbGVkWAkAAGQAAAACAAAAAAAAAAABCQAAaAAAAAIFAAAABGJhc2UFAAAAB3NjYWxlZFgBAAAAB3NpZ21vaWQAAAACAAAAAXoAAAALZGVidWdQcmVmaXgEAAAACGNsYW1wZWRaCQEAAAAGY2xhbXBaAAAAAgUAAAABegAAAAAAAAGGoAQAAAAJcG9zaXRpdmVaAwkAAGYAAAACAAAAAAAAAAAABQAAAAF6CQEAAAABLQAAAAEFAAAAAXoFAAAAAXoEAAAACGV4cFZhbHVlCQEAAAAKZXhwX2FwcHJveAAAAAEJAQAAAAEtAAAAAQUAAAAJcG9zaXRpdmVaBAAAAAhzaWdWYWx1ZQkAAGkAAAACAAAAAAAAD0JACQAAZAAAAAIAAAAAAAAPQkAFAAAACGV4cFZhbHVlCQAFFAAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAIJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAACGNsYW1wZWRaBQAAAAhjbGFtcGVkWgkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgkAASwAAAACBQAAAAtkZWJ1Z1ByZWZpeAIAAAAJcG9zaXRpdmVaBQAAAAlwb3NpdGl2ZVoJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAIJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAACGV4cFZhbHVlBQAAAAhleHBWYWx1ZQkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgkAASwAAAACBQAAAAtkZWJ1Z1ByZWZpeAIAAAAIc2lnVmFsdWUFAAAACHNpZ1ZhbHVlBQAAAANuaWwFAAAACHNpZ1ZhbHVlAQAAABFmb3J3YXJkUGFzc0xheWVyMQAAAAQAAAAFaW5wdXQAAAAHd2VpZ2h0cwAAAAZiaWFzZXMAAAALZGVidWdQcmVmaXgEAAAABHN1bTAJAABkAAAAAgkAAGQAAAACCQAAaAAAAAIJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAAAJAAGRAAAAAgkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAAAAAAAAAAAAAAACQAAaAAAAAIJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAAEJAAGRAAAAAgkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAAAAAAAAAAAAAABCQAAaAAAAAIJAAGRAAAAAgUAAAAGYmlhc2VzAAAAAAAAAAAAAAAAAAAAAYagBAAAAARzdW0xCQAAZAAAAAIJAABkAAAAAgkAAGgAAAACCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAAACQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAAkAAGgAAAACCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAABCQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAQkAAGgAAAACCQABkQAAAAIFAAAABmJpYXNlcwAAAAAAAAAAAQAAAAAAAAGGoAQAAAALJHQwMTUyNjE1NzkJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMAIAAAAITGF5ZXIxTjAEAAAADWRlYnVnRW50cmllczAIBQAAAAskdDAxNTI2MTU3OQAAAAJfMQQAAAAEc2lnMAgFAAAACyR0MDE1MjYxNTc5AAAAAl8yBAAAAAskdDAxNTg0MTYzNwkBAAAAB3NpZ21vaWQAAAACBQAAAARzdW0xAgAAAAhMYXllcjFOMQQAAAANZGVidWdFbnRyaWVzMQgFAAAACyR0MDE1ODQxNjM3AAAAAl8xBAAAAARzaWcxCAUAAAALJHQwMTU4NDE2MzcAAAACXzIEAAAACWRlYnVnSW5mbwkABE4AAAACBQAAAA1kZWJ1Z0VudHJpZXMwBQAAAA1kZWJ1Z0VudHJpZXMxBAAAAAZvdXRwdXQJAARMAAAAAgUAAAAEc2lnMAkABEwAAAACBQAAAARzaWcxBQAAAANuaWwJAAUUAAAAAgUAAAAJZGVidWdJbmZvBQAAAAZvdXRwdXQBAAAAEWZvcndhcmRQYXNzTGF5ZXIyAAAABAAAAAVpbnB1dAAAAAd3ZWlnaHRzAAAABmJpYXNlcwAAAAtkZWJ1Z1ByZWZpeAQAAAAEc3VtMAkAAGQAAAACCQAAZAAAAAIJAABoAAAAAgkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAAkAAZEAAAACCQABkQAAAA
IFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAAJAABoAAAAAgkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAQkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAEJAABoAAAAAgkAAZEAAAACBQAAAAZiaWFzZXMAAAAAAAAAAAAAAAAAAAABhqAEAAAACyR0MDE5NDcyMDAwCQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTACAAAACExheWVyMk4wBAAAAA1kZWJ1Z0VudHJpZXMwCAUAAAALJHQwMTk0NzIwMDAAAAACXzEEAAAABHNpZzAIBQAAAAskdDAxOTQ3MjAwMAAAAAJfMgQAAAAJZGVidWdJbmZvBQAAAA1kZWJ1Z0VudHJpZXMwBAAAAAZvdXRwdXQFAAAABHNpZzAJAAUUAAAAAgUAAAAJZGVidWdJbmZvBQAAAAZvdXRwdXQAAAABAAAAAWkBAAAAB3ByZWRpY3QAAAACAAAABmlucHV0MQAAAAZpbnB1dDIEAAAADHNjYWxlZElucHV0MQMJAAAAAAAAAgUAAAAGaW5wdXQxAAAAAAAAAAABAAAAAAAAD0JAAAAAAAAAAAAABAAAAAxzY2FsZWRJbnB1dDIDCQAAAAAAAAIFAAAABmlucHV0MgAAAAAAAAAAAQAAAAAAAA9CQAAAAAAAAAAAAAQAAAAGaW5wdXRzCQAETAAAAAIFAAAADHNjYWxlZElucHV0MQkABEwAAAACBQAAAAxzY2FsZWRJbnB1dDIFAAAAA25pbAQAAAALJHQwMjMxMjI0MTAJAQAAABFmb3J3YXJkUGFzc0xheWVyMQAAAAQFAAAABmlucHV0cwUAAAANbGF5ZXIxV2VpZ2h0cwUAAAAMbGF5ZXIxQmlhc2VzAgAAAAZMYXllcjEEAAAAC2RlYnVnTGF5ZXIxCAUAAAALJHQwMjMxMjI0MTAAAAACXzEEAAAADGxheWVyMU91dHB1dAgFAAAACyR0MDIzMTIyNDEwAAAAAl8yBAAAAAskdDAyNDE1MjUxOQkBAAAAEWZvcndhcmRQYXNzTGF5ZXIyAAAABAUAAAAMbGF5ZXIxT3V0cHV0BQAAAA1sYXllcjJXZWlnaHRzBQAAAAxsYXllcjJCaWFzZXMCAAAABkxheWVyMgQAAAALZGVidWdMYXllcjIIBQAAAAskdDAyNDE1MjUxOQAAAAJfMQQAAAAMbGF5ZXIyT3V0cHV0CAUAAAALJHQwMjQxNTI1MTkAAAACXzIJAAROAAAAAgkABE4AAAACCQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACAgAAAAZyZXN1bHQFAAAADGxheWVyMk91dHB1dAUAAAADbmlsBQAAAAtkZWJ1Z0xheWVyMQUAAAALZGVidWdMYXllcjIAAAAAwW9Mew==", "height": 3082576, "applicationStatus": "succeeded", "spentComplexity": 0 } View: original | compacted Prev: 7s1h3jYoYnAwi8pPGmy5HUUL4Ybuoz6K6kVB7GwRyRrV Next: Yxm27VmnSiXh83CHjo1DgvTCSkak33fbPnBnSF6qgCS Diff:
Old | New | Differences | |
---|---|---|---|
1 | 1 | {-# STDLIB_VERSION 5 #-} | |
2 | 2 | {-# SCRIPT_TYPE ACCOUNT #-} | |
3 | 3 | {-# CONTENT_TYPE DAPP #-} | |
4 | 4 | let layer1Weights = [[600497, 600732], [414197, 414253]] | |
5 | 5 | ||
6 | - | let layer1Biases = [- | |
6 | + | let layer1Biases = [-259051, -635638] | |
7 | 7 | ||
8 | - | let layer2Weights = [[ | |
8 | + | let layer2Weights = [[832966, -897142]] | |
9 | 9 | ||
10 | 10 | let layer2Biases = [-381179] | |
11 | 11 | ||
12 | + | func clampZ (z,limit) = if ((z > limit)) | |
13 | + | then limit | |
14 | + | else if ((-(limit) > z)) | |
15 | + | then -(limit) | |
16 | + | else z | |
17 | + | ||
18 | + | ||
19 | + | func exp_approx (x) = { | |
20 | + | let base = 1000000 | |
21 | + | let scaledX = (x / 10000) | |
22 | + | if ((0 > x)) | |
23 | + | then (base / (1 + (base * -(scaledX)))) | |
24 | + | else (1 + (base * scaledX)) | |
25 | + | } | |
26 | + | ||
27 | + | ||
12 | 28 | func sigmoid (z,debugPrefix) = { | |
13 | - | let e = 2718281 | |
14 | - | let base = 1000000 | |
15 | - | let scaledZ = (z / 10000) | |
16 | - | let expPart = fraction(e, base, -(scaledZ)) | |
17 | - | let sigValue = fraction(base, (base + expPart), base) | |
18 | - | $Tuple2([IntegerEntry((debugPrefix + "z"), z), IntegerEntry((debugPrefix + "expPart"), expPart), IntegerEntry((debugPrefix + "sigValue"), sigValue)], sigValue) | |
29 | + | let clampedZ = clampZ(z, 100000) | |
30 | + | let positiveZ = if ((0 > z)) | |
31 | + | then -(z) | |
32 | + | else z | |
33 | + | let expValue = exp_approx(-(positiveZ)) | |
34 | + | let sigValue = (1000000 / (1000000 + expValue)) | |
35 | + | $Tuple2([IntegerEntry((debugPrefix + "clampedZ"), clampedZ), IntegerEntry((debugPrefix + "positiveZ"), positiveZ), IntegerEntry((debugPrefix + "expValue"), expValue), IntegerEntry((debugPrefix + "sigValue"), sigValue)], sigValue) | |
19 | 36 | } | |
20 | 37 | ||
21 | 38 | ||
22 | 39 | func forwardPassLayer1 (input,weights,biases,debugPrefix) = { | |
23 | 40 | let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + (biases[0] * 100000)) | |
24 | 41 | let sum1 = (((input[0] * weights[1][0]) + (input[1] * weights[1][1])) + (biases[1] * 100000)) | |
25 | - | let $ | |
26 | - | let debugEntries0 = $ | |
27 | - | let sig0 = $ | |
28 | - | let $ | |
29 | - | let debugEntries1 = $ | |
30 | - | let sig1 = $ | |
42 | + | let $t015261579 = sigmoid(sum0, "Layer1N0") | |
43 | + | let debugEntries0 = $t015261579._1 | |
44 | + | let sig0 = $t015261579._2 | |
45 | + | let $t015841637 = sigmoid(sum1, "Layer1N1") | |
46 | + | let debugEntries1 = $t015841637._1 | |
47 | + | let sig1 = $t015841637._2 | |
31 | 48 | let debugInfo = (debugEntries0 ++ debugEntries1) | |
32 | 49 | let output = [sig0, sig1] | |
33 | 50 | $Tuple2(debugInfo, output) | |
34 | 51 | } | |
35 | 52 | ||
36 | 53 | ||
37 | 54 | func forwardPassLayer2 (input,weights,biases,debugPrefix) = { | |
38 | 55 | let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + (biases[0] * 100000)) | |
39 | - | let $ | |
40 | - | let debugEntries0 = $ | |
41 | - | let sig0 = $ | |
56 | + | let $t019472000 = sigmoid(sum0, "Layer2N0") | |
57 | + | let debugEntries0 = $t019472000._1 | |
58 | + | let sig0 = $t019472000._2 | |
42 | 59 | let debugInfo = debugEntries0 | |
43 | 60 | let output = sig0 | |
44 | 61 | $Tuple2(debugInfo, output) | |
45 | 62 | } | |
46 | 63 | ||
47 | 64 | ||
48 | 65 | @Callable(i) | |
49 | 66 | func predict (input1,input2) = { | |
50 | 67 | let scaledInput1 = if ((input1 == 1)) | |
51 | 68 | then 1000000 | |
52 | 69 | else 0 | |
53 | 70 | let scaledInput2 = if ((input2 == 1)) | |
54 | 71 | then 1000000 | |
55 | 72 | else 0 | |
56 | 73 | let inputs = [scaledInput1, scaledInput2] | |
57 | - | let $ | |
58 | - | let debugLayer1 = $ | |
59 | - | let layer1Output = $ | |
60 | - | let $ | |
61 | - | let debugLayer2 = $ | |
62 | - | let layer2Output = $ | |
74 | + | let $t023122410 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1") | |
75 | + | let debugLayer1 = $t023122410._1 | |
76 | + | let layer1Output = $t023122410._2 | |
77 | + | let $t024152519 = forwardPassLayer2(layer1Output, layer2Weights, layer2Biases, "Layer2") | |
78 | + | let debugLayer2 = $t024152519._1 | |
79 | + | let layer2Output = $t024152519._2 | |
63 | 80 | (([IntegerEntry("result", layer2Output)] ++ debugLayer1) ++ debugLayer2) | |
64 | 81 | } | |
65 | 82 | ||
66 | 83 |
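The decompiled script in the diff above implements a small 2-2-1 feed-forward network entirely in integer arithmetic: inputs and weights are scaled by 1e6, biases are additionally multiplied by 1e5 inside each layer sum, and the sigmoid is built from the piecewise exp_approx. Below is a minimal off-chain sketch in Python that mirrors that arithmetic; it assumes Ride's integer `/` truncates toward zero (for the magnitudes involved the final result is the same under floor division), and all function names are mine, not the contract's.

```python
# Off-chain replay of the fixed-point forward pass from the decompiled script above.
# Constants are copied from the "New" column of the diff; helpers are illustrative.

LAYER1_WEIGHTS = [[600497, 600732], [414197, 414253]]
LAYER1_BIASES = [-259051, -635638]
LAYER2_WEIGHTS = [[832966, -897142]]
LAYER2_BIASES = [-381179]

def tdiv(a: int, b: int) -> int:
    """Integer division truncating toward zero (assumed Ride `/` behaviour)."""
    q = abs(a) // abs(b)
    return q if (a >= 0) == (b >= 0) else -q

def exp_approx(x: int) -> int:
    # Mirrors exp_approx: a 1e6-scaled rational approximation of e^x.
    base = 1_000_000
    scaled_x = tdiv(x, 10_000)
    if 0 > x:
        return tdiv(base, 1 + base * -scaled_x)
    return 1 + base * scaled_x

def sigmoid(z: int) -> int:
    # Mirrors sigmoid (debug entries omitted). clampZ is evaluated in the
    # contract only for a debug entry and never feeds the result, so it is
    # skipped here. Note that only |z| is used, so the sign of z is dropped.
    positive_z = -z if 0 > z else z
    exp_value = exp_approx(-positive_z)
    return tdiv(1_000_000, 1_000_000 + exp_value)

def forward_layer(inputs, weights, biases):
    # Mirrors forwardPassLayer1/forwardPassLayer2: weighted sum + bias * 1e5.
    return [
        sigmoid(inputs[0] * w[0] + inputs[1] * w[1] + b * 100_000)
        for w, b in zip(weights, biases)
    ]

def predict(input1: int, input2: int) -> int:
    # Mirrors the @Callable predict: inputs {0,1} are scaled to {0, 1_000_000}.
    inputs = [1_000_000 if input1 == 1 else 0, 1_000_000 if input2 == 1 else 0]
    hidden = forward_layer(inputs, LAYER1_WEIGHTS, LAYER1_BIASES)
    return forward_layer(hidden, LAYER2_WEIGHTS, LAYER2_BIASES)[0]

if __name__ == "__main__":
    for a in (0, 1):
        for b in (0, 1):
            print(f"predict({a}, {b}) -> result entry = {predict(a, b)}")
```

Under these assumptions the replay yields result = 1 for all four input pairs: the 1e6-scaled pre-activations are so large that exp_approx returns 0, and because sigmoid works on |z| the sign of the pre-activation never reaches the stored value.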