tx · Bwy1SyhVSdH6tc9jHiiZaCcFLvycRcpAd58odgAUgqL6 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY: -0.01000000 Waves 2024.04.28 15:31 [3082711] smart account 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF 0.00000000 Waves
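This is a testnet SetScript transaction (type 13; chainId 84 is 'T', the Waves testnet). The raw JSON shown below can be pulled from any node that indexes this account; the following is a minimal sketch using only the Python standard library — the public node URL and the /transactions/info endpoint are assumptions based on the usual Waves node REST API, not something taken from this page.

```python
# Hypothetical fetch of the SetScript transaction shown on this page.
# NODE is an assumed public testnet node; only the tx id comes from the page.
import json
import urllib.request

NODE = "https://nodes-testnet.wavesnodes.com"  # assumption: public testnet node
TX_ID = "Bwy1SyhVSdH6tc9jHiiZaCcFLvycRcpAd58odgAUgqL6"

with urllib.request.urlopen(f"{NODE}/transactions/info/{TX_ID}") as resp:
    tx = json.load(resp)

# Fields printed here are the ones visible in the JSON dump below.
print(tx["type"], tx["sender"], tx["applicationStatus"])
```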
{ "type": 13, "id": "Bwy1SyhVSdH6tc9jHiiZaCcFLvycRcpAd58odgAUgqL6", "fee": 1000000, "feeAssetId": null, "timestamp": 1714307469199, "version": 2, "chainId": 84, "sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY", "senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc", "proofs": [ "5Dy3YdSSkx8xAf1ta7NX2KCiKL3Z9wLffx9FSTyWJyri2UUrkyWhXyrnnb7mXurx84ijdrDA62P1PGqRWUi8xy4B" ], "script": "base64:AAIFAAAAAAAAAAgIAhIECgIBAQAAAAgAAAAADWxheWVyMVdlaWdodHMJAARMAAAAAgkABEwAAAACAAAAAAAACSmwCQAETAAAAAIAAAAAAAAJKp0FAAAAA25pbAkABEwAAAACCQAETAAAAAIAAAAAAAAGUfUJAARMAAAAAgAAAAAAAAZSLQUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMUJpYXNlcwkABEwAAAACAP///////AwWCQAETAAAAAIA///////2TQsFAAAAA25pbAAAAAANbGF5ZXIyV2VpZ2h0cwkABEwAAAACCQAETAAAAAIAAAAAAAAMtcYJAARMAAAAAgD///////JPiwUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMkJpYXNlcwkABEwAAAACAP//////+i8FBQAAAANuaWwBAAAACWV4cEFwcHJveAAAAAEAAAABeAQAAAAHc2NhbGVkWAkAAGsAAAADBQAAAAF4AAAAAAAAAAABAAAAAAAAAAAKBAAAAAhzY2FsZWRYMgkAAGsAAAADBQAAAAdzY2FsZWRYBQAAAAdzY2FsZWRYAAAAAAAAAAAKBAAAAAV0ZXJtMQkAAGUAAAACAAAAAAAAAAAKBQAAAAdzY2FsZWRYBAAAAAV0ZXJtMgkAAGsAAAADBQAAAAhzY2FsZWRYMgAAAAAAAAAABQAAAAAAAAAAAQkAAGQAAAACBQAAAAV0ZXJtMQUAAAAFdGVybTIBAAAAB3NpZ21vaWQAAAACAAAAAXoAAAALZGVidWdQcmVmaXgEAAAAB2NhcHBlZFoDCQAAZgAAAAIFAAAAAXoAAAAAAAAAAMgAAAAAAAAAAMgDCQAAZgAAAAIA/////////zgFAAAAAXoA/////////zgFAAAAAXoEAAAAB2V4cE5lZ1oJAQAAAAlleHBBcHByb3gAAAABCQEAAAABLQAAAAEFAAAAB2NhcHBlZFoEAAAADm9uZVBsdXNFeHBOZWdaCQAAZAAAAAIAAAAAAAAAAAoFAAAAB2V4cE5lZ1oEAAAACHNpZ1ZhbHVlCQAAawAAAAMAAAAAAAAAAAoFAAAADm9uZVBsdXNFeHBOZWdaAAAAAAAAAAABCQAFFAAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAIJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAABmlucHV0WgUAAAAHY2FwcGVkWgkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgkAASwAAAACBQAAAAtkZWJ1Z1ByZWZpeAIAAAAHZXhwTmVnWgUAAAAHZXhwTmVnWgkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgkAASwAAAACBQAAAAtkZWJ1Z1ByZWZpeAIAAAAOb25lUGx1c0V4cE5lZ1oFAAAADm9uZVBsdXNFeHBOZWdaCQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACCQABLAAAAAIFAAAAC2RlYnVnUHJlZml4AgAAAAhzaWdWYWx1ZQUAAAAIc2lnVmFsdWUFAAAAA25pbAUAAAAIc2lnVmFsdWUBAAAAEWZvcndhcmRQYXNzTGF5ZXIxAAAABAAAAAVpbnB1dAAAAAd3ZWlnaHRzAAAABmJpYXNlcwAAAAtkZWJ1Z1ByZWZpeAQAAAAEc3VtMAkAAGQAAAACCQAAZAAAAAIJAABoAAAAAgkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAAkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAAJAABoAAAAAgkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAQkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAEJAAGRAAAAAgUAAAAGYmlhc2VzAAAAAAAAAAAABAAAAARzdW0xCQAAZAAAAAIJAABkAAAAAgkAAGgAAAACCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAAACQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAAkAAGgAAAACCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAABCQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAQkAAZEAAAACBQAAAAZiaWFzZXMAAAAAAAAAAAEEAAAACyR0MDE0MDkxNDYyCQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTACAAAACExheWVyMU4wBAAAAA1kZWJ1Z0VudHJpZXMwCAUAAAALJHQwMTQwOTE0NjIAAAACXzEEAAAABHNpZzAIBQAAAAskdDAxNDA5MTQ2MgAAAAJfMgQAAAALJHQwMTQ2NzE1MjAJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMQIAAAAITGF5ZXIxTjEEAAAADWRlYnVnRW50cmllczEIBQAAAAskdDAxNDY3MTUyMAAAAAJfMQQAAAAEc2lnMQgFAAAACyR0MDE0NjcxNTIwAAAAAl8yBAAAAAlkZWJ1Z0luZm8JAAROAAAAAgUAAAANZGVidWdFbnRyaWVzMAUAAAANZGVidWdFbnRyaWVzMQQAAAAGb3V0cHV0CQAETAAAAAIFAAAABHNpZzAJAARMAAAAAgUAAAAEc2lnMQUAAAADbmlsCQAFFAAAAAIFAAAACWRlYnVnSW5mbwUAAAAGb3V0cHV0AQAAABFmb3J3YXJkUGFzc0xheWVyMgAAAAQAAAAFaW5wdXQAAAAHd2VpZ2h0cwAAAAZiaWFzZXMAAAALZGVidWdQcmVmaXgEAAAABHN1bTAJAABkAAAAAgkAAGQAAAACCQAAaAAAAAIJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAAAJAAGRAAAAAgkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAAAAAAAAAAAAAAACQAAaAAAAAIJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAAEJAAGRAAAAAgkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAAAAAAAAAAAAAABCQABkQAAAA
IFAAAABmJpYXNlcwAAAAAAAAAAAAQAAAALJHQwMTgyMTE4NzQJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMAIAAAAITGF5ZXIyTjAEAAAADWRlYnVnRW50cmllczAIBQAAAAskdDAxODIxMTg3NAAAAAJfMQQAAAAEc2lnMAgFAAAACyR0MDE4MjExODc0AAAAAl8yBAAAAAlkZWJ1Z0luZm8FAAAADWRlYnVnRW50cmllczAEAAAABm91dHB1dAUAAAAEc2lnMAkABRQAAAACBQAAAAlkZWJ1Z0luZm8FAAAABm91dHB1dAAAAAEAAAABaQEAAAAHcHJlZGljdAAAAAIAAAAGaW5wdXQxAAAABmlucHV0MgQAAAAMc2NhbGVkSW5wdXQxAwkAAAAAAAACBQAAAAZpbnB1dDEAAAAAAAAAAAEAAAAAAAAAAAEAAAAAAAAAAAAEAAAADHNjYWxlZElucHV0MgMJAAAAAAAAAgUAAAAGaW5wdXQyAAAAAAAAAAABAAAAAAAAAAABAAAAAAAAAAAABAAAAAZpbnB1dHMJAARMAAAAAgUAAAAMc2NhbGVkSW5wdXQxCQAETAAAAAIFAAAADHNjYWxlZElucHV0MgUAAAADbmlsBAAAAAskdDAyMTc0MjI3MgkBAAAAEWZvcndhcmRQYXNzTGF5ZXIxAAAABAUAAAAGaW5wdXRzBQAAAA1sYXllcjFXZWlnaHRzBQAAAAxsYXllcjFCaWFzZXMCAAAABkxheWVyMQQAAAALZGVidWdMYXllcjEIBQAAAAskdDAyMTc0MjI3MgAAAAJfMQQAAAAMbGF5ZXIxT3V0cHV0CAUAAAALJHQwMjE3NDIyNzIAAAACXzIEAAAACyR0MDIyNzcyMzgxCQEAAAARZm9yd2FyZFBhc3NMYXllcjIAAAAEBQAAAAxsYXllcjFPdXRwdXQFAAAADWxheWVyMldlaWdodHMFAAAADGxheWVyMkJpYXNlcwIAAAAGTGF5ZXIyBAAAAAtkZWJ1Z0xheWVyMggFAAAACyR0MDIyNzcyMzgxAAAAAl8xBAAAAAxsYXllcjJPdXRwdXQIBQAAAAskdDAyMjc3MjM4MQAAAAJfMgkABE4AAAACCQAETgAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAICAAAABnJlc3VsdAUAAAAMbGF5ZXIyT3V0cHV0BQAAAANuaWwFAAAAC2RlYnVnTGF5ZXIxBQAAAAtkZWJ1Z0xheWVyMgAAAAAMksVa", "height": 3082711, "applicationStatus": "succeeded", "spentComplexity": 0 } View: original | compacted Prev: 9TNLmNWabdFsX6poDq4zXG3jVPsTx2rkDGxqQChk5qUw Next: 4Y5WL8dQqwHHtb9Ei4jYd9217YqLbJ62uGymgyngzKKK Diff:
Old | New | Differences | |
---|---|---|---|
1 | 1 | {-# STDLIB_VERSION 5 #-} | |
2 | 2 | {-# SCRIPT_TYPE ACCOUNT #-} | |
3 | 3 | {-# CONTENT_TYPE DAPP #-} | |
4 | - | let layer1Weights = [[ | |
4 | + | let layer1Weights = [[600496, 600733], [414197, 414253]] | |
5 | 5 | ||
6 | - | let layer1Biases = [- | |
6 | + | let layer1Biases = [-259050, -635637] | |
7 | 7 | ||
8 | - | let layer2Weights = [[832966, - | |
8 | + | let layer2Weights = [[832966, -897141]] | |
9 | 9 | ||
10 | 10 | let layer2Biases = [-381179] | |
11 | 11 | ||
12 | 12 | func expApprox (x) = { | |
13 | 13 | let scaledX = fraction(x, 1, 10) | |
14 | 14 | let scaledX2 = fraction(scaledX, scaledX, 10) | |
15 | 15 | let term1 = (10 - scaledX) | |
16 | 16 | let term2 = fraction(scaledX2, 5, 1) | |
17 | 17 | (term1 + term2) | |
18 | 18 | } | |
19 | 19 | ||
20 | 20 | ||
21 | 21 | func sigmoid (z,debugPrefix) = { | |
22 | - | let expNegZ = expApprox(-(z)) | |
22 | + | let cappedZ = if ((z > 200)) | |
23 | + | then 200 | |
24 | + | else if ((-200 > z)) | |
25 | + | then -200 | |
26 | + | else z | |
27 | + | let expNegZ = expApprox(-(cappedZ)) | |
23 | 28 | let onePlusExpNegZ = (10 + expNegZ) | |
24 | 29 | let sigValue = fraction(10, onePlusExpNegZ, 1) | |
25 | - | $Tuple2([IntegerEntry((debugPrefix + "inputZ"), | |
30 | + | $Tuple2([IntegerEntry((debugPrefix + "inputZ"), cappedZ), IntegerEntry((debugPrefix + "expNegZ"), expNegZ), IntegerEntry((debugPrefix + "onePlusExpNegZ"), onePlusExpNegZ), IntegerEntry((debugPrefix + "sigValue"), sigValue)], sigValue) | |
26 | 31 | } | |
27 | 32 | ||
28 | 33 | ||
29 | 34 | func forwardPassLayer1 (input,weights,biases,debugPrefix) = { | |
30 | 35 | let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + biases[0]) | |
31 | 36 | let sum1 = (((input[0] * weights[1][0]) + (input[1] * weights[1][1])) + biases[1]) | |
32 | - | let $ | |
33 | - | let debugEntries0 = $ | |
34 | - | let sig0 = $ | |
35 | - | let $ | |
36 | - | let debugEntries1 = $ | |
37 | - | let sig1 = $ | |
37 | + | let $t014091462 = sigmoid(sum0, "Layer1N0") | |
38 | + | let debugEntries0 = $t014091462._1 | |
39 | + | let sig0 = $t014091462._2 | |
40 | + | let $t014671520 = sigmoid(sum1, "Layer1N1") | |
41 | + | let debugEntries1 = $t014671520._1 | |
42 | + | let sig1 = $t014671520._2 | |
38 | 43 | let debugInfo = (debugEntries0 ++ debugEntries1) | |
39 | 44 | let output = [sig0, sig1] | |
40 | 45 | $Tuple2(debugInfo, output) | |
41 | 46 | } | |
42 | 47 | ||
43 | 48 | ||
44 | 49 | func forwardPassLayer2 (input,weights,biases,debugPrefix) = { | |
45 | 50 | let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + biases[0]) | |
46 | - | let $ | |
47 | - | let debugEntries0 = $ | |
48 | - | let sig0 = $ | |
51 | + | let $t018211874 = sigmoid(sum0, "Layer2N0") | |
52 | + | let debugEntries0 = $t018211874._1 | |
53 | + | let sig0 = $t018211874._2 | |
49 | 54 | let debugInfo = debugEntries0 | |
50 | 55 | let output = sig0 | |
51 | 56 | $Tuple2(debugInfo, output) | |
52 | 57 | } | |
53 | 58 | ||
54 | 59 | ||
55 | 60 | @Callable(i) | |
56 | 61 | func predict (input1,input2) = { | |
57 | 62 | let scaledInput1 = if ((input1 == 1)) | |
58 | 63 | then 1 | |
59 | 64 | else 0 | |
60 | 65 | let scaledInput2 = if ((input2 == 1)) | |
61 | 66 | then 1 | |
62 | 67 | else 0 | |
63 | 68 | let inputs = [scaledInput1, scaledInput2] | |
64 | - | let $ | |
65 | - | let debugLayer1 = $ | |
66 | - | let layer1Output = $ | |
67 | - | let $ | |
68 | - | let debugLayer2 = $ | |
69 | - | let layer2Output = $ | |
69 | + | let $t021742272 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1") | |
70 | + | let debugLayer1 = $t021742272._1 | |
71 | + | let layer1Output = $t021742272._2 | |
72 | + | let $t022772381 = forwardPassLayer2(layer1Output, layer2Weights, layer2Biases, "Layer2") | |
73 | + | let debugLayer2 = $t022772381._1 | |
74 | + | let layer2Output = $t022772381._2 | |
70 | 75 | (([IntegerEntry("result", layer2Output)] ++ debugLayer1) ++ debugLayer2) | |
71 | 76 | } | |
72 | 77 | ||
73 | 78 |
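The decompiled script is a tiny feed-forward network with hard-coded integer weights: two 0/1 inputs, two sigmoid neurons in layer 1, one sigmoid neuron in layer 2, with every intermediate value written back as an IntegerEntry for debugging. The main change in this version is the new cappedZ clamp of z to [-200, 200] in sigmoid before expApprox is applied. Below is a minimal off-chain sketch in Python that mirrors the new script's arithmetic line by line; it is an illustration, not part of the page. Assumptions: Ride's fraction(a, b, c) is modelled as floor((a * b) / c), and the on-chain IntegerEntry debug writes are collected in a plain dict. The quirks of the decompiled source are reproduced as-is (for example, sigValue = fraction(10, onePlusExpNegZ, 1) multiplies rather than divides).

```python
# Off-chain sketch of the decompiled Ride dApp (new version). Constants are the
# hard-coded weights/biases from the script; everything else mirrors its logic.

LAYER1_WEIGHTS = [[600496, 600733], [414197, 414253]]
LAYER1_BIASES  = [-259050, -635637]
LAYER2_WEIGHTS = [[832966, -897141]]
LAYER2_BIASES  = [-381179]

def fraction(a: int, b: int, c: int) -> int:
    # Assumption: Ride's fraction(a, b, c) behaves like multiply-then-divide
    # with rounding down; Python's // floors, which is the model used here.
    return (a * b) // c

def exp_approx(x: int) -> int:
    # Mirrors expApprox: a two-term polynomial in the script's scaled units.
    scaled_x = fraction(x, 1, 10)
    scaled_x2 = fraction(scaled_x, scaled_x, 10)
    term1 = 10 - scaled_x
    term2 = fraction(scaled_x2, 5, 1)
    return term1 + term2

def sigmoid(z: int, debug_prefix: str, debug: dict) -> int:
    # Mirrors the new sigmoid: clamp z to [-200, 200] (the change this diff
    # introduces), then apply the integer arithmetic exactly as decompiled.
    capped_z = 200 if z > 200 else (-200 if -200 > z else z)
    exp_neg_z = exp_approx(-capped_z)
    one_plus_exp_neg_z = 10 + exp_neg_z
    sig_value = fraction(10, one_plus_exp_neg_z, 1)
    # Keys imitate the script's IntegerEntry debug keys, e.g. "Layer1N0inputZ".
    debug[debug_prefix + "inputZ"] = capped_z
    debug[debug_prefix + "expNegZ"] = exp_neg_z
    debug[debug_prefix + "onePlusExpNegZ"] = one_plus_exp_neg_z
    debug[debug_prefix + "sigValue"] = sig_value
    return sig_value

def forward_layer1(inputs, weights, biases, debug):
    sum0 = inputs[0] * weights[0][0] + inputs[1] * weights[0][1] + biases[0]
    sum1 = inputs[0] * weights[1][0] + inputs[1] * weights[1][1] + biases[1]
    return [sigmoid(sum0, "Layer1N0", debug), sigmoid(sum1, "Layer1N1", debug)]

def forward_layer2(inputs, weights, biases, debug):
    sum0 = inputs[0] * weights[0][0] + inputs[1] * weights[0][1] + biases[0]
    return sigmoid(sum0, "Layer2N0", debug)

def predict(input1: int, input2: int):
    # Mirrors the @Callable predict: inputs are normalised to 0/1, then fed
    # through the 2-2-1 network; "result" is the value the script stores.
    debug = {}
    inputs = [1 if input1 == 1 else 0, 1 if input2 == 1 else 0]
    layer1_out = forward_layer1(inputs, LAYER1_WEIGHTS, LAYER1_BIASES, debug)
    result = forward_layer2(layer1_out, LAYER2_WEIGHTS, LAYER2_BIASES, debug)
    return result, debug

if __name__ == "__main__":
    for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        result, _ = predict(a, b)
        print(a, b, "->", result)
```

Running the sketch over the four 0/1 input pairs should reproduce the values the on-chain predict call would write under the "result" key and the debug entries, provided fraction really rounds as modelled above.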