tx · 5K3SesFjoRF1HkRqSncDmVU95ew5cKw5DxSrJdSM3sXu 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY: -0.01000000 Waves 2024.04.28 13:59 [3082630] smart account 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF 0.00000000 Waves
{ "type": 13, "id": "5K3SesFjoRF1HkRqSncDmVU95ew5cKw5DxSrJdSM3sXu", "fee": 1000000, "feeAssetId": null, "timestamp": 1714301865213, "version": 2, "chainId": 84, "sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY", "senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc", "proofs": [ "2MeJbQyFn1ZSDpv2Pic5PwkEDF8QEPAGoSqmGPbspgDhALhYVZoabYY3jN5BQkEcsn9k4QZbY6gRrC3PJbPbph45" ], "script": "base64:AAIFAAAAAAAAAAgIAhIECgIBAQAAAAkAAAAADWxheWVyMVdlaWdodHMJAARMAAAAAgkABEwAAAACAAAAAAAACSmwCQAETAAAAAIAAAAAAAAJKp0FAAAAA25pbAkABEwAAAACCQAETAAAAAIAAAAAAAAGUfUJAARMAAAAAgAAAAAAAAZSLAUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMUJpYXNlcwkABEwAAAACAP///////AwWCQAETAAAAAIA///////2TQsFAAAAA25pbAAAAAANbGF5ZXIyV2VpZ2h0cwkABEwAAAACCQAETAAAAAIAAAAAAAAMtcUJAARMAAAAAgD///////JPigUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMkJpYXNlcwkABEwAAAACAP//////+i8FBQAAAANuaWwBAAAABmNsYW1wWgAAAAIAAAABegAAAAVsaW1pdAMJAABmAAAAAgUAAAABegUAAAAFbGltaXQFAAAABWxpbWl0AwkAAGYAAAACCQEAAAABLQAAAAEFAAAABWxpbWl0BQAAAAF6CQEAAAABLQAAAAEFAAAABWxpbWl0BQAAAAF6AQAAAApleHBfYXBwcm94AAAAAQAAAAF4BAAAAAZtYXhFeHAAAAAAAAABhqADCQAAZgAAAAIJAQAAAAEtAAAAAQUAAAAGbWF4RXhwBQAAAAF4AAAAAAAAAAABAwkAAGYAAAACBQAAAAF4BQAAAAZtYXhFeHAAAAAAADuaygAEAAAACHNjYWxlZF94CQAAaQAAAAIFAAAAAXgAAAAAAAAAJxAEAAAACXNjYWxlZF94MgkBAAAACGZyYWN0aW9uAAAABAUAAAAIc2NhbGVkX3gFAAAACHNjYWxlZF94AAAAAAAAACcQBQAAAARET1dOBAAAAApleHBfcmVzdWx0CQAAZAAAAAIJAABlAAAAAgAAAAAAAAAnEAUAAAAIc2NhbGVkX3gJAABpAAAAAgUAAAAJc2NhbGVkX3gyAAAAAAAAAAACCQAAZQAAAAIAAAAAAAAAJxAFAAAACmV4cF9yZXN1bHQBAAAAB3NpZ21vaWQAAAACAAAAAXoAAAALZGVidWdQcmVmaXgEAAAACGNsYW1wZWRaCQEAAAAGY2xhbXBaAAAAAgUAAAABegAAAAAAAAGGoAQAAAAIZXhwVmFsdWUJAQAAAApleHBfYXBwcm94AAAAAQkBAAAAAS0AAAABBQAAAAhjbGFtcGVkWgQAAAAIc2lnVmFsdWUJAQAAAAhmcmFjdGlvbgAAAAQAAAAAAAAAJxAJAABkAAAAAgAAAAAAAAAnEAUAAAAIZXhwVmFsdWUAAAAAAAAAAAEFAAAABERPV04JAAUUAAAAAgkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgkAASwAAAACBQAAAAtkZWJ1Z1ByZWZpeAIAAAAIY2xhbXBlZFoFAAAACGNsYW1wZWRaCQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACCQABLAAAAAIFAAAAC2RlYnVnUHJlZml4AgAAAAhleHBWYWx1ZQUAAAAIZXhwVmFsdWUJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAIJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAACHNpZ1ZhbHVlBQAAAAhzaWdWYWx1ZQUAAAADbmlsBQAAAAhzaWdWYWx1ZQEAAAARZm9yd2FyZFBhc3NMYXllcjEAAAAEAAAABWlucHV0AAAAB3dlaWdodHMAAAAGYmlhc2VzAAAAC2RlYnVnUHJlZml4BAAAAARzdW0wCQAAZAAAAAIJAABkAAAAAgkAAGgAAAACCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAAACQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAAAAAAAAAAAAAAkAAGgAAAACCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAABCQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAAAAAAAAAAAAAQkAAGgAAAACCQABkQAAAAIFAAAABmJpYXNlcwAAAAAAAAAAAAAAAAAAAAGGoAQAAAAEc3VtMQkAAGQAAAACCQAAZAAAAAIJAABoAAAAAgkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAAkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAEAAAAAAAAAAAAJAABoAAAAAgkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAQkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAEAAAAAAAAAAAEJAABoAAAAAgkAAZEAAAACBQAAAAZiaWFzZXMAAAAAAAAAAAEAAAAAAAABhqAEAAAACyR0MDE3NjUxODE4CQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTACAAAACExheWVyMU4wBAAAAA1kZWJ1Z0VudHJpZXMwCAUAAAALJHQwMTc2NTE4MTgAAAACXzEEAAAABHNpZzAIBQAAAAskdDAxNzY1MTgxOAAAAAJfMgQAAAALJHQwMTgyMzE4NzYJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMQIAAAAITGF5ZXIxTjEEAAAADWRlYnVnRW50cmllczEIBQAAAAskdDAxODIzMTg3NgAAAAJfMQQAAAAEc2lnMQgFAAAACyR0MDE4MjMxODc2AAAAAl8yBAAAAAlkZWJ1Z0luZm8JAAROAAAAAgUAAAANZGVidWdFbnRyaWVzMAUAAAANZGVidWdFbnRyaWVzMQQAAAAGb3V0cHV0CQAETAAAAAIFAAAABHNpZzAJAARMAAAAAgUAAAAEc2lnMQUAAAADbmlsCQAFFAAAAAIFAAAACWRlYnVnSW5mbwUAAAAGb3V0cHV0AQAAABFmb3J3YXJkUGFzc0xheWVyMgAAAAQAAAAFaW5wdXQAAAAHd2VpZ2h0cwAAAAZiaWFzZXMAAAALZGVidWdQcmVmaXgEAAAABHN1bTAJAABkAAAAAgkAAGQAAAACCQAAaAAAAAIJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAA
AJAAGRAAAAAgkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAAAAAAAAAAAAAAACQAAaAAAAAIJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAAEJAAGRAAAAAgkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAAAAAAAAAAAAAABCQAAaAAAAAIJAAGRAAAAAgUAAAAGYmlhc2VzAAAAAAAAAAAAAAAAAAAAAYagBAAAAAskdDAyMTg2MjIzOQkBAAAAB3NpZ21vaWQAAAACBQAAAARzdW0wAgAAAAhMYXllcjJOMAQAAAANZGVidWdFbnRyaWVzMAgFAAAACyR0MDIxODYyMjM5AAAAAl8xBAAAAARzaWcwCAUAAAALJHQwMjE4NjIyMzkAAAACXzIEAAAACWRlYnVnSW5mbwUAAAANZGVidWdFbnRyaWVzMAQAAAAGb3V0cHV0BQAAAARzaWcwCQAFFAAAAAIFAAAACWRlYnVnSW5mbwUAAAAGb3V0cHV0AAAAAQAAAAFpAQAAAAdwcmVkaWN0AAAAAgAAAAZpbnB1dDEAAAAGaW5wdXQyBAAAAAxzY2FsZWRJbnB1dDEDCQAAAAAAAAIFAAAABmlucHV0MQAAAAAAAAAAAQAAAAAAAA9CQAAAAAAAAAAAAAQAAAAMc2NhbGVkSW5wdXQyAwkAAAAAAAACBQAAAAZpbnB1dDIAAAAAAAAAAAEAAAAAAAAPQkAAAAAAAAAAAAAEAAAABmlucHV0cwkABEwAAAACBQAAAAxzY2FsZWRJbnB1dDEJAARMAAAAAgUAAAAMc2NhbGVkSW5wdXQyBQAAAANuaWwEAAAACyR0MDI1NTEyNjQ5CQEAAAARZm9yd2FyZFBhc3NMYXllcjEAAAAEBQAAAAZpbnB1dHMFAAAADWxheWVyMVdlaWdodHMFAAAADGxheWVyMUJpYXNlcwIAAAAGTGF5ZXIxBAAAAAtkZWJ1Z0xheWVyMQgFAAAACyR0MDI1NTEyNjQ5AAAAAl8xBAAAAAxsYXllcjFPdXRwdXQIBQAAAAskdDAyNTUxMjY0OQAAAAJfMgQAAAALJHQwMjY1NDI3NTgJAQAAABFmb3J3YXJkUGFzc0xheWVyMgAAAAQFAAAADGxheWVyMU91dHB1dAUAAAANbGF5ZXIyV2VpZ2h0cwUAAAAMbGF5ZXIyQmlhc2VzAgAAAAZMYXllcjIEAAAAC2RlYnVnTGF5ZXIyCAUAAAALJHQwMjY1NDI3NTgAAAACXzEEAAAADGxheWVyMk91dHB1dAgFAAAACyR0MDI2NTQyNzU4AAAAAl8yCQAETgAAAAIJAAROAAAAAgkABEwAAAACCQEAAAAMSW50ZWdlckVudHJ5AAAAAgIAAAAGcmVzdWx0BQAAAAxsYXllcjJPdXRwdXQFAAAAA25pbAUAAAALZGVidWdMYXllcjEFAAAAC2RlYnVnTGF5ZXIyAAAAAKS23mA=", "height": 3082630, "applicationStatus": "succeeded", "spentComplexity": 0 } View: original | compacted Prev: BQxK6Q9RxFkuvKPme7hUqZ728mZQD9T4F4MFJRiKaPvb Next: 8Tiq9PrpLGFMXBGGABe7MVibryxyyrcnbvTnKbFxbEWk Diff:
Old | New | Differences | |
---|---|---|---|
1 | 1 | {-# STDLIB_VERSION 5 #-} | |
2 | 2 | {-# SCRIPT_TYPE ACCOUNT #-} | |
3 | 3 | {-# CONTENT_TYPE DAPP #-} | |
4 | - | let layer1Weights = [[600496, | |
4 | + | let layer1Weights = [[600496, 600733], [414197, 414252]] | |
5 | 5 | ||
6 | - | let layer1Biases = [- | |
6 | + | let layer1Biases = [-259050, -635637] | |
7 | 7 | ||
8 | - | let layer2Weights = [[832965, - | |
8 | + | let layer2Weights = [[832965, -897142]] | |
9 | 9 | ||
10 | 10 | let layer2Biases = [-381179] | |
11 | 11 | ||
12 | 12 | func clampZ (z,limit) = if ((z > limit)) | |
13 | 13 | then limit | |
14 | 14 | else if ((-(limit) > z)) | |
15 | 15 | then -(limit) | |
16 | 16 | else z | |
17 | 17 | ||
18 | 18 | ||
19 | 19 | func exp_approx (x) = { | |
20 | - | let scaled_x = (x / 10000) | |
21 | - | let scaled_x2 = fraction(scaled_x, scaled_x, 1, DOWN) | |
22 | - | let scaled_x3 = fraction(scaled_x2, scaled_x, 1, DOWN) | |
23 | - | let exp_result = (((10000 - fraction(scaled_x, 10, 1, DOWN)) + fraction(scaled_x2, 200, 1, DOWN)) - fraction(scaled_x3, 6000, 1, DOWN)) | |
24 | - | if ((0 > x)) | |
25 | - | then (10000 + exp_result) | |
26 | - | else (10000 - exp_result) | |
20 | + | let maxExp = 100000 | |
21 | + | if ((-(maxExp) > x)) | |
22 | + | then 1 | |
23 | + | else if ((x > maxExp)) | |
24 | + | then 1000000000 | |
25 | + | else { | |
26 | + | let scaled_x = (x / 10000) | |
27 | + | let scaled_x2 = fraction(scaled_x, scaled_x, 10000, DOWN) | |
28 | + | let exp_result = ((10000 - scaled_x) + (scaled_x2 / 2)) | |
29 | + | (10000 - exp_result) | |
30 | + | } | |
27 | 31 | } | |
28 | 32 | ||
29 | 33 | ||
30 | 34 | func sigmoid (z,debugPrefix) = { | |
31 | 35 | let clampedZ = clampZ(z, 100000) | |
32 | - | let positiveZ = if ((0 > z)) | |
33 | - | then -(z) | |
34 | - | else z | |
35 | - | let expValue = exp_approx(-(positiveZ)) | |
36 | + | let expValue = exp_approx(-(clampedZ)) | |
36 | 37 | let sigValue = fraction(10000, (10000 + expValue), 1, DOWN) | |
37 | - | $Tuple2([IntegerEntry((debugPrefix + " | |
38 | + | $Tuple2([IntegerEntry((debugPrefix + "clampedZ"), clampedZ), IntegerEntry((debugPrefix + "expValue"), expValue), IntegerEntry((debugPrefix + "sigValue"), sigValue)], sigValue) | |
38 | 39 | } | |
39 | 40 | ||
40 | 41 | ||
41 | 42 | func forwardPassLayer1 (input,weights,biases,debugPrefix) = { | |
42 | 43 | let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + (biases[0] * 100000)) | |
43 | 44 | let sum1 = (((input[0] * weights[1][0]) + (input[1] * weights[1][1])) + (biases[1] * 100000)) | |
44 | - | let $ | |
45 | - | let debugEntries0 = $ | |
46 | - | let sig0 = $ | |
47 | - | let $ | |
48 | - | let debugEntries1 = $ | |
49 | - | let sig1 = $ | |
45 | + | let $t017651818 = sigmoid(sum0, "Layer1N0") | |
46 | + | let debugEntries0 = $t017651818._1 | |
47 | + | let sig0 = $t017651818._2 | |
48 | + | let $t018231876 = sigmoid(sum1, "Layer1N1") | |
49 | + | let debugEntries1 = $t018231876._1 | |
50 | + | let sig1 = $t018231876._2 | |
50 | 51 | let debugInfo = (debugEntries0 ++ debugEntries1) | |
51 | 52 | let output = [sig0, sig1] | |
52 | 53 | $Tuple2(debugInfo, output) | |
53 | 54 | } | |
54 | 55 | ||
55 | 56 | ||
56 | 57 | func forwardPassLayer2 (input,weights,biases,debugPrefix) = { | |
57 | 58 | let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + (biases[0] * 100000)) | |
58 | - | let $ | |
59 | - | let debugEntries0 = $ | |
60 | - | let sig0 = $ | |
59 | + | let $t021862239 = sigmoid(sum0, "Layer2N0") | |
60 | + | let debugEntries0 = $t021862239._1 | |
61 | + | let sig0 = $t021862239._2 | |
61 | 62 | let debugInfo = debugEntries0 | |
62 | 63 | let output = sig0 | |
63 | 64 | $Tuple2(debugInfo, output) | |
64 | 65 | } | |
65 | 66 | ||
66 | 67 | ||
67 | 68 | @Callable(i) | |
68 | 69 | func predict (input1,input2) = { | |
69 | 70 | let scaledInput1 = if ((input1 == 1)) | |
70 | 71 | then 1000000 | |
71 | 72 | else 0 | |
72 | 73 | let scaledInput2 = if ((input2 == 1)) | |
73 | 74 | then 1000000 | |
74 | 75 | else 0 | |
75 | 76 | let inputs = [scaledInput1, scaledInput2] | |
76 | - | let $ | |
77 | - | let debugLayer1 = $ | |
78 | - | let layer1Output = $ | |
79 | - | let $ | |
80 | - | let debugLayer2 = $ | |
81 | - | let layer2Output = $ | |
77 | + | let $t025512649 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1") | |
78 | + | let debugLayer1 = $t025512649._1 | |
79 | + | let layer1Output = $t025512649._2 | |
80 | + | let $t026542758 = forwardPassLayer2(layer1Output, layer2Weights, layer2Biases, "Layer2") | |
81 | + | let debugLayer2 = $t026542758._1 | |
82 | + | let layer2Output = $t026542758._2 | |
82 | 83 | (([IntegerEntry("result", layer2Output)] ++ debugLayer1) ++ debugLayer2) | |
83 | 84 | } | |
84 | 85 | ||
85 | 86 |
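The new script implements a tiny 2-2-1 feed-forward network over two boolean inputs in integer fixed-point arithmetic, writing the output to the `result` key plus per-neuron debug entries. For readers who want to sanity-check those values, below is a minimal off-chain sketch in Python — an illustrative reproduction under stated assumptions, not the author's tooling — that mirrors the decompiled `clampZ`, `exp_approx`, `sigmoid` helpers and the `predict` flow with the new weights. Rounding of negative intermediates may differ slightly between Python's floor division and Ride's Int semantics, so treat values near rounding boundaries as approximate.

```python
# Off-chain sketch of the decompiled Ride helpers (assumption: plain Python,
# not the author's tooling). Rounding of negative intermediates may differ
# from Ride's Int division / fraction(..., DOWN) semantics.

def clamp_z(z: int, limit: int) -> int:
    # clampZ: saturate z to [-limit, limit]
    if z > limit:
        return limit
    if -limit > z:
        return -limit
    return z

def exp_approx(x: int) -> int:
    # Two-term series for e^x in 4-decimal fixed point (10000 == 1.0),
    # exactly as decompiled: saturates outside +/-100000.
    max_exp = 100000
    if -max_exp > x:
        return 1
    if x > max_exp:
        return 1_000_000_000
    scaled_x = x // 10000                          # Ride: x / 10000
    scaled_x2 = (scaled_x * scaled_x) // 10000     # Ride: fraction(scaled_x, scaled_x, 10000, DOWN)
    exp_result = (10000 - scaled_x) + scaled_x2 // 2
    return 10000 - exp_result

def sigmoid(z: int, debug_prefix: str):
    # Mirrors the decompiled sigmoid. As decompiled, sigValue is
    # fraction(10000, 10000 + expValue, 1, DOWN), i.e. the *product*
    # 10000 * (10000 + expValue); the sketch reproduces that literally.
    clamped_z = clamp_z(z, 100000)
    exp_value = exp_approx(-clamped_z)
    sig_value = 10000 * (10000 + exp_value)
    debug = {
        debug_prefix + "clampedZ": clamped_z,
        debug_prefix + "expValue": exp_value,
        debug_prefix + "sigValue": sig_value,
    }
    return debug, sig_value

# Weights and biases from the new script version.
LAYER1_WEIGHTS = [[600496, 600733], [414197, 414252]]
LAYER1_BIASES = [-259050, -635637]
LAYER2_WEIGHTS = [[832965, -897142]]
LAYER2_BIASES = [-381179]

def predict(input1: int, input2: int) -> dict:
    # Mirrors the @Callable predict: an input equal to 1 is scaled to 1_000_000.
    scaled = [1_000_000 if input1 == 1 else 0, 1_000_000 if input2 == 1 else 0]
    entries = {}
    # Layer 1: two neurons, bias scaled by 100000 as in forwardPassLayer1.
    hidden = []
    for n in range(2):
        s = (scaled[0] * LAYER1_WEIGHTS[n][0]
             + scaled[1] * LAYER1_WEIGHTS[n][1]
             + LAYER1_BIASES[n] * 100000)
        dbg, sig = sigmoid(s, f"Layer1N{n}")
        entries.update(dbg)
        hidden.append(sig)
    # Layer 2: single output neuron, as in forwardPassLayer2.
    s = (hidden[0] * LAYER2_WEIGHTS[0][0]
         + hidden[1] * LAYER2_WEIGHTS[0][1]
         + LAYER2_BIASES[0] * 100000)
    dbg, out = sigmoid(s, "Layer2N0")
    entries.update(dbg)
    entries["result"] = out
    return entries

if __name__ == "__main__":
    for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        print((a, b), predict(a, b)["result"])
```

Because the decompiled sigmoid multiplies by (10000 + expValue) rather than dividing by it, the stored sigValue entries are large integers instead of a 0–10000 fixed-point probability; the sketch reproduces that behaviour as-is rather than "correcting" it.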
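After `predict` has been invoked, the `result` key and the per-neuron debug entries live in the dApp account's data storage on testnet (chainId 84). The snippet below is a hedged example of reading them back over a node's REST API; the endpoints are the standard `/addresses/scriptInfo/{address}` and `/addresses/data/{address}` routes of the Waves node API, and the public testnet node URL is an assumption.

```python
import requests

NODE = "https://nodes-testnet.wavesnodes.com"   # assumed public testnet node (chainId 84 = 'T')
DAPP = "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY"    # the smart account from this transaction

# Script currently attached to the account (base64 body and complexity).
info = requests.get(f"{NODE}/addresses/scriptInfo/{DAPP}").json()
print("complexity:", info.get("complexity"))

# The "result" integer entry written by predict(); the node returns an error
# JSON if predict has not been invoked yet.
print(requests.get(f"{NODE}/addresses/data/{DAPP}/result").json())

# All account data entries, filtered client-side to the per-neuron debug keys
# (Layer1N0clampedZ, Layer1N0expValue, Layer1N0sigValue, ...).
for entry in requests.get(f"{NODE}/addresses/data/{DAPP}").json():
    if entry["key"].startswith("Layer"):
        print(entry["key"], entry["value"])
```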