tx · 4Y5WL8dQqwHHtb9Ei4jYd9217YqLbJ62uGymgyngzKKK
3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY: -0.01000000 Waves
2024.05.04 11:34 [3091149]
smart account 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF 0.00000000 Waves
{ "type": 13, "id": "4Y5WL8dQqwHHtb9Ei4jYd9217YqLbJ62uGymgyngzKKK", "fee": 1000000, "feeAssetId": null, "timestamp": 1714811692029, "version": 2, "chainId": 84, "sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY", "senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc", "proofs": [ "2J7ZsQc173asjfCRUFbtaS8fW1zLuZBiNCGyf3McxtzistfyN8cWHtHu9yqWkEyDmKNoSeyJb8NNjvgEMxj1mg67" ], "script": "base64:AAIFAAAAAAAAAAgIAhIECgIBAQAAAAcAAAAADWxheWVyMVdlaWdodHMJAARMAAAAAgkABEwAAAACAAAAAAAACSmwCQAETAAAAAIAAAAAAAAJKp0FAAAAA25pbAkABEwAAAACCQAETAAAAAIAAAAAAAAGUfUJAARMAAAAAgAAAAAAAAZSLQUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMUJpYXNlcwkABEwAAAACAP///////AwWCQAETAAAAAIA///////2TQsFAAAAA25pbAAAAAANbGF5ZXIyV2VpZ2h0cwkABEwAAAACCQAETAAAAAIAAAAAAAAMtcYJAARMAAAAAgD///////JPiwUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMkJpYXNlcwkABEwAAAACAP//////+i8FBQAAAANuaWwBAAAABHJlbHUAAAABAAAAAXgDCQAAZgAAAAIFAAAAAXgAAAAAAAAAAAAFAAAAAXgAAAAAAAAAAAABAAAACmRvdFByb2R1Y3QAAAACAAAAAnYxAAAAAnYyBAAAAARzdW0xCQAAaQAAAAIJAABoAAAAAgkAAZEAAAACBQAAAAJ2MQAAAAAAAAAAAAkAAZEAAAACBQAAAAJ2MgAAAAAAAAAAAAAAAAAAAAAnEAQAAAAEc3VtMgkAAGkAAAACCQAAaAAAAAIJAAGRAAAAAgUAAAACdjEAAAAAAAAAAAEJAAGRAAAAAgUAAAACdjIAAAAAAAAAAAEAAAAAAAAAJxAJAABkAAAAAgUAAAAEc3VtMQUAAAAEc3VtMgEAAAALZmVlZGZvcndhcmQAAAABAAAABmlucHV0cwQAAAANbGF5ZXIxUmVzdWx0MQkBAAAABHJlbHUAAAABCQAAZAAAAAIJAQAAAApkb3RQcm9kdWN0AAAAAgUAAAAGaW5wdXRzCQABkQAAAAIFAAAADWxheWVyMVdlaWdodHMAAAAAAAAAAAAJAAGRAAAAAgUAAAAMbGF5ZXIxQmlhc2VzAAAAAAAAAAAABAAAAA1sYXllcjFSZXN1bHQyCQEAAAAEcmVsdQAAAAEJAABkAAAAAgkBAAAACmRvdFByb2R1Y3QAAAACBQAAAAZpbnB1dHMJAAGRAAAAAgUAAAANbGF5ZXIxV2VpZ2h0cwAAAAAAAAAAAQkAAZEAAAACBQAAAAxsYXllcjFCaWFzZXMAAAAAAAAAAAEEAAAADGxheWVyMklucHV0cwkABEwAAAACBQAAAA1sYXllcjFSZXN1bHQxCQAETAAAAAIFAAAADWxheWVyMVJlc3VsdDIFAAAAA25pbAkAAGQAAAACCQEAAAAKZG90UHJvZHVjdAAAAAIFAAAADGxheWVyMklucHV0cwkAAZEAAAACBQAAAA1sYXllcjJXZWlnaHRzAAAAAAAAAAAACQABkQAAAAIFAAAADGxheWVyMkJpYXNlcwAAAAAAAAAAAAAAAAEAAAABaQEAAAAHcHJlZGljdAAAAAIAAAAGaW5wdXQxAAAABmlucHV0MgQAAAAGaW5wdXRzCQAETAAAAAIFAAAABmlucHV0MQkABEwAAAACBQAAAAZpbnB1dDIFAAAAA25pbAQAAAAKcHJlZGljdGlvbgkBAAAAC2ZlZWRmb3J3YXJkAAAAAQUAAAAGaW5wdXRzCQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACAgAAAApwcmVkaWN0aW9uBQAAAApwcmVkaWN0aW9uBQAAAANuaWwAAAAAkC33xA==", "height": 3091149, "applicationStatus": "succeeded", "spentComplexity": 0 } View: original | compacted Prev: Bwy1SyhVSdH6tc9jHiiZaCcFLvycRcpAd58odgAUgqL6 Next: CrFqEGY8DWvwsYq3yExJteW66i3hdNFg277CA3AXDGFm Diff:
Diff (old script vs. new script):

 {-# STDLIB_VERSION 5 #-}
 {-# SCRIPT_TYPE ACCOUNT #-}
 {-# CONTENT_TYPE DAPP #-}
 let layer1Weights = [[600496, 600733], [414197, 414253]]

 let layer1Biases = [-259050, -635637]

 let layer2Weights = [[832966, -897141]]

 let layer2Biases = [-381179]

-func expApprox (x) = {
-    let scaledX = fraction(x, 1, 10)
-    let scaledX2 = fraction(scaledX, scaledX, 10)
-    let term1 = (10 - scaledX)
-    let term2 = fraction(scaledX2, 5, 1)
-    (term1 + term2)
+func relu (x) = if ((x > 0))
+    then x
+    else 0
+
+
+func dotProduct (v1,v2) = {
+    let sum1 = ((v1[0] * v2[0]) / 10000)
+    let sum2 = ((v1[1] * v2[1]) / 10000)
+    (sum1 + sum2)
 }


-func sigmoid (z,debugPrefix) = {
-    let cappedZ = if ((z > 200))
-        then 200
-        else if ((-200 > z))
-            then -200
-            else z
-    let expNegZ = expApprox(-(cappedZ))
-    let onePlusExpNegZ = (10 + expNegZ)
-    let sigValue = fraction(10, onePlusExpNegZ, 1)
-    $Tuple2([IntegerEntry((debugPrefix + "inputZ"), cappedZ), IntegerEntry((debugPrefix + "expNegZ"), expNegZ), IntegerEntry((debugPrefix + "onePlusExpNegZ"), onePlusExpNegZ), IntegerEntry((debugPrefix + "sigValue"), sigValue)], sigValue)
-}
-
-
-func forwardPassLayer1 (input,weights,biases,debugPrefix) = {
-    let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + biases[0])
-    let sum1 = (((input[0] * weights[1][0]) + (input[1] * weights[1][1])) + biases[1])
-    let $t014091462 = sigmoid(sum0, "Layer1N0")
-    let debugEntries0 = $t014091462._1
-    let sig0 = $t014091462._2
-    let $t014671520 = sigmoid(sum1, "Layer1N1")
-    let debugEntries1 = $t014671520._1
-    let sig1 = $t014671520._2
-    let debugInfo = (debugEntries0 ++ debugEntries1)
-    let output = [sig0, sig1]
-    $Tuple2(debugInfo, output)
-}
-
-
-func forwardPassLayer2 (input,weights,biases,debugPrefix) = {
-    let sum0 = (((input[0] * weights[0][0]) + (input[1] * weights[0][1])) + biases[0])
-    let $t018211874 = sigmoid(sum0, "Layer2N0")
-    let debugEntries0 = $t018211874._1
-    let sig0 = $t018211874._2
-    let debugInfo = debugEntries0
-    let output = sig0
-    $Tuple2(debugInfo, output)
+func feedforward (inputs) = {
+    let layer1Result1 = relu((dotProduct(inputs, layer1Weights[0]) + layer1Biases[0]))
+    let layer1Result2 = relu((dotProduct(inputs, layer1Weights[1]) + layer1Biases[1]))
+    let layer2Inputs = [layer1Result1, layer1Result2]
+    (dotProduct(layer2Inputs, layer2Weights[0]) + layer2Biases[0])
 }


 @Callable(i)
 func predict (input1,input2) = {
-    let scaledInput1 = if ((input1 == 1))
-        then 1
-        else 0
-    let scaledInput2 = if ((input2 == 1))
-        then 1
-        else 0
-    let inputs = [scaledInput1, scaledInput2]
-    let $t021742272 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1")
-    let debugLayer1 = $t021742272._1
-    let layer1Output = $t021742272._2
-    let $t022772381 = forwardPassLayer2(layer1Output, layer2Weights, layer2Biases, "Layer2")
-    let debugLayer2 = $t022772381._1
-    let layer2Output = $t022772381._2
-    (([IntegerEntry("result", layer2Output)] ++ debugLayer1) ++ debugLayer2)
+    let inputs = [input1, input2]
+    let prediction = feedforward(inputs)
+    [IntegerEntry("prediction", prediction)]
 }
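The new script replaces the sigmoid approximation and its per-neuron debug entries with a small two-layer perceptron evaluated in integer fixed-point arithmetic: dotProduct rescales each product by 1/10000, relu clamps negatives to zero, and predict stores the single network output under the "prediction" key instead of the old "result" plus debug entries. A minimal off-chain sketch of the same math in Python, assuming callers pass inputs already scaled by 10000 (e.g. 0 or 10000 for binary inputs) and that Ride's integer "/" rounds down like Python's "//"; neither assumption is stated in the script itself:

# Off-chain replica of the contract's fixed-point forward pass.
LAYER1_WEIGHTS = [[600496, 600733], [414197, 414253]]
LAYER1_BIASES = [-259050, -635637]
LAYER2_WEIGHTS = [[832966, -897141]]
LAYER2_BIASES = [-381179]

def relu(x: int) -> int:
    # Mirrors the Ride relu: pass positives through, clamp the rest to 0.
    return x if x > 0 else 0

def dot_product(v1, v2) -> int:
    # Each product is rescaled by 1/10000 to stay in fixed-point range.
    # Assumes Ride's "/" floors like Python's "//" for negative operands.
    return (v1[0] * v2[0]) // 10000 + (v1[1] * v2[1]) // 10000

def feedforward(inputs) -> int:
    h1 = relu(dot_product(inputs, LAYER1_WEIGHTS[0]) + LAYER1_BIASES[0])
    h2 = relu(dot_product(inputs, LAYER1_WEIGHTS[1]) + LAYER1_BIASES[1])
    return dot_product([h1, h2], LAYER2_WEIGHTS[0]) + LAYER2_BIASES[0]

# Hypothetical invocations; on-chain, the "prediction" entry would hold
# the same integer for the same two inputs.
for a, b in [(0, 0), (0, 10000), (10000, 0), (10000, 10000)]:
    print((a, b), feedforward([a, b]))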