tx · FYY5e1t899S1xjoEgphoEeWuywfGRLTVAt65iBN8Pxv9
3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY: -0.01000000 Waves
2024.05.26 22:09 [3123362] smart account 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF 0.00000000 Waves
{
"type": 13,
"id": "FYY5e1t899S1xjoEgphoEeWuywfGRLTVAt65iBN8Pxv9",
"fee": 1000000,
"feeAssetId": null,
"timestamp": 1716750539690,
"version": 2,
"chainId": 84,
"sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY",
"senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc",
"proofs": [
"2qdkweuCvzG2Nn3VHBtU7C799U9TLti6Bdr5x1WtDec1HvGkuLeV4XBtGzwZ4qyXQuHTh7xvHHWcsx36b5Fp9msH"
],
"script": "base64:BwIICAISBAoCAQEIAA93ZWlnaHRzX2xheWVyXzEJAMwIAgkAzAgCAJHVAwkAzAgCAKnVAwUDbmlsCQDMCAIJAMwIAgDLwwIJAMwIAgDRwwIFA25pbAUDbmlsAA5iaWFzZXNfbGF5ZXJfMQkAzAgCAM+1/v///////wEJAMwIAgC1j/z///////8BBQNuaWwAD3dlaWdodHNfbGF5ZXJfMgkAzAgCCQDMCAIA4IoFCQDMCAIAjsP6////////AQUDbmlsBQNuaWwADmJpYXNlc19sYXllcl8yCQDMCAIAm9b9////////AQUDbmlsARBsaW5lYXJfZm9yd2FyZF8xAwVpbnB1dAd3ZWlnaHRzBmJpYXNlcwQNd2VpZ2h0ZWRfc3VtMQkAZAIJAGkCCQBkAgkAaAIJAJEDAgUFaW5wdXQAAAkAkQMCCQCRAwIFB3dlaWdodHMAAAAACQBoAgkAkQMCBQVpbnB1dAABCQCRAwIJAJEDAgUHd2VpZ2h0cwAAAAEAkE4JAJEDAgUGYmlhc2VzAAAEDXdlaWdodGVkX3N1bTIJAGQCCQBpAgkAZAIJAGgCCQCRAwIFBWlucHV0AAAJAJEDAgkAkQMCBQd3ZWlnaHRzAAEAAAkAaAIJAJEDAgUFaW5wdXQAAQkAkQMCCQCRAwIFB3dlaWdodHMAAQABAJBOCQCRAwIFBmJpYXNlcwABCQDMCAIFDXdlaWdodGVkX3N1bTEJAMwIAgUNd2VpZ2h0ZWRfc3VtMgUDbmlsARBsaW5lYXJfZm9yd2FyZF8yAwVpbnB1dAd3ZWlnaHRzBmJpYXNlcwQNd2VpZ2h0ZWRfc3VtMQkAZAIJAGkCCQBkAgkAaAIJAJEDAgUFaW5wdXQAAAkAkQMCCQCRAwIFB3dlaWdodHMAAAAACQBoAgkAkQMCBQVpbnB1dAABCQCRAwIJAJEDAgUHd2VpZ2h0cwAAAAEAkE4JAJEDAgUGYmlhc2VzAAAJAMwIAgUNd2VpZ2h0ZWRfc3VtMQUDbmlsAQdzaWdtb2lkAQVpbnB1dAMJAGYCAPCx/////////wEFBWlucHV0AAADCQBmAgUFaW5wdXQAkE4AkE4JAGQCAIgnCQBpAgUFaW5wdXQAAgESc2lnbW9pZF9hY3RpdmF0aW9uAQZpbnB1dHMJAMwIAgkBB3NpZ21vaWQBCQCRAwIFBmlucHV0cwAABQNuaWwBAWkBB3ByZWRpY3QCAngxAngyBAl4MV9zY2FsZWQJAGgCBQJ4MQCQTgQJeDJfc2NhbGVkCQBoAgUCeDIAkE4EBmlucHV0cwkAzAgCBQl4MV9zY2FsZWQJAMwIAgUJeDJfc2NhbGVkBQNuaWwEAnoxCQEQbGluZWFyX2ZvcndhcmRfMQMFBmlucHV0cwUPd2VpZ2h0c19sYXllcl8xBQ5iaWFzZXNfbGF5ZXJfMQQCYTEJARJzaWdtb2lkX2FjdGl2YXRpb24BBQJ6MQQCejIJARBsaW5lYXJfZm9yd2FyZF8yAwUCYTEFD3dlaWdodHNfbGF5ZXJfMgUOYmlhc2VzX2xheWVyXzIEAmEyCQEHc2lnbW9pZAEJAJEDAgUCejIAAAQGcmVzdWx0CQBpAgUCYTIAkE4EDWRlYnVnX291dHB1dHMFA25pbAkAlAoCBQ1kZWJ1Z19vdXRwdXRzBQZyZXN1bHQAfKg7uA==",
"height": 3123362,
"applicationStatus": "succeeded",
"spentComplexity": 0
}
Prev: 8bA5jMEQ6Vm9Rh4z9C9TER3GXZ2QKwTuAjWySbzGzVBv
Next: 6E7zaZ4wjSB9fjAqTL9Py14raWfez4jogsN994iBCpPg
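The "script" field in the transaction above is the compiled Ride code; the tables below show it decompiled and diffed against an earlier version. As a quick way to reproduce that locally, a node can be asked to decompile it. A minimal sketch, assuming a public testnet node (chainId 84 is testnet), the standard REST endpoints /transactions/info/{id} and /utils/script/decompile, and that the decompile endpoint accepts the "base64:..." string as-is; the node URL is an example, not taken from this page:

import requests

NODE = "https://nodes-testnet.wavesnodes.com"   # assumed public testnet node
TX_ID = "FYY5e1t899S1xjoEgphoEeWuywfGRLTVAt65iBN8Pxv9"

# Fetch the setScript transaction (type 13) and pull out the compiled script.
tx = requests.get(f"{NODE}/transactions/info/{TX_ID}", timeout=10).json()
assert tx["type"] == 13
compiled = tx["script"]                          # "base64:..." string

# Ask the node to decompile it back into Ride source.
decompiled = requests.post(f"{NODE}/utils/script/decompile",
                           data=compiled, timeout=10).json()
print(decompiled.get("script", decompiled))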
Diff:

Old | New |   | Differences
----|-----|---|-------------------------------------------------------------
  9 |   9 |   |
 10 |  10 |   | let biases_layer_2 = [-38117]
 11 |  11 |   |
 12 |     | - | func linear_forward (input,weights,biases) = {
    |  12 | + | func linear_forward_1 (input,weights,biases) = {
 13 |  13 |   | let weighted_sum1 = ((((input[0] * weights[0][0]) + (input[1] * weights[0][1])) / 10000) + biases[0])
 14 |  14 |   | let weighted_sum2 = ((((input[0] * weights[1][0]) + (input[1] * weights[1][1])) / 10000) + biases[1])
 15 |  15 |   | [weighted_sum1, weighted_sum2]
    |  16 | + | }
    |  17 | + |
    |  18 | + |
    |  19 | + | func linear_forward_2 (input,weights,biases) = {
    |  20 | + | let weighted_sum1 = ((((input[0] * weights[0][0]) + (input[1] * weights[0][1])) / 10000) + biases[0])
    |  21 | + | [weighted_sum1]
 16 |  22 |   | }
 17 |  23 |   |
 18 |  24 |   |
  … |   … |   | (unchanged lines omitted)
 23 |  29 |   | else (5000 + (input / 2))
 24 |  30 |   |
 25 |  31 |   |
 26 |     | - | func sigmoid_activation (inputs) = [sigmoid(inputs[0]), sigmoid(inputs[1])]
    |  32 | + | func sigmoid_activation (inputs) = [sigmoid(inputs[0])]
 27 |  33 |   |
 28 |  34 |   |
 29 |  35 |   | @Callable(i)
  … |   … |   | (unchanged lines omitted)
 31 |  37 |   | let x1_scaled = (x1 * 10000)
 32 |  38 |   | let x2_scaled = (x2 * 10000)
 33 |  39 |   | let inputs = [x1_scaled, x2_scaled]
 34 |     | - | let z1 = linear_forward(inputs, weights_layer_1, biases_layer_1)
    |  40 | + | let z1 = linear_forward_1(inputs, weights_layer_1, biases_layer_1)
 35 |  41 |   | let a1 = sigmoid_activation(z1)
 36 |     | - | let z2 = ((((a1[0] * weights_layer_2[0][0]) + (a1[1] * weights_layer_2[0][1])) / 10000) + biases_layer_2[0])
 37 |     | - | let a2 = sigmoid(z2)
    |  42 | + | let z2 = linear_forward_2(a1, weights_layer_2, biases_layer_2)
    |  43 | + | let a2 = sigmoid(z2[0])
 38 |  44 |   | let result = (a2 / 10000)
 39 |     | - | let debug_outputs = [IntegerEntry("debug_z1_1", z1[0]), IntegerEntry("debug_a1_1", a1[0]), IntegerEntry("debug_z1_2", z1[1]), IntegerEntry("debug_a1_2", a1[1]), IntegerEntry("debug_a2", a2), IntegerEntry("debug_z2", z2), IntegerEntry("debug_result", result)]
    |  45 | + | let debug_outputs = nil
 40 |  46 |   | $Tuple2(debug_outputs, result)
 41 |  47 |   | }
 42 |  48 |   |
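Because the new version writes no state entries (debug_outputs is nil), the simplest way to exercise predict is a read-only evaluation against the dApp account rather than a signed invoke. A minimal sketch, assuming the node's POST /utils/script/evaluate/{address} endpoint, the same example testnet node as above, and that the response carries the evaluated value or an evaluation error:

import requests

NODE = "https://nodes-testnet.wavesnodes.com"    # assumed public testnet node
DAPP = "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY"     # sender / dApp account above

# Evaluate the callable without broadcasting a transaction.
resp = requests.post(f"{NODE}/utils/script/evaluate/{DAPP}",
                     json={"expr": "predict(1, 0)"}, timeout=10).json()
print(resp)   # $Tuple2(debug_outputs, result) value, or an evaluation error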
Full:

Old | New |   | Differences
----|-----|---|-------------------------------------------------------------
  1 |   1 |   | {-# STDLIB_VERSION 7 #-}
  2 |   2 |   | {-# SCRIPT_TYPE ACCOUNT #-}
  3 |   3 |   | {-# CONTENT_TYPE DAPP #-}
  4 |   4 |   | let weights_layer_1 = [[60049, 60073], [41419, 41425]]
  5 |   5 |   |
  6 |   6 |   | let biases_layer_1 = [-25905, -63563]
  7 |   7 |   |
  8 |   8 |   | let weights_layer_2 = [[83296, -89714]]
  9 |   9 |   |
 10 |  10 |   | let biases_layer_2 = [-38117]
 11 |  11 |   |
 12 |     | - | func linear_forward (input,weights,biases) = {
    |  12 | + | func linear_forward_1 (input,weights,biases) = {
 13 |  13 |   | let weighted_sum1 = ((((input[0] * weights[0][0]) + (input[1] * weights[0][1])) / 10000) + biases[0])
 14 |  14 |   | let weighted_sum2 = ((((input[0] * weights[1][0]) + (input[1] * weights[1][1])) / 10000) + biases[1])
 15 |  15 |   | [weighted_sum1, weighted_sum2]
    |  16 | + | }
    |  17 | + |
    |  18 | + |
    |  19 | + | func linear_forward_2 (input,weights,biases) = {
    |  20 | + | let weighted_sum1 = ((((input[0] * weights[0][0]) + (input[1] * weights[0][1])) / 10000) + biases[0])
    |  21 | + | [weighted_sum1]
 16 |  22 |   | }
 17 |  23 |   |
 18 |  24 |   |
 19 |  25 |   | func sigmoid (input) = if ((-10000 > input))
 20 |  26 |   | then 0
 21 |  27 |   | else if ((input > 10000))
 22 |  28 |   | then 10000
 23 |  29 |   | else (5000 + (input / 2))
 24 |  30 |   |
 25 |  31 |   |
 26 |     | - | func sigmoid_activation (inputs) = [sigmoid(inputs[0]), sigmoid(inputs[1])]
    |  32 | + | func sigmoid_activation (inputs) = [sigmoid(inputs[0])]
 27 |  33 |   |
 28 |  34 |   |
 29 |  35 |   | @Callable(i)
 30 |  36 |   | func predict (x1,x2) = {
 31 |  37 |   | let x1_scaled = (x1 * 10000)
 32 |  38 |   | let x2_scaled = (x2 * 10000)
 33 |  39 |   | let inputs = [x1_scaled, x2_scaled]
 34 |     | - | let z1 = linear_forward(inputs, weights_layer_1, biases_layer_1)
    |  40 | + | let z1 = linear_forward_1(inputs, weights_layer_1, biases_layer_1)
 35 |  41 |   | let a1 = sigmoid_activation(z1)
 36 |     | - | let z2 = ((((a1[0] * weights_layer_2[0][0]) + (a1[1] * weights_layer_2[0][1])) / 10000) + biases_layer_2[0])
 37 |     | - | let a2 = sigmoid(z2)
    |  42 | + | let z2 = linear_forward_2(a1, weights_layer_2, biases_layer_2)
    |  43 | + | let a2 = sigmoid(z2[0])
 38 |  44 |   | let result = (a2 / 10000)
 39 |     | - | let debug_outputs = [IntegerEntry("debug_z1_1", z1[0]), IntegerEntry("debug_a1_1", a1[0]), IntegerEntry("debug_z1_2", z1[1]), IntegerEntry("debug_a1_2", a1[1]), IntegerEntry("debug_a2", a2), IntegerEntry("debug_z2", z2), IntegerEntry("debug_result", result)]
    |  45 | + | let debug_outputs = nil
 40 |  46 |   | $Tuple2(debug_outputs, result)
 41 |  47 |   | }
 42 |  48 |   |
 43 |  49 |   |
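The decompiled source is a 2-input, 2-hidden-unit network evaluated in fixed point with a scale factor of 10000 and a clamped linear approximation of the sigmoid. The sketch below redoes that arithmetic off-chain in Python as a sanity check. It follows the previous version's wiring, because in the new version sigmoid_activation returns a single-element list while linear_forward_2 still reads input[1], so the updated callable looks like it would stop with an index-out-of-bounds error; rounding of negative intermediate values may also differ slightly between Python's // and Ride's integer division.

# Off-chain sketch of the fixed-point forward pass from the decompiled script
# above (scale factor 10000, clamped linear "sigmoid"), following the previous
# version's wiring in which both hidden activations are kept.

W1 = [[60049, 60073], [41419, 41425]]   # weights_layer_1
B1 = [-25905, -63563]                   # biases_layer_1
W2 = [[83296, -89714]]                  # weights_layer_2
B2 = [-38117]                           # biases_layer_2
SCALE = 10000

def sigmoid(x):
    # Piecewise-linear approximation used by the contract.
    if x < -SCALE:
        return 0
    if x > SCALE:
        return SCALE
    return 5000 + x // 2

def predict(x1, x2):
    inp = [x1 * SCALE, x2 * SCALE]
    z1 = [
        (inp[0] * W1[0][0] + inp[1] * W1[0][1]) // SCALE + B1[0],
        (inp[0] * W1[1][0] + inp[1] * W1[1][1]) // SCALE + B1[1],
    ]
    a1 = [sigmoid(z1[0]), sigmoid(z1[1])]
    z2 = (a1[0] * W2[0][0] + a1[1] * W2[0][1]) // SCALE + B2[0]
    return sigmoid(z2) // SCALE

for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print((a, b), predict(a, b))   # prints 0, 1, 1, 0 for these inputs (XOR-like)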