diff --git a/.gitignore b/.gitignore
index c565b431f4..3833c499a2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,8 @@
/.idea
/bin
/mainnetdb
+/spent-addresses-db
+/spent-addresses-log
/mainnet.log
/*.iml
.classpath
@@ -87,4 +89,4 @@ $RECYCLE.BIN/
*.lnk
-# End of https://www.gitignore.io/api/osx,linux,windows
\ No newline at end of file
+# End of https://www.gitignore.io/api/osx,linux,windows
diff --git a/Security.MD b/Security.MD
new file mode 100644
index 0000000000..18366470fd
--- /dev/null
+++ b/Security.MD
@@ -0,0 +1,23 @@
+
org.ini4j
ini4j
@@ -410,6 +417,9 @@
not used anymore
org.reflections:reflections:0.9.10:jar:null:compile:3812159b1b4b7c296fa80f53eed6362961eb85da
-->
+
+ net.openhft:zero-allocation-hashing:${zero-allocation-hashing.version}:jar:null:compile:9e32cfe154dca19e6024d2faa6027d6cad8cea3c
+
com.jayway.restassured:rest-assured:2.9.0:jar:null:test:d0d5b6720a58472ab99287c931a8205373d6e7b2
diff --git a/python-regression/IXI/LocalSnapshots.ixi/README.md b/python-regression/IXI/LocalSnapshots.ixi/README.md
new file mode 100644
index 0000000000..3286f37afd
--- /dev/null
+++ b/python-regression/IXI/LocalSnapshots.ixi/README.md
@@ -0,0 +1,2 @@
+# LocalSnapshots.ixi
+A tool for fetching data from a running node's local snapshots.
diff --git a/python-regression/IXI/LocalSnapshots.ixi/index.js b/python-regression/IXI/LocalSnapshots.ixi/index.js
new file mode 100644
index 0000000000..e8a7199ff3
--- /dev/null
+++ b/python-regression/IXI/LocalSnapshots.ixi/index.js
@@ -0,0 +1,58 @@
+var System = java.lang.System;
+
+var snapshotState = com.iota.iri.service.snapshot.snapshotState;
+var snapshotMeta = com.iota.iri.service.snapshot.snapshotMetaData;
+
+var snapshotProvider = IOTA.snapshotProvider;
+
+var iri = com.iota.iri;
+var Callable = iri.service.CallableRequest;
+var Response = iri.service.dto.IXIResponse;
+var ErrorResponse = iri.service.dto.ErrorResponse;
+
+
+function getInitialSnapshotClone(){
+ return snapshotProvider.getInitialSnapshot().clone();
+}
+
+function getLatestSnapshotClone(){
+ return snapshotProvider.getLatestSnapshot().clone();
+}
+
+
+function getSnapshotState(){
+ var ledgerState = getInitialSnapshotClone();
+
+ return Response.create({
+ index: ledgerState.getIndex(),
+ state: ledgerState.getBalances()
+ });
+
+}
+
+function getSnapshotMetaData(){
+ var ledgerState = getInitialSnapshotClone();
+
+ return Response.create({
+ index: ledgerState.getIndex(),
+ metaData: ledgerState.getSeenMilestones()
+ });
+}
+
+
+function getSnapshotIndexes(){
+ //console.log("Fetching balances");
+ var ledgerState = getLatestSnapshotClone();
+
+ return Response.create({
+ initialIndex: ledgerState.getInitialIndex(),
+ currentIndex: ledgerState.getIndex(),
+ });
+}
+
+
+
+
+API.put("getIndexes", new Callable({ call: getSnapshotIndexes }))
+API.put("getState", new Callable({ call: getSnapshotState }))
+API.put("getMetaData", new Callable({ call: getSnapshotMetaData }))
diff --git a/python-regression/IXI/LocalSnapshots.ixi/package.json b/python-regression/IXI/LocalSnapshots.ixi/package.json
new file mode 100644
index 0000000000..14ab704d8f
--- /dev/null
+++ b/python-regression/IXI/LocalSnapshots.ixi/package.json
@@ -0,0 +1,3 @@
+{
+ "main": "index.js"
+}
diff --git a/python-regression/setup.py b/python-regression/setup.py
index 624f1db2f5..834f184bdf 100644
--- a/python-regression/setup.py
+++ b/python-regression/setup.py
@@ -8,6 +8,7 @@
install_requires=[
'pyota',
'aloe',
- 'pyyaml'
- ]
- )
\ No newline at end of file
+ 'pyyaml',
+ ]
+ )
+
diff --git a/python-regression/tests/features/machine1/1_api_tests.feature b/python-regression/tests/features/machine1/1_api_tests.feature
index 135210f904..4b06dd878d 100644
--- a/python-regression/tests/features/machine1/1_api_tests.feature
+++ b/python-regression/tests/features/machine1/1_api_tests.feature
@@ -55,10 +55,13 @@ Feature: Test API calls on Machine 1
|keys |
|address |
|numberOfAllTransactions |
- |numberOfRandomTransactionRequests |
|numberOfNewTransactions |
|numberOfInvalidTransactions |
+ |numberOfStaleTransactions |
|numberOfSentTransactions |
+ |numberOfRandomTransactionRequests |
+ |numberOfDroppedSentPackets |
+ |connected |
|connectionType |
diff --git a/python-regression/tests/features/machine6/6_local_snapshots_tests.feature b/python-regression/tests/features/machine6/6_local_snapshots_tests.feature
new file mode 100644
index 0000000000..e05d0497a8
--- /dev/null
+++ b/python-regression/tests/features/machine6/6_local_snapshots_tests.feature
@@ -0,0 +1,79 @@
+Feature: Test Bootstrapping With LS
+  A test to determine whether nodes can bootstrap and sync correctly from Local Snapshot Files and DBs. One
+ permanode will be started containing all the relevant files/folders for a full sync upon start. Two more nodes will
+ be started, connected to this node and one another: One will have only a DB and snapshot file, while the other will
+ have only the snapshot meta and state file, along with the spent addresses DB and the snapshot file. All three nodes
+ should sync with one another. And a snapshot should be taken on the node started with just a DB.
+ [NodeA: Permanode, NodeB: Just DB, NodeC: Just LS Files]
+
+ Scenario: PermaNode is synced
+ Check that the permanode has been started correctly and is synced.
+
+ #First make sure nodes are neighbored
+ Given "nodeA" and "nodeB" are neighbors
+ And "nodeA" and "nodeC" are neighbors
+
+ When we wait "30" second/seconds
+ Then "nodeA" is synced up to milestone 10321
+
+
+ Scenario: DB node is synced, and files contain expected values
+ Check that the node started with just a DB is synced correctly, and that the proper addresses and hashes have been
+ stored correctly.
+
+ #First make sure nodes are neighbored
+ Given "nodeB" and "nodeA" are neighbors
+ And "nodeB" and "nodeC" are neighbors
+
+ #Give the node time to finish syncing properly, then make sure that the node is synced to the latest milestone.
+ And we wait "30" second/seconds
+ Then "nodeB" is synced up to milestone 10321
+ And A local snapshot was taken on "nodeB" at index 10220
+
+ When reading the local snapshot state on "nodeB" returns with:
+ |keys |values |type |
+ |address |LS_TEST_STATE_ADDRESSES |staticValue |
+
+ And reading the local snapshot metadata on "nodeB" returns with:
+ |keys |values |type |
+ |hashes |LS_TEST_MILESTONE_HASHES |staticValue |
+
+
+ Scenario: LS File node is synced
+ Check that the node started with just LS Files is synced correctly.
+
+ #First make sure nodes are neighbored
+ Given "nodeC" and "nodeA" are neighbors
+ And "nodeC" and "nodeB" are neighbors
+
+ #Give the node time to finish syncing properly, then make sure that the node is synced to the latest milestone.
+ When we wait "30" second/seconds
+ Then "nodeC" is synced up to milestone 10321
+
+
+ Scenario: Check DB for milestone hashes
+ Give the db-less node some time to receive the latest milestones from the permanode, then check if the milestones
+ are present in the new node.
+
+ #First make sure nodes are neighbored
+ Given "nodeC" and "nodeA" are neighbors
+ And we wait "60" second/seconds
+
+ When "checkConsistency" is called on "nodeC" with:
+ |keys |values |type |
+ |tails |LS_TEST_MILESTONE_HASHES |staticValue |
+
+ Then the response for "checkConsistency" should return with:
+ |keys |values |type |
+ |state |True |bool |
+
+
+ Scenario: Old transactions are pruned
+ Takes a node with a large db and transaction pruning enabled, and checks to make sure that the transactions below
+ the pruning depth are no longer present.
+
+ Given "checkConsistency" is called on "nodeD" with:
+ |keys |values |type |
+ |tails |LS_PRUNED_TRANSACTIONS |staticValue |
+
+ Then the response for "checkConsistency" should return null
\ No newline at end of file
diff --git a/python-regression/tests/features/machine6/config.yml b/python-regression/tests/features/machine6/config.yml
new file mode 100644
index 0000000000..595919a3ad
--- /dev/null
+++ b/python-regression/tests/features/machine6/config.yml
@@ -0,0 +1,60 @@
+default_args: &args
+ ['--testnet-coordinator',
+ 'EFPNKGPCBXXXLIBYFGIGYBYTFFPIOQVNNVVWTTIYZO9NFREQGVGDQQHUUQ9CLWAEMXVDFSSMOTGAHVIBH',
+ '--mwm',
+ '1',
+ '--milestone-start',
+ '0',
+ '--testnet-no-coo-validation',
+ 'true',
+ '--testnet',
+ 'true',
+ '--snapshot',
+ './snapshot.txt',
+ '--local-snapshots-pruning-enabled',
+ 'true',
+ '--local-snapshots-pruning-delay',
+ '10000'
+ ]
+
+default_ixi: &ixi
+ ['IXI/LocalSnapshots.ixi']
+
+java_options: -agentlib:jdwp=transport=dt_socket,server=y,address=8000,suspend=n -javaagent:/opt/jacoco/lib/jacocoagent.jar=destfile=/iri/jacoco.exec,output=file,append=true,dumponexit=true
+
+defaults: &db_full
+ db: https://s3.eu-central-1.amazonaws.com/iotaledger-dbfiles/dev/LS_Test_All_Files.tar
+ db_checksum: 4628789811b4ccbcc3cd898debff0e1b256a1cfdd636c7b5e1beb43fed461091
+ iri_args: *args
+ ixis: *ixi
+
+db_with_snapshot: &db_with_snapshot
+ db: https://s3.eu-central-1.amazonaws.com/iotaledger-dbfiles/dev/LS_Test_DB_and_Snapshot.tar
+ db_checksum: eabb81b0570a20e8d1c65c3d29e4b4e723de537ebca0eada536e3155d5a96972
+ iri_args: *args
+ ixis: *ixi
+
+db_with_ls_files: &db_with_LS_files
+ db: https://s3.eu-central-1.amazonaws.com/iotaledger-dbfiles/dev/LS_Test_LS_Files_and_Snapshot.tar
+ db_checksum: 87fdc9c37a6b98ec8623a5b07c49737681b7b7ac36252ab2e18cc75999f766b6
+ iri_args: *args
+ ixis: *ixi
+
+db_for_pruning: &db_for_pruning
+ db: https://s3.eu-central-1.amazonaws.com/iotaledger-dbfiles/dev/PruningTestDB.tar
+ db_checksum: 15122ba80c0a03dc5b6b4186e5d880d0a1a15b5a6de48bafe4002c4c9b682221
+ iri_args: *args
+
+
+nodes:
+ nodeA: #name
+ <<: *db_full
+
+ nodeB:
+ <<: *db_with_snapshot
+
+ nodeC:
+ <<: *db_with_LS_files
+
+ nodeD:
+ <<: *db_for_pruning
diff --git a/python-regression/tests/features/steps/api_test_steps.py b/python-regression/tests/features/steps/api_test_steps.py
index d2396f6b3f..104d051ff9 100644
--- a/python-regression/tests/features/steps/api_test_steps.py
+++ b/python-regression/tests/features/steps/api_test_steps.py
@@ -50,9 +50,9 @@ def api_method_is_called(step, api_call, node_name):
api = api_utils.prepare_api_call(node_name)
response = api_utils.fetch_call(api_call, api, options)
- assert type(response) is dict, 'There may be something wrong with the response format: {}'.format(response)
world.responses[api_call] = {}
world.responses[api_call][node_name] = response
+ return response
# This method is identical to the method above, but creates a new thread
@@ -198,7 +198,7 @@ def make_neighbors(step, node1, node2):
for node in neighbor_candidates:
host = world.machine['nodes'][node]['podip']
- port = world.machine['nodes'][node]['clusterip_ports']['gossip-udp']
+ port = world.machine['nodes'][node]['clusterip_ports']['gossip-tcp']
api = api_utils.prepare_api_call(node)
response = api.get_neighbors()
neighbor_info[node] = {
@@ -216,4 +216,15 @@ def make_neighbors(step, node1, node2):
neighbor_info[node2]['node_neighbors'], neighbor_info[node1]['address'])
+@step(r'"([^"]+)" is synced up to milestone (\d+)')
+def check_node_sync(step, node, milestone):
+ node_info = api_method_is_called(step, "getNodeInfo", node)
+ latestMilestone = node_info.get('latestMilestoneIndex')
+ latestSolidMilestone = node_info.get('latestSolidSubtangleMilestoneIndex')
+
+ assert latestMilestone == int(milestone), \
+ "Latest Milestone {} on {} is not the expected {}".format(latestMilestone, node, milestone)
+ assert latestSolidMilestone == int(milestone), \
+ "Latest Solid Milestone {} on {} is not the expected {}".format(latestSolidMilestone, node, milestone)
+
diff --git a/python-regression/tests/features/steps/local_snapshots_steps.py b/python-regression/tests/features/steps/local_snapshots_steps.py
new file mode 100644
index 0000000000..831b177c5e
--- /dev/null
+++ b/python-regression/tests/features/steps/local_snapshots_steps.py
@@ -0,0 +1,77 @@
+from aloe import step
+from util.test_logic import api_test_logic as api_utils
+
+import logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+@step(r'A local snapshot was taken on "([^"]+)" at index (\d+)')
+def check_ls_indexes(step, node, index):
+ '''
+ Uses an ixi module to check the index of the latest snapshot file. It checks to make sure that the initialIndex is
+ not equal to 0. If it is, that means that a local snapshot has not been taken. If it passes this check it then
+ ensures that the index registered in the node's snapshot provider is equal to the index provided.
+
+ :param node: The node that the IXI request will be made on
+ :param index: The expected index of the Local Snapshot
+ '''
+ command ={"command": "LocalSnapshots.getIndexes"}
+ request_return = api_utils.send_ixi_request(node, command)
+ assert 'ixi' in request_return, "Error: {}".format(request_return['error'])
+ snapshot_index = request_return['ixi']['initialIndex']
+
+ assert int(snapshot_index) != 0, "Local Snapshot not generated."
+ assert int(snapshot_index) == int(index), "Snapshot index {} does not match the expected {}."\
+ .format(snapshot_index, index)
+ logger.info("Local Snapshot Index matches expected value")
+
+
+@step(r'reading the local snapshot state on "([^"]+)" returns with:')
+def read_ls_state(step, node):
+ """
+ Uses an ixi module to check the current snapshot state of the node. It cycles through a provided list of addresses
+ to make sure the snapshot state contains them.
+
+ :param step.hashes: A pointer to the list of addresses that should be present in the snapshot state
+ :param node: The node that the IXI request will be made on
+ """
+ arg_list = step.hashes
+
+ options = {}
+ api_utils.prepare_options(arg_list, options)
+
+ command = {"command": "LocalSnapshots.getState"}
+ request_return = api_utils.send_ixi_request(node, command)
+ assert 'ixi' in request_return, "Error: {}".format(request_return['error'])
+ node_state = request_return['ixi']['state']
+ addresses_with_value = options['address']
+
+ for address in addresses_with_value:
+ assert address in node_state, "Provided address: {} was not found in the snapshot state".format(address)
+ logger.info("Snapshot State contains the provided addresses.")
+
+
+@step(r'reading the local snapshot metadata on "([^"]+)" returns with:')
+def read_ls_metadata(step, node):
+ """
+    Uses an ixi module to check the current snapshot metadata of the node. It cycles through a provided list of
+    milestone hashes to make sure the snapshot metadata contains them.
+
+ :param step.hashes: A pointer to the list of milestone hashes that should be present in the snapshot metadata
+ :param node: The node that the IXI request will be made on
+ """
+ arg_list = step.hashes
+
+ options = {}
+ api_utils.prepare_options(arg_list, options)
+
+ command = {"command": "LocalSnapshots.getMetaData"}
+ request_return = api_utils.send_ixi_request(node, command)
+ assert 'ixi' in request_return, "Error: {}".format(request_return['error'])
+ node_metadata = request_return['ixi']['metaData']
+ hashes = options['hashes']
+
+ for hash_val in hashes:
+ assert hash_val in node_metadata, "Provided hash: {} was not found in the snapshot metadata".format(hash_val)
+ logger.info("Snapshot Metadata contains the provided hashes.")
diff --git a/python-regression/tests/features/steps/response_handling_steps.py b/python-regression/tests/features/steps/response_handling_steps.py
index a8175c380a..2eef3b9818 100644
--- a/python-regression/tests/features/steps/response_handling_steps.py
+++ b/python-regression/tests/features/steps/response_handling_steps.py
@@ -92,6 +92,12 @@ def check_response_for_value(step, api_call):
logger.info('Response contained expected values')
+@step(r'the response for "([^"]*)" should return null')
+def check_if_null(step, api_call):
+ response = world.responses[api_call][world.config['nodeId']]
+ assert response is None, "Response is not null"
+
+
@step(r'a response with the following is returned:')
def compare_response(step):
"""
diff --git a/python-regression/util/neighbor_logic/neighbor_logic.py b/python-regression/util/neighbor_logic/neighbor_logic.py
index 09f86e2864..bfcf103a23 100644
--- a/python-regression/util/neighbor_logic/neighbor_logic.py
+++ b/python-regression/util/neighbor_logic/neighbor_logic.py
@@ -16,16 +16,15 @@ def check_if_neighbors(api, neighbors, expected_neighbor):
methods in the step.
"""
is_neighbor = False
- for neighbor in enumerate(neighbors):
- logger.info(neighbor)
- if expected_neighbor == neighbors[neighbor[0]]['address']:
+
+ for neighbor in neighbors:
+ if expected_neighbor == neighbor['address']:
logger.info("Already a neighbor")
is_neighbor = True
else:
logger.info('Adding neighbor')
if is_neighbor is False:
- udp_address = "udp://" + expected_neighbor
- logger.info('Adding {} as neighbor'.format(udp_address))
- api.add_neighbors([udp_address.decode()])
- logger.info('{} added as neighbor'.format(udp_address))
+ tcp_address = "tcp://" + expected_neighbor
+ api.add_neighbors([tcp_address.decode()])
+ logger.info('{} added as neighbor'.format(tcp_address))
diff --git a/python-regression/util/static_vals.py b/python-regression/util/static_vals.py
index f5b20e4ced..d0d1b16dc8 100644
--- a/python-regression/util/static_vals.py
+++ b/python-regression/util/static_vals.py
@@ -4,7 +4,7 @@
TEST_TRANSACTION_HASHES = ["BNKODGPOSCN9ENBCFYXPIJZMSACAFTZIAGSWOCZFG9BYECELVD9JLBDSFIDKNXOQIRPTGNWZDMSYZ9999",
"ZPZKTOXRHKRPGNJKOCMHBQWGSMTMSDTVSYHVNZN9MMMPAZHOJYHOESCXGIDLTMXPFWDFKRNHAILRZ9999"]
-TEST_NEIGHBORS = [u'udp://178.128.236.6:14600', u'udp://167.99.178.3:14600']
+TEST_NEIGHBORS = [u'tcp://178.128.236.6:14600', u'tcp://167.99.178.3:14600']
TEST_HASH = "NMPXODIWSCTMWRTQ9AI9VROYNFACWCQDXDSJLNC9HKCECBCGQTBUBKVROXZLQWAZRQUGIJTLTMAMSH999"
CONFIRMATION_REFERENCE_HASH = "ZSPRRUJRXHBSRFGCCPVHNXWJJKXRYZSAU9ZEGWFD9LPTWOJZARRLOEQYYWIKOPSXIBFD9ADNIVAHKG999"
@@ -74,7 +74,7 @@
WTFMS9VNCQNU9MREAWURCRFP999RY9WPBQNZJQVWPEA9PLGEYFKO9KDLHUIKBLHAWFMJQHVBADMAECNWOOMJSOSQDXCASDUITIOQTCAFR999OOL99999999\
9999999999999999OCRAFGUKE999999999L99999999IPEHQKQXYFIYKCAATJXKXYLICFF"
-EMPTY_TRANSACTION_TRYTES = "99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
+EMPTY_TRANSACTION_TRYTES = "9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
@@ -92,9 +92,32 @@
99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
-999999999999999999999999999999999999999999999999999999999999999999TEST9TRANSACTION9TEST9TRANSACTION9TEST9TRANSACTION9TE\
-ST9TRANSACTION9TEST999999999999999999999999999999999999LA9999999999999999999999999PENPEZD99999999999999999999ZKJDCAXDVI\
-LDLFAPDQZCKROIQRDKHZZIX9QQ9RQICWYLH9EUCFZUBKWAAREIXSIPLNQBGXAACBZAKCWLC999999999999999999999999999999999999999999999999\
+9999999999999999999999999999999999999999999999999999999999999999999999999TEST9TRANSACTION9TEST9TRANSACTION9TEST9TRANSAC\
+TION9TEST9TRANSACTION9TEST999999999999999999999999999999999999LA9999999999999999999999999PENPEZD99999999999999999999ZKJ\
+DCAXDVILDLFAPDQZCKROIQRDKHZZIX9QQ9RQICWYLH9EUCFZUBKWAAREIXSIPLNQBGXAACBZAKCWLC99999999999999999999999999999999999999999\
99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\
-9999999999999999999999999999999999999999999999999999999999999999999999999999"
+99999999999999999999999999999999999999999999999999999999999999999999999999999999999"
+
+# Local Snapshot Test files
+LS_TEST_STATE_ADDRESSES = [
+ "UAWQVYYQGEWMXJLQHWQKQZSFZFFZCUREWZFFWNNBNA9HDJHDPNSLWNMSPDWUPUQQLVXZGMWNB9ABXMPND",
+ "SENDREMAINDERTOHERE99999999999999999999999999999999999999999999999999999999999999"
+ ]
+
+
+LS_TEST_MILESTONE_HASHES = [
+ "SGANDWBKEVRMTMUPMTWTRLZJGAOKATGLEZFNJFAQIWJPVCVYVEWULOIBNIYKDXYRQCSGLUDGIFCVDRKO9",
+ "9LNZBOEFPVDSCKCZEYRUWRHTOYGSSKMUYWNOQVZAUXVDXYGKQ9WIIKZKCVVVTJ9JVQKDZKULTYZVDKBV9",
+ "YECIXIYMT9TMUXAEXOAEMDBKWMRVQXMLABVI9PLCJKEKWD9OOBPECXSNDZVRFIMDRDHFPHOC9KQB9VNA9",
+ "FVLJEBOTGXCJUDBUGMHYZEHPQGLM9P99ZTDDXLMRDJEEOQEPYBKDSIBZKQJKUXEKBHDRKXGCTYCAKUOU9",
+ "PQUJXJRUF9HSILXEE9DVLMVDDSNZSMS99ZWV9FDJXFLIYHOZWIRFJDGSGOQPJCTDHYPNYFQHQQ9LDPFW9"
+ ]
+
+LS_PRUNED_TRANSACTIONS = [
+ "FKGOUSFWFWSPQXPGEVBXQRYFRGFJWBXRYCXJDZTQWQBQKUJOGEY9JCIVYYSHGNFVIJBIXMZIVDUPBUIYA",
+ "EVQEKYWYYDSPHYDVPHATRUHBMKANTTIRA9FXXUYSVRBSEMESKWDBJBWBDARWMGBCFWGI9CFOEI9MXGMYX",
+ "HVGYYBVCBWKSMQFTFURLQWWRTNBKYXMXQ9QP9SHSNZJQQ9P9HL9IGNWZBTNMOOLV9GVOBPIBEUCXWNFAY",
+ "KESYDCSVJ9TJJIOIMHJDMD9BSPWHQVUIKEBSMYLBTUZZTKKFALDTMATCQWAKIQHNHHZLP9NJFZJMFNEMC",
+ "TJRXEMZZMORIHQSGOTRUFATRMMQF9NCGVVCKIDJFIMEZNXFGC9HZQQWZXTMIWILGYBBBFZURU99G9YRLY"
+]
diff --git a/python-regression/util/test_logic/api_test_logic.py b/python-regression/util/test_logic/api_test_logic.py
index f4ab69ebe6..16c8ae005d 100644
--- a/python-regression/util/test_logic/api_test_logic.py
+++ b/python-regression/util/test_logic/api_test_logic.py
@@ -1,15 +1,15 @@
from aloe import world
-from iota import Iota,Address,Tag,TryteString
-from copy import deepcopy
+from iota import Iota, Address, Tag, TryteString
+import json
+import urllib3
from . import value_fetch_logic as value_fetch
-
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
-def prepare_api_call(node_name, **seed):
+def prepare_api_call(node_name, **kwargs):
"""
Prepares an api target as an entry point for API calls on a specified node.
@@ -18,10 +18,8 @@ def prepare_api_call(node_name, **seed):
"""
logger.info('Preparing api call')
- host = world.machine['nodes'][node_name]['host']
- port = world.machine['nodes'][node_name]['ports']['api']
- address = "http://" + host + ":" + str(port)
- api = Iota(address, **seed)
+ address = fetch_node_api_address(node_name)
+ api = Iota(address, seed=kwargs.get('seed'))
logger.info('API call prepared for %s', address)
return api
@@ -133,7 +131,10 @@ def fetch_call(api_call, api, options):
'interruptAttachingToTangle': api.interrupt_attaching_to_tangle,
}
- response = call_list[api_call](**options)
+ try:
+ response = call_list[api_call](**options)
+ except ValueError:
+ response = None
return response
@@ -191,19 +192,36 @@ def prepare_transaction_arguments(arg_list):
arg_list[key] = TryteString.from_unicode(arg_list[key])
-
-def duplicate_arguments(arg_list):
+def fetch_node_api_address(node):
+ """
+ Fetches an api address from the machine configurations for the provided node.
+ :param node: The node that the address will be fetched for
+ :return: The api endpoint for the node
"""
- Duplicates the step arguments, providing a copy for storage and comparison.
+ host = world.machine['nodes'][node]['host']
+ port = world.machine['nodes'][node]['ports']['api']
+ address = "http://" + str(host) + ":" + str(port)
+ return address
- :param arg_list: The original step arguments you would like to copy.
- :return: Copy of the original argument list.
+
+def send_ixi_request(node, command):
"""
+ Sends an IXI command to the provided node.
- stored_values = deepcopy(arg_list)
- stored_value_list = {}
- for index, value in enumerate(stored_values):
- stored_value_list[index] = value
+ :param node: The node that the IXI request will be made on
+ :param command: The IXI command that will be placed in the request
+ :return: The response value from the node
+ """
+ address = fetch_node_api_address(node)
+ headers = {
+ 'content-type': 'application/json',
+ 'X-IOTA-API-Version': '1'
+ }
- return stored_value_list
+ command_string = json.dumps(command)
+ logger.info("Sending command")
+ http = urllib3.PoolManager()
+ request = http.request("POST", address, body=command_string, headers=headers)
+ logger.info("request sent")
+ return json.loads(request.data.decode('utf-8'))
diff --git a/python-regression/util/test_logic/value_fetch_logic.py b/python-regression/util/test_logic/value_fetch_logic.py
index 95aa4ee784..51854eb9cf 100644
--- a/python-regression/util/test_logic/value_fetch_logic.py
+++ b/python-regression/util/test_logic/value_fetch_logic.py
@@ -74,8 +74,8 @@ def fetch_node_address(value):
:return: The address of the referenced node in list format
"""
host = world.machine['nodes'][value]['host']
- port = world.machine['nodes'][value]['ports']['gossip-udp']
- address = "udp://" + host + ":" + str(port)
+ port = world.machine['nodes'][value]['ports']['gossip-tcp']
+ address = "tcp://" + host + ":" + str(port)
return [address.decode()]
diff --git a/src/main/java/com/iota/iri/IRI.java b/src/main/java/com/iota/iri/IRI.java
index 674898a132..cda83ab0d7 100644
--- a/src/main/java/com/iota/iri/IRI.java
+++ b/src/main/java/com/iota/iri/IRI.java
@@ -120,18 +120,18 @@ public static void main(String [] args) throws Exception {
iota = new Iota(config);
ixi = new IXI(iota);
- api = new API(iota.configuration, ixi, iota.transactionRequester,
+ api = new API(config, ixi, iota.transactionRequester,
iota.spentAddressesService, iota.tangle, iota.bundleValidator,
- iota.snapshotProvider, iota.ledgerService, iota.node, iota.tipsSelector,
+ iota.snapshotProvider, iota.ledgerService, iota.neighborRouter, iota.tipsSelector,
iota.tipsViewModel, iota.transactionValidator,
- iota.latestMilestoneTracker);
+ iota.latestMilestoneTracker, iota.txPipeline);
shutdownHook();
try {
iota.init();
//TODO redundant parameter but we will touch this when we refactor IXI
ixi.init(config.getIxiDir());
- api.init(new RestEasy(iota.configuration));
+ api.init(new RestEasy(config));
log.info("IOTA Node initialised correctly.");
} catch (Exception e) {
log.error("Exception during IOTA node initialisation: ", e);
diff --git a/src/main/java/com/iota/iri/Iota.java b/src/main/java/com/iota/iri/Iota.java
index d9a14be2c8..b739a56034 100644
--- a/src/main/java/com/iota/iri/Iota.java
+++ b/src/main/java/com/iota/iri/Iota.java
@@ -4,11 +4,12 @@
import com.iota.iri.conf.TipSelConfig;
import com.iota.iri.controllers.TipsViewModel;
import com.iota.iri.controllers.TransactionViewModel;
-import com.iota.iri.network.Node;
+import com.iota.iri.network.impl.TipsRequesterImpl;
import com.iota.iri.network.TransactionRequester;
-import com.iota.iri.network.UDPReceiver;
+import com.iota.iri.network.NeighborRouter;
import com.iota.iri.network.impl.TransactionRequesterWorkerImpl;
-import com.iota.iri.network.replicator.Replicator;
+import com.iota.iri.network.pipeline.TransactionProcessingPipeline;
+import com.iota.iri.network.pipeline.TransactionProcessingPipelineImpl;
import com.iota.iri.service.TipsSolidifier;
import com.iota.iri.service.ledger.impl.LedgerServiceImpl;
import com.iota.iri.service.milestone.impl.*;
@@ -37,21 +38,20 @@
/**
*
- * The main class of IRI. This will propagate transactions into and throughout the network.
- * This data is stored as a {@link Tangle}, a form of a Directed acyclic graph.
- * All incoming data will be stored in one or more implementations of {@link PersistenceProvider}.
+ * The main class of IRI. This will propagate transactions into and throughout the network. This data is stored as a
+ * {@link Tangle}, a form of a Directed acyclic graph. All incoming data will be stored in one or more implementations
+ * of {@link PersistenceProvider}.
*
*
- * During initialization, all the Providers can be set to rescan or revalidate their transactions.
- * After initialization, an asynchronous process has started which will process inbound and outbound transactions.
- * Each full node should be peered with 7-9 other full nodes (neighbors) to function optimally.
+ * During initialization, all the Providers can be set to rescan or revalidate their transactions. After initialization,
+ * an asynchronous process has started which will process inbound and outbound transactions. Each full node should be
+ * peered with 3-5 other full nodes (neighbors) to function optimally.
*
*
- * If this node has no Neighbors defined, no data is transferred.
- * However, if the node has Neighbors, but no Internet connection,
- * synchronization will continue after Internet connection is established.
- * Any transactions sent to this node in its local network will then be processed.
- * This makes IRI able to run partially offline if an already existing database exists on this node.
+ * If this node has no Neighbors defined, no data is transferred. However, if the node has Neighbors, but no Internet
+ * connection, synchronization will continue after Internet connection is established. Any transactions sent to this
+ * node in its local network will then be processed. This makes IRI able to run partially offline if an already existing
+ * database exists on this node.
*
*
* Validation of a transaction is the process by which other devices choose the transaction.
@@ -68,6 +68,7 @@
*
*/
public class Iota {
+
private static final Logger log = LoggerFactory.getLogger(Iota.class);
public final SpentAddressesProviderImpl spentAddressesProvider;
@@ -102,9 +103,9 @@ public class Iota {
public final TransactionValidator transactionValidator;
public final TipsSolidifier tipsSolidifier;
public final TransactionRequester transactionRequester;
- public final Node node;
- public final UDPReceiver udpReceiver;
- public final Replicator replicator;
+ public final TipsRequesterImpl tipRequester;
+ public final TransactionProcessingPipeline txPipeline;
+ public final NeighborRouter neighborRouter;
public final IotaConfig configuration;
public final TipsViewModel tipsViewModel;
public final TipSelector tipsSelector;
@@ -114,10 +115,11 @@ public class Iota {
*
* @param configuration Information about how this node will be configured.
* @throws TransactionPruningException If the TransactionPruner could not restore its state.
- * @throws SnapshotException If the Snapshot fails to initialize.
- * This can happen if the snapshot signature is invalid or the file cannot be read.
+ * @throws SnapshotException If the Snapshot fails to initialize. This can happen if the snapshot
+ * signature is invalid or the file cannot be read.
*/
- public Iota(IotaConfig configuration) throws TransactionPruningException, SnapshotException, SpentAddressesException {
+ public Iota(IotaConfig configuration)
+ throws TransactionPruningException, SnapshotException, SpentAddressesException {
this.configuration = configuration;
// new refactored instances
@@ -125,18 +127,19 @@ public Iota(IotaConfig configuration) throws TransactionPruningException, Snapsh
spentAddressesService = new SpentAddressesServiceImpl();
snapshotProvider = new SnapshotProviderImpl();
snapshotService = new SnapshotServiceImpl();
- localSnapshotManager = configuration.getLocalSnapshotsEnabled()
- ? new LocalSnapshotManagerImpl()
- : null;
+ localSnapshotManager = configuration.getLocalSnapshotsEnabled() ? new LocalSnapshotManagerImpl() : null;
milestoneService = new MilestoneServiceImpl();
latestMilestoneTracker = new LatestMilestoneTrackerImpl();
latestSolidMilestoneTracker = new LatestSolidMilestoneTrackerImpl();
seenMilestonesRetriever = new SeenMilestonesRetrieverImpl();
milestoneSolidifier = new MilestoneSolidifierImpl();
transactionPruner = configuration.getLocalSnapshotsEnabled() && configuration.getLocalSnapshotsPruningEnabled()
- ? new AsyncTransactionPruner()
- : null;
+ ? new AsyncTransactionPruner()
+ : null;
transactionRequesterWorker = new TransactionRequesterWorkerImpl();
+ neighborRouter = new NeighborRouter();
+ txPipeline = new TransactionProcessingPipelineImpl();
+ tipRequester = new TipsRequesterImpl();
// legacy code
bundleValidator = new BundleValidator();
@@ -144,10 +147,7 @@ public Iota(IotaConfig configuration) throws TransactionPruningException, Snapsh
tipsViewModel = new TipsViewModel();
transactionRequester = new TransactionRequester(tangle, snapshotProvider);
transactionValidator = new TransactionValidator(tangle, snapshotProvider, tipsViewModel, transactionRequester);
- node = new Node(tangle, snapshotProvider, transactionValidator, transactionRequester, tipsViewModel,
- latestMilestoneTracker, configuration);
- replicator = new Replicator(node, configuration);
- udpReceiver = new UDPReceiver(node, configuration);
+
tipsSolidifier = new TipsSolidifier(tangle, transactionValidator, tipsViewModel, configuration);
tipsSelector = createTipSelector(configuration);
@@ -161,14 +161,14 @@ public Iota(IotaConfig configuration) throws TransactionPruningException, Snapsh
*
* After this function, incoming and outbound transaction processing has started.
*
- * @throws Exception If along the way a service fails to initialize.
- * Most common cause is a file read or database error.
+ * @throws Exception If along the way a service fails to initialize. Most common cause is a file read or database
+ * error.
*/
public void init() throws Exception {
initializeTangle();
tangle.init();
- if (configuration.isRescanDb()){
+ if (configuration.isRescanDb()) {
rescanDb();
}
@@ -181,9 +181,10 @@ public void init() throws Exception {
transactionValidator.init(configuration.isTestnet(), configuration.getMwm());
tipsSolidifier.init();
transactionRequester.init(configuration.getpRemoveRequest());
- udpReceiver.init();
- replicator.init();
- node.init();
+
+ txPipeline.start();
+ neighborRouter.start();
+ tipRequester.start();
latestMilestoneTracker.start();
latestSolidMilestoneTracker.start();
@@ -200,8 +201,8 @@ public void init() throws Exception {
}
private void injectDependencies() throws SnapshotException, TransactionPruningException, SpentAddressesException {
- //snapshot provider must be initialized first
- //because we check whether spent addresses data exists
+ // snapshot provider must be initialized first
+ // because we check whether spent addresses data exists
snapshotProvider.init(configuration);
spentAddressesProvider.init(configuration);
spentAddressesService.init(tangle, snapshotProvider, spentAddressesProvider, bundleValidator, configuration);
@@ -209,6 +210,7 @@ private void injectDependencies() throws SnapshotException, TransactionPruningEx
if (localSnapshotManager != null) {
localSnapshotManager.init(snapshotProvider, snapshotService, transactionPruner, configuration);
}
+
milestoneService.init(tangle, snapshotProvider, snapshotService, bundleValidator, configuration);
latestMilestoneTracker.init(tangle, snapshotProvider, milestoneService, milestoneSolidifier,
configuration);
@@ -221,11 +223,15 @@ private void injectDependencies() throws SnapshotException, TransactionPruningEx
if (transactionPruner != null) {
transactionPruner.init(tangle, snapshotProvider, spentAddressesService, spentAddressesProvider, tipsViewModel, configuration);
}
- transactionRequesterWorker.init(tangle, transactionRequester, tipsViewModel, node);
+ transactionRequesterWorker.init(tangle, transactionRequester, tipsViewModel, neighborRouter);
+ neighborRouter.init(configuration, configuration, transactionRequester, txPipeline);
+ txPipeline.init(neighborRouter, configuration, transactionValidator, tangle, snapshotProvider, tipsViewModel,
+ latestMilestoneTracker);
+ tipRequester.init(neighborRouter, tangle, latestMilestoneTracker, transactionRequester);
}
private void rescanDb() throws Exception {
- //delete all transaction indexes
+ // delete all transaction indexes
tangle.clearColumn(com.iota.iri.model.persistables.Address.class);
tangle.clearColumn(com.iota.iri.model.persistables.Bundle.class);
tangle.clearColumn(com.iota.iri.model.persistables.Approvee.class);
@@ -235,7 +241,7 @@ private void rescanDb() throws Exception {
tangle.clearColumn(com.iota.iri.model.StateDiff.class);
tangle.clearMetadata(com.iota.iri.model.persistables.Transaction.class);
- //rescan all tx & refill the columns
+ // rescan all tx & refill the columns
TransactionViewModel tx = TransactionViewModel.first(tangle);
int counter = 0;
while (tx != null) {
@@ -250,8 +256,8 @@ private void rescanDb() throws Exception {
}
/**
- * Gracefully shuts down by calling shutdown() on all used services.
- * Exceptions during shutdown are not caught.
+ * Gracefully shuts down by calling shutdown() on all used services. Exceptions during shutdown are not
+ * caught.
*/
public void shutdown() throws Exception {
// shutdown in reverse starting order (to not break any dependencies)
@@ -269,9 +275,9 @@ public void shutdown() throws Exception {
}
tipsSolidifier.shutdown();
- node.shutdown();
- udpReceiver.shutdown();
- replicator.shutdown();
+ tipRequester.shutdown();
+ txPipeline.shutdown();
+ neighborRouter.shutdown();
transactionValidator.shutdown();
tangle.shutdown();
@@ -282,13 +288,9 @@ public void shutdown() throws Exception {
private void initializeTangle() {
switch (configuration.getMainDb()) {
case "rocksdb": {
- tangle.addPersistenceProvider(new RocksDBPersistenceProvider(
- configuration.getDbPath(),
- configuration.getDbLogPath(),
- configuration.getDbCacheSize(),
- Tangle.COLUMN_FAMILIES,
- Tangle.METADATA_COLUMN_FAMILY)
- );
+ tangle.addPersistenceProvider(
+ new RocksDBPersistenceProvider(configuration.getDbPath(), configuration.getDbLogPath(),
+ configuration.getDbCacheSize(), Tangle.COLUMN_FAMILIES, Tangle.METADATA_COLUMN_FAMILY));
break;
}
default: {
diff --git a/src/main/java/com/iota/iri/conf/BaseIotaConfig.java b/src/main/java/com/iota/iri/conf/BaseIotaConfig.java
index 104f4dcd2b..c382fb429c 100644
--- a/src/main/java/com/iota/iri/conf/BaseIotaConfig.java
+++ b/src/main/java/com/iota/iri/conf/BaseIotaConfig.java
@@ -45,14 +45,15 @@ public abstract class BaseIotaConfig implements IotaConfig {
//We don't have a REMOTE config but we have a remote flag. We must add a field for JCommander
private boolean remote;
-
//Network
- protected int udpReceiverPort = Defaults.UDP_RECEIVER_PORT;
- protected int tcpReceiverPort = Defaults.TCP_RECEIVER_PORT;
+ protected String neighboringSocketAddress = Defaults.NEIGHBORING_SOCKET_ADDRESS;
+ protected int neighboringSocketPort = Defaults.NEIGHBORING_SOCKET_PORT;
+ protected int reconnectAttemptIntervalSeconds = Defaults.RECONNECT_ATTEMPT_INTERVAL_SECONDS;
+ protected boolean autoTetheringEnabled = Defaults.AUTO_TETHERING_ENABLED;
protected double pRemoveRequest = Defaults.P_REMOVE_REQUEST;
protected double pDropCacheEntry = Defaults.P_DROP_CACHE_ENTRY;
protected int sendLimit = Defaults.SEND_LIMIT;
- protected int maxPeers = Defaults.MAX_PEERS;
+ protected int maxNeighbors = Defaults.MAX_NEIGHBORS;
protected boolean dnsRefresherEnabled = Defaults.DNS_REFRESHER_ENABLED;
protected boolean dnsResolutionEnabled = Defaults.DNS_RESOLUTION_ENABLED;
protected List neighbors = new ArrayList<>();
@@ -151,7 +152,7 @@ protected void setTestnet(boolean testnet) {
}
@JsonProperty
- @Parameter(names = {"--help", "-h"} , help = true, hidden = true)
+ @Parameter(names = {"--help", "-h"}, help = true, hidden = true)
public void setHelp(boolean help) {
this.help = help;
}
@@ -278,26 +279,48 @@ protected void setRemoteAuth(String remoteAuth) {
this.remoteAuth = remoteAuth;
}
+ @JsonProperty
+ @Parameter(names = {"--neighboring-socket-address"}, description = NetworkConfig.Descriptions.NEIGHBORING_SOCKET_ADDRESS)
+ public void setNeighboringSocketAddress(String neighboringSocketAddress) {
+ this.neighboringSocketAddress = neighboringSocketAddress;
+ }
+
+ @Override
+ public String getNeighboringSocketAddress() {
+ return neighboringSocketAddress;
+ }
+
+ @JsonProperty
+ @Parameter(names = {"--neighboring-socket-port", "-t"}, description = NetworkConfig.Descriptions.NEIGHBORING_SOCKET_PORT)
+ public void setNeighboringSocketPort(int neighboringSocketPort) {
+ this.neighboringSocketPort = neighboringSocketPort;
+ }
+
@Override
- public int getUdpReceiverPort() {
- return udpReceiverPort;
+ public int getNeighboringSocketPort() {
+ return neighboringSocketPort;
+ }
+
+ @Override
+ public int getReconnectAttemptIntervalSeconds() {
+ return reconnectAttemptIntervalSeconds;
}
@JsonProperty
- @Parameter(names = {"-u", "--udp-receiver-port"}, description = NetworkConfig.Descriptions.UDP_RECEIVER_PORT)
- public void setUdpReceiverPort(int udpReceiverPort) {
- this.udpReceiverPort = udpReceiverPort;
+ @Parameter(names = {"--reconnect-attempt-interval-seconds"}, description = NetworkConfig.Descriptions.RECONNECT_ATTEMPT_INTERVAL_SECONDS)
+ protected void setReconnectAttemptIntervalSeconds(int reconnectAttemptIntervalSeconds) {
+ this.reconnectAttemptIntervalSeconds = reconnectAttemptIntervalSeconds;
}
@Override
- public int getTcpReceiverPort() {
- return tcpReceiverPort;
+ public boolean isAutoTetheringEnabled() {
+ return autoTetheringEnabled;
}
@JsonProperty
- @Parameter(names = {"-t", "--tcp-receiver-port"}, description = NetworkConfig.Descriptions.TCP_RECEIVER_PORT)
- protected void setTcpReceiverPort(int tcpReceiverPort) {
- this.tcpReceiverPort = tcpReceiverPort;
+ @Parameter(names = {"--auto-tethering"}, description = NetworkConfig.Descriptions.AUTO_TETHERING_ENABLED, arity = 1)
+ protected void setAutoTetheringEnabled(boolean autoTetheringEnabled) {
+ this.autoTetheringEnabled = autoTetheringEnabled;
}
@Override
@@ -323,14 +346,14 @@ protected void setSendLimit(int sendLimit) {
}
@Override
- public int getMaxPeers() {
- return maxPeers;
+ public int getMaxNeighbors() {
+ return maxNeighbors;
}
@JsonProperty
- @Parameter(names = {"--max-peers"}, description = NetworkConfig.Descriptions.MAX_PEERS)
- protected void setMaxPeers(int maxPeers) {
- this.maxPeers = maxPeers;
+ @Parameter(names = {"--max-neighbors"}, description = NetworkConfig.Descriptions.MAX_NEIGHBORS)
+ protected void setMaxNeighbors(int maxNeighbors) {
+ this.maxNeighbors = maxNeighbors;
}
@Override
@@ -566,7 +589,7 @@ public int getLocalSnapshotsIntervalSynced() {
protected void setLocalSnapshotsIntervalSynced(int localSnapshotsIntervalSynced) {
if (localSnapshotsIntervalSynced < 1) {
throw new ParameterException("LOCAL_SNAPSHOTS_INTERVAL_SYNCED should be at least 1 (found " +
- localSnapshotsIntervalSynced +")");
+ localSnapshotsIntervalSynced + ")");
}
this.localSnapshotsIntervalSynced = localSnapshotsIntervalSynced;
@@ -583,7 +606,7 @@ public int getLocalSnapshotsIntervalUnsynced() {
protected void setLocalSnapshotsIntervalUnsynced(int localSnapshotsIntervalUnsynced) {
if (localSnapshotsIntervalUnsynced < 1) {
throw new ParameterException("LOCAL_SNAPSHOTS_INTERVAL_UNSYNCED should be at least 1 (found " +
- localSnapshotsIntervalUnsynced +")");
+ localSnapshotsIntervalUnsynced + ")");
}
this.localSnapshotsIntervalUnsynced = localSnapshotsIntervalUnsynced;
@@ -600,7 +623,7 @@ protected void setLocalSnapshotsDepth(int localSnapshotsDepth) {
if (localSnapshotsDepth < Defaults.LOCAL_SNAPSHOTS_DEPTH_MIN) {
throw new ParameterException("LOCAL_SNAPSHOTS_DEPTH should be at least "
+ Defaults.LOCAL_SNAPSHOTS_DEPTH_MIN
- + "(found " + localSnapshotsDepth +")");
+ + "(found " + localSnapshotsDepth + ")");
}
this.localSnapshotsDepth = localSnapshotsDepth;
@@ -892,11 +915,13 @@ public interface Defaults {
String REMOTE_AUTH = "";
//Network
- int UDP_RECEIVER_PORT = 14600;
- int TCP_RECEIVER_PORT = 15600;
+ String NEIGHBORING_SOCKET_ADDRESS = "0.0.0.0";
+ int NEIGHBORING_SOCKET_PORT = 15600;
+ int RECONNECT_ATTEMPT_INTERVAL_SECONDS = 60;
+ boolean AUTO_TETHERING_ENABLED = false;
double P_REMOVE_REQUEST = 0.01d;
int SEND_LIMIT = -1;
- int MAX_PEERS = 0;
+ int MAX_NEIGHBORS = 5;
boolean DNS_REFRESHER_ENABLED = true;
boolean DNS_RESOLUTION_ENABLED = true;
@@ -925,7 +950,6 @@ public interface Defaults {
int CACHE_SIZE_BYTES = 150_000;
-
//Zmq
int ZMQ_THREADS = 1;
boolean ZMQ_ENABLE_IPC = false;
diff --git a/src/main/java/com/iota/iri/conf/NetworkConfig.java b/src/main/java/com/iota/iri/conf/NetworkConfig.java
index 4d7271104d..8b113d0aef 100644
--- a/src/main/java/com/iota/iri/conf/NetworkConfig.java
+++ b/src/main/java/com/iota/iri/conf/NetworkConfig.java
@@ -9,14 +9,25 @@
public interface NetworkConfig extends Config {
/**
- * @return Descriptions#UDP_RECEIVER_PORT
+ * @return Descriptions#NEIGHBORING_SOCKET_ADDRESS
*/
- int getUdpReceiverPort();
+ String getNeighboringSocketAddress();
/**
- * @return Descriptions#TCP_RECEIVER_PORT
+ * @return Descriptions#NEIGHBORING_SOCKET_PORT
*/
- int getTcpReceiverPort();
+ int getNeighboringSocketPort();
+
+ /**
+ *
+ * @return Descriptions#RECONNECT_ATTEMPT_INTERVAL_SECONDS
+ */
+ int getReconnectAttemptIntervalSeconds();
+
+ /**
+ * @return Descriptions#AUTO_TETHERING_ENABLED
+ */
+ boolean isAutoTetheringEnabled();
/**
* @return Descriptions#P_REMOVE_REQUEST
@@ -29,9 +40,9 @@ public interface NetworkConfig extends Config {
int getSendLimit();
/**
- * @return Descriptions#MAX_PEERS
+ * @return Descriptions#MAX_NEIGHBORS
*/
- int getMaxPeers();
+ int getMaxNeighbors();
/**
* @return Descriptions#DNS_REFRESHER_ENABLED
@@ -64,15 +75,18 @@ public interface NetworkConfig extends Config {
int getCacheSizeBytes();
interface Descriptions {
- String UDP_RECEIVER_PORT = "The UDP Receiver Port.";
- String TCP_RECEIVER_PORT = "The TCP Receiver Port.";
+ String NEIGHBORING_SOCKET_ADDRESS = "The address to bind the TCP server socket to.";
+ String NEIGHBORING_SOCKET_PORT = "The TCP Receiver Port.";
+ String RECONNECT_ATTEMPT_INTERVAL_SECONDS = "The interval at which to reconnect to wanted neighbors.";
+ String AUTO_TETHERING_ENABLED = "Whether to accept new connections from unknown neighbors. "
+ + "Unknown meaning neighbors which are not defined in the config and were not added via addNeighbors.";
String P_REMOVE_REQUEST = DescriptionHelper.PROB_OF + " stopping to request a transaction. This number should be " +
- "closer to 0 so non-existing transaction hashes will eventually be removed.";
+ "closer to 0 so non-existing transaction hashes will eventually be removed.";
String SEND_LIMIT = "The maximum number of packets that may be sent by this node in a 1 second interval. If this number is below 0 then there is no limit.";
- String MAX_PEERS = "The maximum number of non mutually tethered connections allowed. Works only in testnet mode";
+ String MAX_NEIGHBORS = "The maximum number of neighbors allowed to be connected.";
String DNS_REFRESHER_ENABLED = "Reconnect to neighbors that have dynamic IPs.";
String DNS_RESOLUTION_ENABLED = "Enable using DNS for neighbor peering.";
- String NEIGHBORS = "Urls of peer iota nodes.";
+ String NEIGHBORS = "Urls of neighbor iota nodes.";
String Q_SIZE_NODE = "The size of the REPLY, BROADCAST, and RECEIVE network queues.";
String P_DROP_CACHE_ENTRY = DescriptionHelper.PROB_OF + "dropping recently seen transactions out of the network cache.";
String CACHE_SIZE_BYTES = "The size of the network cache in bytes";
diff --git a/src/main/java/com/iota/iri/conf/ProtocolConfig.java b/src/main/java/com/iota/iri/conf/ProtocolConfig.java
index 20c4526af6..036e200846 100644
--- a/src/main/java/com/iota/iri/conf/ProtocolConfig.java
+++ b/src/main/java/com/iota/iri/conf/ProtocolConfig.java
@@ -1,5 +1,7 @@
package com.iota.iri.conf;
+import com.iota.iri.model.Hash;
+
/**
* Configuration for protocol rules. Controls what transactions will be accepted by the network, and how they will
* be propagated to other nodes.
@@ -11,6 +13,11 @@ public interface ProtocolConfig extends Config {
*/
int getMwm();
+ /**
+ * @return Descriptions#COORDINATOR
+ */
+ Hash getCoordinator();
+
/**
* @return Descriptions#TRANSACTION_PACKET_SIZE
*/
@@ -39,6 +46,7 @@ public interface ProtocolConfig extends Config {
interface Descriptions {
String MWM = "The minimum weight magnitude is the number of trailing 0s that must appear in the end of a transaction hash. Increasing this number by 1 will result in proof of work that is 3 times as hard.";
+ String COORDINATOR = "The address of the coordinator";
String TRANSACTION_PACKET_SIZE = "The size of the packet in bytes received by a node. In the mainnet the packet size should always be 1650. It consists of 1604 bytes of a received transaction and 46 bytes of a requested transaction hash. This value can be changed in order to create testnets with different rules.";
String REQUEST_HASH_SIZE = "The size of the requested hash in a packet. Its size is derived from the minimal MWM value the network accepts. The larger the MWM -> the more trailing zeroes we can ignore -> smaller hash size.";
String P_DROP_TRANSACTION = DescriptionHelper.PROB_OF + "dropping a received transaction. This is used only for testing purposes.";
diff --git a/src/main/java/com/iota/iri/controllers/TransactionViewModel.java b/src/main/java/com/iota/iri/controllers/TransactionViewModel.java
index fb620f1da9..50668790a9 100644
--- a/src/main/java/com/iota/iri/controllers/TransactionViewModel.java
+++ b/src/main/java/com/iota/iri/controllers/TransactionViewModel.java
@@ -893,3 +893,4 @@ public String toString() {
return "transaction " + hash.toString();
}
}
+
diff --git a/src/main/java/com/iota/iri/crypto/batched/BCTCurl.java b/src/main/java/com/iota/iri/crypto/batched/BCTCurl.java
new file mode 100644
index 0000000000..93a4e9d305
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BCTCurl.java
@@ -0,0 +1,121 @@
+package com.iota.iri.crypto.batched;
+
+/**
+ * A Curl implementation which absorbs binary-encoded-ternary inputs.
+ */
+public class BCTCurl {
+
+ private static final long HIGH_LONG_BITS = 0xFFFF_FFFF_FFFF_FFFFL;
+
+ private int hashLength;
+ private int numberOfRounds;
+ private int stateLength;
+ private BCTrinary state;
+
+ /**
+ * Creates a new {@link BCTCurl} with the given hash length and number of rounds.
+ * @param hashLength the desired hash length
+ * @param numberOfRounds the number of hashing rounds to apply
+ */
+ public BCTCurl(int hashLength, int numberOfRounds) {
+ this.hashLength = hashLength;
+ this.numberOfRounds = numberOfRounds;
+ this.stateLength = 3 * hashLength;
+ this.state = new BCTrinary(new long[3 * hashLength], new long[3 * hashLength]);
+ reset();
+ }
+
+ /**
+ * Resets the state of the hashing function.
+ */
+ public void reset() {
+ for (int i = 0; i < stateLength; i++) {
+ state.low[i] = HIGH_LONG_BITS;
+ state.high[i] = HIGH_LONG_BITS;
+ }
+ }
+
+
+ /**
+ * Transforms the state of the hashing function.
+ */
+ public void transform() {
+ long[] scratchPadLow = new long[stateLength];
+ long[] scratchPadHigh = new long[stateLength];
+ int scratchPadIndex = 0;
+
+ for (int round = numberOfRounds; round > 0; round--) {
+ System.arraycopy(state.low, 0, scratchPadLow, 0, state.low.length);
+ System.arraycopy(state.high, 0, scratchPadHigh, 0, state.high.length);
+ for (int stateIndex = 0; stateIndex < stateLength; stateIndex++) {
+ long alpha = scratchPadLow[scratchPadIndex];
+ long beta = scratchPadHigh[scratchPadIndex];
+
+ if (scratchPadIndex < 365) {
+ scratchPadIndex += 364;
+ } else {
+ scratchPadIndex -= 365;
+ }
+
+ long delta = beta ^ scratchPadLow[scratchPadIndex];
+ state.low[stateIndex] = ~(delta & alpha);
+ state.high[stateIndex] = (alpha ^ scratchPadHigh[scratchPadIndex]) | delta;
+ }
+ }
+ }
+
+ /**
+ * Absorbs the given binary-encoded-ternary trits data.
+ * @param bcTrits the binary-encoded-ternary trits to absorb
+ */
+ public void absorb(BCTrinary bcTrits) {
+ int length = bcTrits.low.length;
+ int offset = 0;
+
+ for (; ; ) {
+ int lengthToCopy;
+ if (length < hashLength) {
+ lengthToCopy = length;
+ } else {
+ lengthToCopy = hashLength;
+ }
+
+ System.arraycopy(bcTrits.low, offset, state.low, 0, lengthToCopy);
+ System.arraycopy(bcTrits.high, offset, state.high, 0, lengthToCopy);
+ transform();
+ offset += lengthToCopy;
+ length -= lengthToCopy;
+
+ if (length <= 0) {
+ break;
+ }
+ }
+ }
+
+ /**
+ * Squeezes the desired length from the state of the hashing function.
+ * @param tritCount the desired length of the result
+ * @return the binary-encoded-trinary data
+ */
+ public BCTrinary squeeze(int tritCount) {
+ BCTrinary result = new BCTrinary(new long[tritCount], new long[tritCount]);
+
+ int hashCount = tritCount / hashLength;
+
+ for (int i = 0; i < hashCount; i++) {
+ System.arraycopy(state.low, 0, result.low, i * hashLength, hashLength);
+ System.arraycopy(state.high, 0, result.high, i * hashLength, hashLength);
+ transform();
+ }
+
+ int last = tritCount - hashCount * hashLength;
+
+ System.arraycopy(state.low, 0, result.low, tritCount - last, last);
+ System.arraycopy(state.high, 0, result.high, tritCount - last, last);
+ if (tritCount % hashLength != 0) {
+ transform();
+ }
+ return result;
+ }
+
+}
diff --git a/src/main/java/com/iota/iri/crypto/batched/BCTernaryDemultiplexer.java b/src/main/java/com/iota/iri/crypto/batched/BCTernaryDemultiplexer.java
new file mode 100644
index 0000000000..5ed2c67943
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BCTernaryDemultiplexer.java
@@ -0,0 +1,47 @@
+package com.iota.iri.crypto.batched;
+
+/**
+ * Demultiplexes long values into byte arrays.
+ */
+public class BCTernaryDemultiplexer {
+
+ private BCTrinary bcTrinary;
+
+ /**
+ * Creates a new {@link BCTernaryDemultiplexer} with the given
+ * binary-encoded-ternary data to demultiplex.
+ * @param bcTrinary the binary-encoded-ternary object to demultiplex
+ */
+ public BCTernaryDemultiplexer(BCTrinary bcTrinary) {
+ this.bcTrinary = bcTrinary;
+ }
+
+ /**
+ * Constructs the demultiplexed version of a given column index.
+ * @param index the column index to demultiplex
+ * @return the byte array at the given column index
+ */
+ public byte[] get(int index) {
+ int length = bcTrinary.low.length;
+ byte[] result = new byte[length];
+
+ for (int i = 0; i < length; i++) {
+ long low = (bcTrinary.low[i] >> index) & 1;
+ long high = (bcTrinary.high[i] >> index) & 1;
+
+ if (low == 1 && high == 0) {
+ result[i] = -1;
+ continue;
+ }
+
+ if (low == 0 && high == 1) {
+ result[i] = 1;
+ continue;
+ }
+
+ result[i] = 0;
+ }
+ return result;
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/com/iota/iri/crypto/batched/BCTernaryMultiplexer.java b/src/main/java/com/iota/iri/crypto/batched/BCTernaryMultiplexer.java
new file mode 100644
index 0000000000..51415cfeae
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BCTernaryMultiplexer.java
@@ -0,0 +1,58 @@
+package com.iota.iri.crypto.batched;
+
+import java.util.List;
+
+/**
+ * Multiplexes input trits data to a {@link BCTrinary}.
+ */
+public class BCTernaryMultiplexer {
+
+ private List inputs;
+
+ /**
+ * Creates a new {@link BCTernaryMultiplexer} which multiplexes
+ * the given trits data.
+ * @param inputs the input trits data to multiplex
+ */
+ public BCTernaryMultiplexer(List inputs) {
+ this.inputs = inputs;
+ }
+
+ /**
+ * Multiplexes the input data into a binary-encoded-ternary format.
+ *
+ * @return the extracted data in binary-encoded-ternary format
+ */
+ public BCTrinary extract() {
+ final int trinariesCount = inputs.size();
+ final int tritsCount = inputs.get(0).length;
+
+ BCTrinary result = new BCTrinary(new long[tritsCount], new long[tritsCount]);
+ for (int i = 0; i < tritsCount; i++) {
+ BCTrit bcTrit = new BCTrit();
+
+ for (int j = 0; j < trinariesCount; j++) {
+ switch (inputs.get(j)[i]) {
+ case -1:
+ bcTrit.low |= 1L << j;
+ break;
+ case 1:
+ bcTrit.high |= 1L << j;
+ break;
+ case 0:
+ bcTrit.low |= 1L << j;
+ bcTrit.high |= 1L << j;
+ break;
+ default:
+ // do nothing
+ }
+ }
+
+ result.low[i] = bcTrit.low;
+ result.high[i] = bcTrit.high;
+ }
+
+ return result;
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/com/iota/iri/crypto/batched/BCTrinary.java b/src/main/java/com/iota/iri/crypto/batched/BCTrinary.java
new file mode 100644
index 0000000000..70ecd3bf20
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BCTrinary.java
@@ -0,0 +1,21 @@
+package com.iota.iri.crypto.batched;
+
+/**
+ * Represents multiplexed binary-encoded-ternary values.
+ */
+public class BCTrinary {
+
+ public long[] low;
+ public long[] high;
+
+ /**
+ * Creates a new {@link BCTrinary} with the given low/high bit long values.
+ * @param low the low bit values
+ * @param high the high bit values
+ */
+ public BCTrinary(long[] low, long[] high) {
+ this.low = low;
+ this.high = high;
+ }
+
+}
diff --git a/src/main/java/com/iota/iri/crypto/batched/BCTrit.java b/src/main/java/com/iota/iri/crypto/batched/BCTrit.java
new file mode 100644
index 0000000000..346f7600cf
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BCTrit.java
@@ -0,0 +1,13 @@
+package com.iota.iri.crypto.batched;
+
+/**
+ * Represents a single row of multiplexed binary-encoded-ternary values.
+ * The following mapping applies: trit value -1 => high 0, low 1;
+ * trit value 0 => high 1, low 1; trit value 1 => high 1, low 0.
+ */
+public class BCTrit {
+
+ public long low;
+ public long high;
+
+}
diff --git a/src/main/java/com/iota/iri/crypto/batched/BatchedBCTCurl.java b/src/main/java/com/iota/iri/crypto/batched/BatchedBCTCurl.java
new file mode 100644
index 0000000000..7f91283215
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BatchedBCTCurl.java
@@ -0,0 +1,143 @@
+package com.iota.iri.crypto.batched;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+/**
+ * BatchedBCTCurl takes care of batching up hashing requests and starts processing them through a BCTCurl once either
+ * all available slots are filled or no request is submitted within a given timeout.
+ */
+public class BatchedBCTCurl implements BatchedHasher {
+
+ private static final Logger log = LoggerFactory.getLogger(BatchedBCTCurl.class);
+
+ // we have max 64 bits/slots available for requests to fill up
+ private final static int MAX_BATCH_SIZE = 64;
+
+ private ArrayBlockingQueue reqQueue;
+ private int hashLength;
+ private int numberOfRounds;
+ private int batchTimeoutMilliSec;
+
+ /**
+ * Creates a new {@link BatchedBCTCurl} with the given hash length, number of rounds and default batch timeout.
+ *
+ * @param hashLength the desired hash length
+ * @param numberOfRounds the number of hashing rounds to apply
+ */
+ public BatchedBCTCurl(int hashLength, int numberOfRounds) {
+ this.hashLength = hashLength;
+ this.numberOfRounds = numberOfRounds;
+ this.batchTimeoutMilliSec = BatchedHasher.DEFAULT_BATCH_TIMEOUT_MILLISECONDS;
+ this.reqQueue = new ArrayBlockingQueue<>(MAX_BATCH_SIZE * 2);
+ }
+
+ /**
+ * Creates a new {@link BatchedBCTCurl} with the given hash length, number of rounds and batch timeout.
+ *
+ * @param hashLength the desired hash length
+ * @param numberOfRounds the number of hashing rounds to apply
+ * @param timeoutMilliseconds the timeout to wait for new incoming hashing requests before starting the process
+ */
+ public BatchedBCTCurl(int hashLength, int numberOfRounds, int timeoutMilliseconds) {
+ this.hashLength = hashLength;
+ this.numberOfRounds = numberOfRounds;
+ this.batchTimeoutMilliSec = timeoutMilliseconds;
+ this.reqQueue = new ArrayBlockingQueue<>(MAX_BATCH_SIZE * 2);
+ }
+
+ @Override
+ public void submitHashingRequest(HashRequest req) {
+ try {
+ reqQueue.put(req);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void run() {
+ List reqs = new ArrayList<>();
+ long last = System.currentTimeMillis();
+ long processed = 0, cycles = 0, cyclesTimeSum = 0;
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ long start = System.currentTimeMillis();
+ // await the first request
+ reqs.add(reqQueue.take());
+
+ // batch up requests until we hit the timeout once
+ while (true) {
+ HashRequest newReq = reqQueue.poll(batchTimeoutMilliSec, TimeUnit.MILLISECONDS);
+
+ // didn't get any request within the timeout, so
+ // start processing the batched-up requests.
+ if (newReq == null) {
+ break;
+ }
+ reqs.add(newReq);
+ if (reqs.size() == MAX_BATCH_SIZE) {
+ break;
+ }
+ }
+ processed += reqs.size();
+ process(reqs);
+ reqs.clear();
+
+ // remember some stats
+ long now = System.currentTimeMillis();
+ cycles++;
+ cyclesTimeSum += now - start;
+
+ // print some stats every now and then
+ if (now - last >= 20000L) {
+ long maxReqsPossibleToBeProcessed = cycles * MAX_BATCH_SIZE;
+ double ratio = Math.floor(((double) processed / (double) maxReqsPossibleToBeProcessed) * 100);
+ double avgCycleTime = cyclesTimeSum / cycles;
+ log.info(
+ "batching saturation ratio {}% (processed {} / max possible {}), cycles {}, avg. cycle time {}ms",
+ ratio, processed, maxReqsPossibleToBeProcessed, cycles, avgCycleTime);
+ last = now;
+ processed = 0;
+ cycles = 0;
+ cyclesTimeSum = 0;
+ }
+ } catch (InterruptedException e) {
+ log.info("shutdown signal received");
+ Thread.currentThread().interrupt();
+ }
+ }
+ log.info("BatchedBCTCurl shutdown");
+ }
+
+ /**
+ * Processes the list of the given requests and executes the callbacks provided with each request after completion.
+ *
+ * @param reqs The requests to process.
+ */
+ private void process(List reqs) {
+ // multiplex input data
+ ArrayList inputs = reqs.stream().map(HashRequest::getInput)
+ .collect(Collectors.toCollection(ArrayList::new));
+ BCTernaryMultiplexer multiplexer = new BCTernaryMultiplexer(inputs);
+ BCTrinary multiplexedData = multiplexer.extract();
+
+ // hash
+ BCTCurl bctCurl = new BCTCurl(hashLength, numberOfRounds);
+ bctCurl.reset();
+ bctCurl.absorb(multiplexedData);
+
+ // demultiplex and fire callbacks
+ BCTrinary result = bctCurl.squeeze(hashLength);
+ BCTernaryDemultiplexer demultiplexer = new BCTernaryDemultiplexer(result);
+ for (int i = 0; i < reqs.size(); i++) {
+ reqs.get(i).getCallback().process(demultiplexer.get(i));
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/com/iota/iri/crypto/batched/BatchedHasher.java b/src/main/java/com/iota/iri/crypto/batched/BatchedHasher.java
new file mode 100644
index 0000000000..670473b117
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BatchedHasher.java
@@ -0,0 +1,23 @@
+package com.iota.iri.crypto.batched;
+
+/**
+ * A BatchedHasher is a hasher which collects inputs in order
+ * to perform optimized hashing by hashing multiple inputs at once.
+ */
+public interface BatchedHasher extends Runnable {
+
+ /**
+ * Default max timeout in milliseconds {@link BatchedHasher}s
+ * await for a new incoming request before starting the batched hashing process.
+ */
+ int DEFAULT_BATCH_TIMEOUT_MILLISECONDS = 50;
+
+ /**
+ * Submits the given request to the {@link BatchedHasher} for processing.
+ * The request's callback is executed within the thread of the BatchedHasher
+ * upon completion of the processing.
+ *
+ * @param req The hashing request.
+ */
+ void submitHashingRequest(HashRequest req);
+}
diff --git a/src/main/java/com/iota/iri/crypto/batched/BatchedHasherFactory.java b/src/main/java/com/iota/iri/crypto/batched/BatchedHasherFactory.java
new file mode 100644
index 0000000000..bb36d214a1
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/BatchedHasherFactory.java
@@ -0,0 +1,53 @@
+package com.iota.iri.crypto.batched;
+
+import com.iota.iri.crypto.Curl;
+import com.iota.iri.crypto.SpongeFactory;
+
+/**
+ * Creates {@link BatchedHasher} objects based on the required type.
+ */
+public class BatchedHasherFactory {
+
+ /**
+ * The specific implementations of a {@link BatchedHasher}.
+ */
+ public enum Type {
+ BCTCURL81,
+ BCTCURL27,
+ FakeBatchedCURL81,
+ FakeBatchedCURL27,
+ }
+
+ /**
+ * Creates a new {@link BatchedHasher} instance with a default
+ * batch timeout of {@link BatchedHasher#DEFAULT_BATCH_TIMEOUT_MILLISECONDS}.
+ *
+ * @param type the specific implementation of the {@link BatchedHasher}
+ * @return the BatchedHasher instance
+ */
+ public static BatchedHasher create(Type type) {
+ return create(type, BatchedHasher.DEFAULT_BATCH_TIMEOUT_MILLISECONDS);
+ }
+
+ /**
+ * Creates a new {@link BatchedHasher} instance.
+ *
+ * @param type the specific implementation of the {@link BatchedHasher}
+ * @return the BatchedHasher instance
+ */
+ public static BatchedHasher create(Type type, int batchTimeoutMilliSecs) {
+ switch (type) {
+ case BCTCURL81:
+ return new BatchedBCTCurl(Curl.HASH_LENGTH, 81, batchTimeoutMilliSecs);
+ case BCTCURL27:
+ return new BatchedBCTCurl(Curl.HASH_LENGTH, 27, batchTimeoutMilliSecs);
+ case FakeBatchedCURL81:
+ return new FakeBatchedCurl(Curl.HASH_LENGTH, SpongeFactory.Mode.CURLP81);
+ case FakeBatchedCURL27:
+ return new FakeBatchedCurl(Curl.HASH_LENGTH, SpongeFactory.Mode.CURLP27);
+ default:
+ return null;
+ }
+ }
+
+}
diff --git a/src/main/java/com/iota/iri/crypto/batched/FakeBatchedCurl.java b/src/main/java/com/iota/iri/crypto/batched/FakeBatchedCurl.java
new file mode 100644
index 0000000000..2da806d4fe
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/FakeBatchedCurl.java
@@ -0,0 +1,41 @@
+package com.iota.iri.crypto.batched;
+
+import com.iota.iri.crypto.Sponge;
+import com.iota.iri.crypto.SpongeFactory;
+
+/**
+ * FakeBatchedCurl implements the {@link BatchedHasher} interface
+ * but doesn't actually do any batching. The callbacks are called
+ * within the thread which submits the hashing requests.
+ */
+public class FakeBatchedCurl implements BatchedHasher {
+
+ private int hashLength;
+ private Sponge spongeFunc;
+
+ /**
+ * Creates a new {@link FakeBatchedCurl} with the given
+ * hash length and mode.
+ *
+ * @param hashLength the desired hash length
+ * @param mode the mode of the sponge function to use
+ */
+ public FakeBatchedCurl(int hashLength, SpongeFactory.Mode mode) {
+ this.hashLength = hashLength;
+ this.spongeFunc = SpongeFactory.create(mode);
+ }
+
+ @Override
+ public void submitHashingRequest(HashRequest req) {
+ spongeFunc.absorb(req.getInput(), 0, req.getInput().length);
+ byte[] hashTrits = new byte[hashLength];
+ spongeFunc.squeeze(hashTrits, 0, hashLength);
+ req.getCallback().process(hashTrits);
+ spongeFunc.reset();
+ }
+
+ @Override
+ public void run() {
+ // do nothing
+ }
+}
diff --git a/src/main/java/com/iota/iri/crypto/batched/HashRequest.java b/src/main/java/com/iota/iri/crypto/batched/HashRequest.java
new file mode 100644
index 0000000000..2e6014e5b4
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/HashRequest.java
@@ -0,0 +1,37 @@
+package com.iota.iri.crypto.batched;
+
+/**
+ * A HashRequest represents a request against a {@link BatchedHasher} to hash
+ * something and execute the given callback upon completion of the hashing.
+ */
+public class HashRequest {
+
+ private byte[] input;
+ private HashRequestCallback callback;
+
+ /**
+ * Creates a new {@link HashRequest} with the given input and callback.
+ * @param input the trits input to hash
+ * @param callback the callback to fire upon completion
+ */
+ public HashRequest(byte[] input, HashRequestCallback callback) {
+ this.input = input;
+ this.callback = callback;
+ }
+
+ /**
+ * Gets the input of this {@link HashRequest}.
+ * @return the input
+ */
+ public byte[] getInput() {
+ return input;
+ }
+
+ /**
+ * Gets the callback of this {@link HashRequest}.
+ * @return the callback
+ */
+ public HashRequestCallback getCallback() {
+ return callback;
+ }
+}
diff --git a/src/main/java/com/iota/iri/crypto/batched/HashRequestCallback.java b/src/main/java/com/iota/iri/crypto/batched/HashRequestCallback.java
new file mode 100644
index 0000000000..e1a0d3235b
--- /dev/null
+++ b/src/main/java/com/iota/iri/crypto/batched/HashRequestCallback.java
@@ -0,0 +1,12 @@
+package com.iota.iri.crypto.batched;
+
+/**
+ * A callback executed with the result of a {@link HashRequest}.
+ */
+public interface HashRequestCallback {
+ /**
+ * The callback which handles the result hash trits.
+ * @param trits the result hash trits
+ */
+ void process(byte[] trits);
+}
diff --git a/src/main/java/com/iota/iri/model/persistables/Transaction.java b/src/main/java/com/iota/iri/model/persistables/Transaction.java
index 98a7d60a54..f803a4b476 100644
--- a/src/main/java/com/iota/iri/model/persistables/Transaction.java
+++ b/src/main/java/com/iota/iri/model/persistables/Transaction.java
@@ -40,6 +40,10 @@ public class Transaction implements Persistable {
public int validity = 0;
public int type = TransactionViewModel.PREFILLED_SLOT;
+
+ /**
+ * The time when the transaction arrived. In milliseconds.
+ */
public long arrivalTime = 0;
//public boolean confirmed = false;
diff --git a/src/main/java/com/iota/iri/network/FIFOCache.java b/src/main/java/com/iota/iri/network/FIFOCache.java
new file mode 100644
index 0000000000..4ba19e3281
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/FIFOCache.java
@@ -0,0 +1,104 @@
+package com.iota.iri.network;
+
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * The {@link FIFOCache} is a simple FIFO cache which removes entries at the front of the queue when the capacity is
+ * reached.
+ *
+ * @param the key type
+ * @param the value type
+ */
+public class FIFOCache {
+
+ private ReadWriteLock cacheLock = new ReentrantReadWriteLock(true);
+ private final int capacity;
+ private Map map = new LinkedHashMap<>();
+ private AtomicLong cacheHits = new AtomicLong();
+ private AtomicLong cacheMisses = new AtomicLong();
+
+ /**
+ * Creates a new {@link FIFOCache}.
+ *
+ * @param capacity the maximum capacity of the cache
+ */
+ public FIFOCache(int capacity) {
+ this.capacity = capacity;
+ }
+
+ /**
+ * Gets the entry by the given key.
+ *
+ * @param key the key to use to retrieve the entry
+ * @return the entry
+ */
+ public V get(K key) {
+ try {
+ cacheLock.readLock().lock();
+ V v = this.map.get(key);
+ if (v == null) {
+ cacheMisses.incrementAndGet();
+ } else {
+ cacheHits.incrementAndGet();
+ }
+ return v;
+ } finally {
+ cacheLock.readLock().unlock();
+ }
+ }
+
+ /**
+ * Adds the given entry by the given key.
+ *
+ * @param key the key to use for the entry
+ * @param value the value of the entry
+ * @return the added entry
+ */
+ public V put(K key, V value) {
+ try {
+ cacheLock.writeLock().lock();
+ if (this.map.containsKey(key)) {
+ return value;
+ }
+ if (this.map.size() >= this.capacity) {
+ Iterator it = this.map.keySet().iterator();
+ it.next();
+ it.remove();
+ }
+ return this.map.put(key, value);
+ } finally {
+ cacheLock.writeLock().unlock();
+ }
+ }
+
+ /**
+ * Gets the amount of cache hits.
+ *
+ * @return amount of cache hits
+ */
+ public long getCacheHits() {
+ return cacheHits.get();
+ }
+
+ /**
+ * Gets the amount of cache misses.
+ *
+ * @return amount of cache misses
+ */
+ public long getCacheMisses() {
+ return cacheMisses.get();
+ }
+
+ /**
+ * Resets the cache hits and misses stats back to 0.
+ */
+ public void resetCacheStats() {
+ cacheHits.set(0);
+ cacheMisses.set(0);
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/Neighbor.java b/src/main/java/com/iota/iri/network/Neighbor.java
deleted file mode 100644
index 5b459e77f8..0000000000
--- a/src/main/java/com/iota/iri/network/Neighbor.java
+++ /dev/null
@@ -1,123 +0,0 @@
-package com.iota.iri.network;
-
-import org.apache.commons.lang3.StringUtils;
-
-import java.net.DatagramPacket;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.SocketAddress;
-import java.util.concurrent.atomic.AtomicInteger;
-
-public abstract class Neighbor {
-
- private final InetSocketAddress address;
-
- private long numberOfAllTransactions;
- private long numberOfNewTransactions;
- private long numberOfInvalidTransactions;
- private long randomTransactionRequests;
- private long numberOfSentTransactions;
- private long numberOfStaleTransactions;
-
- private final boolean flagged;
- public boolean isFlagged() {
- return flagged;
- }
-
- private final static AtomicInteger numPeers = new AtomicInteger(0);
- public static int getNumPeers() {
- return numPeers.get();
- }
- public static void incNumPeers() {
- numPeers.incrementAndGet();
- }
-
- private final String hostAddress;
-
- public String getHostAddress() {
- return hostAddress;
- }
-
- public Neighbor(final InetSocketAddress address, boolean isConfigured) {
- this.address = address;
- this.hostAddress = address.getAddress().getHostAddress();
- this.flagged = isConfigured;
- }
-
- public abstract void send(final DatagramPacket packet);
- public abstract int getPort();
- public abstract String connectionType();
-
- @Override
- public boolean equals(final Object obj) {
- return this == obj || !((obj == null) || (obj.getClass() != this.getClass())) && address.equals(((Neighbor) obj).address);
- }
-
- @Override
- public int hashCode() {
- return address.hashCode();
- }
-
- public InetSocketAddress getAddress() {
- return address;
- }
-
- protected boolean matches(SocketAddress address) {
- if (address instanceof InetSocketAddress) {
- // faster than fallback
- InetAddress adr = ((InetSocketAddress) address).getAddress();
- return adr != null && StringUtils.equals(adr.getHostAddress(), hostAddress);
- } else { // fallback
- return address != null && address.toString().contains(hostAddress);
- }
- }
-
- void incAllTransactions() {
- numberOfAllTransactions++;
- }
-
- void incNewTransactions() {
- numberOfNewTransactions++;
- }
-
- void incRandomTransactionRequests() {
- randomTransactionRequests++;
- }
-
- public void incInvalidTransactions() {
- numberOfInvalidTransactions++;
- }
-
- void incStaleTransactions() {
- numberOfStaleTransactions++;
- }
-
- public void incSentTransactions() {
- numberOfSentTransactions++;
- }
-
- public long getNumberOfAllTransactions() {
- return numberOfAllTransactions;
- }
-
- public long getNumberOfInvalidTransactions() {
- return numberOfInvalidTransactions;
- }
-
- public long getNumberOfStaleTransactions() {
- return numberOfStaleTransactions;
- }
-
- public long getNumberOfNewTransactions() {
- return numberOfNewTransactions;
- }
-
- public long getNumberOfRandomTransactionRequests() {
- return randomTransactionRequests;
- }
-
- public long getNumberOfSentTransactions() {
- return numberOfSentTransactions;
- }
-
-}
diff --git a/src/main/java/com/iota/iri/network/NeighborRouter.java b/src/main/java/com/iota/iri/network/NeighborRouter.java
new file mode 100644
index 0000000000..25ac7a6c33
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/NeighborRouter.java
@@ -0,0 +1,962 @@
+package com.iota.iri.network;
+
+import com.iota.iri.conf.BaseIotaConfig;
+import com.iota.iri.conf.NetworkConfig;
+import com.iota.iri.conf.ProtocolConfig;
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.model.Hash;
+import com.iota.iri.network.neighbor.Neighbor;
+import com.iota.iri.network.neighbor.NeighborState;
+import com.iota.iri.network.neighbor.impl.NeighborImpl;
+import com.iota.iri.network.pipeline.TransactionProcessingPipeline;
+import com.iota.iri.network.pipeline.TransactionProcessingPipelineImpl;
+import com.iota.iri.network.protocol.Handshake;
+import com.iota.iri.network.protocol.Protocol;
+import com.iota.iri.utils.Converter;
+
+import java.io.IOException;
+import java.net.*;
+import java.nio.ByteBuffer;
+import java.nio.channels.*;
+import java.security.SecureRandom;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import net.openhft.hashing.LongHashFunction;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A NeighborRouter takes care of managing connections to {@link Neighbor} instances, executing reads and writes from/to
+ * neighbors and ensuring that wanted neighbors are connected.
+ * A neighbor is identified by its identity which is made up of the IP address and the neighbor's own server socket port
+ * for new incoming connections; for example: 153.59.34.101:15600.
+ * The NeighborRouter and foreign neighbor will first exchange their server socket port via a handshaking packet, in
+ * order to fully build up the identity between each other. If handshaking fails, the connection is dropped.
+ */
+public class NeighborRouter {
+
+ private static final Logger log = LoggerFactory.getLogger(NeighborRouter.class);
+ private static final String PROTOCOL_PREFIX = "tcp://";
+
+ private final AtomicBoolean shutdown = new AtomicBoolean(false);
+
+ private static final SecureRandom rnd = new SecureRandom();
+ private final ExecutorService executor = Executors.newSingleThreadExecutor(r -> new Thread(r, "Neighbor Router"));
+
+ // external
+ private NetworkConfig networkConfig;
+ private ProtocolConfig protocolConfig;
+ private TransactionRequester txRequester;
+ private TransactionProcessingPipeline txPipeline;
+
+ // internal
+ private Selector selector;
+ private ServerSocketChannel serverSocketChannel;
+ private static final LongHashFunction TX_CACHE_DIGEST_HASH_FUNC = LongHashFunction.xx();
+
+ /**
+ * a mapping of host address + port (identity) to fully handshaked/connected neighbor
+ */
+ private ConcurrentHashMap connectedNeighbors = new ConcurrentHashMap<>();
+
+ /**
+ * neighbors which we want to connect to. entries are added upon initialization of the NeighborRouter, when a
+ * neighbor is added through addNeighbors and when a connection attempt failed.
+ */
+ private Set reconnectPool = new CopyOnWriteArraySet<>();
+
+ /**
+ * contains the IP addresses of neighbors which are allowed to connect to us. we use two sets as we allow multiple
+ * connections from a single IP address.
+ */
+ private Set hostsWhitelist = new HashSet<>();
+
+ /**
+ * contains the mapping of IP addresses to their domain names. this is used to map an initialized connection to the
+ * domain which was defined in the configuration or added on addNeighbors, to ensure that a reconnect attempt to
+ * the given neighbor is done through the resolved IP address of the origin domain.
+ */
+ private Map ipToDomainMapping = new HashMap<>();
+
+ /**
+ * contains the IP address + port as declared in the configuration file plus subsequent entries added by
+ * addNeighbors. the identity of a neighbor is its IP address and its own server socket port.
+ */
+ private Set allowedNeighbors = new HashSet<>();
+
+ /**
+ * used to silently drop connections. contains plain IP addresses
+ */
+ private Set hostsBlacklist = new CopyOnWriteArraySet<>();
+
+ /**
+ * used to force the selection loop to reconnect to wanted neighbors
+ */
+ private AtomicBoolean forceReconnectAttempt = new AtomicBoolean(false);
+
+ /**
+ * used to match against neighbor's coordinator address to cancel the connection in case it doesn't match this
+ * node's own configured coordinator address
+ */
+ private byte[] byteEncodedCooAddress;
+
+ /**
+ * Defines whether a neighbor got added/removed or not and the corresponding reason.
+ */
+ public enum NeighborMutOp {
+ OK, SLOTS_FILLED, URI_INVALID, UNRESOLVED_DOMAIN, UNKNOWN_NEIGHBOR
+ }
+
+ /**
+ * Initializes the dependencies of the {@link NeighborRouter}.
+ *
+ * @param networkConfig Network related configuration parameters
+ * @param protocolConfig Protocol related configuration parameters
+ * @param txRequester {@link TransactionRequester} instance to load hashes of requested transactions when
+ * gossiping
+ * @param txPipeline {@link TransactionProcessingPipelineImpl} passed to newly created {@link Neighbor}
+ * instances
+ */
+ public void init(NetworkConfig networkConfig, ProtocolConfig protocolConfig, TransactionRequester txRequester,
+ TransactionProcessingPipeline txPipeline) {
+ this.txRequester = txRequester;
+ this.txPipeline = txPipeline;
+ this.networkConfig = networkConfig;
+ this.protocolConfig = protocolConfig;
+
+ // reduce the coordinator address to its byte encoded representation
+ byte[] tritsEncodedCooAddress = new byte[protocolConfig.getCoordinator().toString().length()
+ * Converter.NUMBER_OF_TRITS_IN_A_TRYTE];
+ Converter.trits(protocolConfig.getCoordinator().toString(), tritsEncodedCooAddress, 0);
+ byteEncodedCooAddress = new byte[Handshake.BYTE_ENCODED_COO_ADDRESS_BYTES_LENGTH];
+ Converter.bytes(tritsEncodedCooAddress, byteEncodedCooAddress);
+ }
+
+ private void initNeighbors() {
+ // parse URIs
+ networkConfig.getNeighbors().stream()
+ .distinct()
+ .map(NeighborRouter::parseURI)
+ .filter(Optional::isPresent)
+ .map(Optional::get)
+ .forEach(uri -> reconnectPool.add(uri));
+ }
+
+ /**
+ * Starts a dedicated thread for the {@link NeighborRouter} and then starts the routing mechanism.
+ */
+ public void start() {
+ executor.execute(this::route);
+ }
+
+ /**
+ *
+ * Starts the routing mechanism which first initialises connections to neighbors from the configuration and then
+ * continuously reads and writes messages from/to neighbors.
+ *
+ *
+ * This method will also try to reconnect to wanted neighbors by the given
+ * {@link BaseIotaConfig#getReconnectAttemptIntervalSeconds()} value.
+ *
+ */
+ public void route() {
+ log.info("starting neighbor router");
+
+ // run selector loop
+ try {
+ selector = Selector.open();
+ serverSocketChannel = ServerSocketChannel.open();
+ serverSocketChannel.configureBlocking(false);
+ serverSocketChannel.register(selector, SelectionKey.OP_ACCEPT);
+ InetSocketAddress tcpBindAddr = new InetSocketAddress(networkConfig.getNeighboringSocketAddress(),
+ networkConfig.getNeighboringSocketPort());
+ serverSocketChannel.socket().bind(tcpBindAddr);
+ log.info("bound server TCP socket to {}", tcpBindAddr);
+
+ // parse neighbors from configuration
+ initNeighbors();
+
+ // init connections to the wanted neighbors,
+ // this also ensures that the whitelists are updated with the corresponding
+ // IP addresses and domain name mappings.
+ connectToWantedNeighbors();
+
+ long lastReconnectAttempts = System.currentTimeMillis();
+ long reconnectAttemptTimeout = TimeUnit.SECONDS
+ .toMillis(networkConfig.getReconnectAttemptIntervalSeconds());
+
+ while (!shutdown.get()) {
+ selector.select(reconnectAttemptTimeout);
+ if (shutdown.get()) {
+ break;
+ }
+
+ // reinitialize connections to wanted neighbors
+ long now = System.currentTimeMillis();
+ if (forceReconnectAttempt.get() || now - lastReconnectAttempts > reconnectAttemptTimeout) {
+ lastReconnectAttempts = now;
+ forceReconnectAttempt.set(false);
+ connectToWantedNeighbors();
+ }
+
+ Iterator iterator = selector.selectedKeys().iterator();
+ while (iterator.hasNext()) {
+ try {
+ SelectionKey key = iterator.next();
+
+ if (key.isAcceptable()) {
+ handleNewConnection(key);
+ continue;
+ }
+
+ SocketChannel channel = (SocketChannel) key.channel();
+ Neighbor neighbor = (Neighbor) key.attachment();
+ String identity = neighbor.getHostAddressAndPort();
+
+ // check whether marked for disconnect
+ if (neighbor.getState() == NeighborState.MARKED_FOR_DISCONNECT) {
+ allowedNeighbors.remove(identity);
+ closeNeighborConnection(channel, identity, selector);
+ removeFromReconnectPool(neighbor);
+ continue;
+ }
+
+ if (key.isConnectable()) {
+ handleConnect(channel, key, identity, neighbor);
+ continue;
+ }
+
+ if (key.isWritable() && !handleWrite(channel, key, identity, neighbor)) {
+ continue;
+ }
+
+ if (key.isReadable()) {
+ handleRead(channel, identity, neighbor);
+ }
+
+ } finally {
+ iterator.remove();
+ }
+ }
+ }
+ } catch (IOException e) {
+ log.error("error occurred in the neighbor router", e);
+ } finally {
+ try {
+ if (selector != null) {
+ // close all connections
+ for (SelectionKey keys : selector.keys()) {
+ keys.channel().close();
+ }
+ selector.close();
+ }
+ if (serverSocketChannel != null) {
+ serverSocketChannel.close();
+ }
+ } catch (IOException e) {
+ log.error("error occurred while trying to gracefully shutdown the neighbor router", e);
+ }
+ log.info("neighbor router stopped");
+ }
+ }
+
+ /**
+ * Handles a new incoming connection and if it passes some initial conditions (via
+ * {@link NeighborRouter#okToConnect(String, SocketChannel)}), will start the handshaking process by placing a
+ * handshake packet into the connection's send queue.
+ *
+ * @param key the selection key associated with the server socket channel
+ * @return whether the new connection was accepted
+ */
+ private boolean handleNewConnection(SelectionKey key) {
+ try {
+ // new connection from neighbor
+ ServerSocketChannel srvSocket = (ServerSocketChannel) key.channel();
+ SocketChannel newConn = srvSocket.accept();
+ if (newConn == null) {
+ return false;
+ }
+ InetSocketAddress remoteAddr = (InetSocketAddress) newConn.getRemoteAddress();
+ if (!okToConnect(remoteAddr.getAddress().getHostAddress(), newConn)) {
+ return false;
+ }
+ configureSocket(newConn);
+ Neighbor newNeighbor = new NeighborImpl<>(selector, newConn, remoteAddr.getAddress().getHostAddress(),
+ Neighbor.UNKNOWN_REMOTE_SERVER_SOCKET_PORT, txPipeline);
+ String domain = ipToDomainMapping.get(remoteAddr.getAddress().getHostAddress());
+ if (domain != null) {
+ newNeighbor.setDomain(domain);
+ }
+ newNeighbor.send(Handshake.createHandshakePacket((char) networkConfig.getNeighboringSocketPort(),
+ byteEncodedCooAddress, (byte) protocolConfig.getMwm()));
+ log.info("new connection from {}, performing handshake...", newNeighbor.getHostAddress());
+ newConn.register(selector, SelectionKey.OP_READ | SelectionKey.OP_WRITE, newNeighbor);
+ return true;
+ } catch (IOException ex) {
+ log.info("couldn't accept connection. reason: {}", ex.getMessage());
+ }
+ return false;
+ }
+
+ /**
+ *
+ * Finalizes the underlying connection sequence by calling the channel's finishConnect() method.
+ *
+ *
+ * This method does not finalize the logical protocol level connection, rather, it kicks off that process by
+ * placing a handshake packet into the neighbor's send queue if the connection was successfully established.
+ *
+ *
+ *
+ * Connections passed into this method are self-initialized and were not accepted by the server socket channel.
+ *
+ *
+ * In case the connection sequence fails, the connection will be dropped.
+ *
+ *
+ * @param channel the associated channel for the given connection
+ * @param key the associated selection key associated with the given connection
+ * @param identity the identity of the connection/neighbor
+ * @param neighbor the neighbor associated with this connection
+ * @return whether the connection sequence finished successfully
+ */
+ private boolean handleConnect(SocketChannel channel, SelectionKey key, String identity, Neighbor neighbor) {
+ try {
+ // the neighbor was faster than us to setup the connection
+ if (connectedNeighbors.containsKey(identity)) {
+ log.info("neighbor {} is already connected", identity);
+ removeFromReconnectPool(neighbor);
+ key.cancel();
+ return false;
+ }
+ if (channel.finishConnect()) {
+ log.info("established connection to neighbor {}, now performing handshake...", identity);
+ removeFromReconnectPool(neighbor);
+ // remove connect interest
+ key.interestOps(SelectionKey.OP_READ | SelectionKey.OP_WRITE);
+ // add handshaking packet as the initial packet to send
+ neighbor.send(Handshake.createHandshakePacket((char) networkConfig.getNeighboringSocketPort(),
+ byteEncodedCooAddress, (byte) protocolConfig.getMwm()));
+ return true;
+ }
+ } catch (IOException ex) {
+ log.info("couldn't establish connection to neighbor {}, will attempt to reconnect later. reason: {}",
+ identity, ex.getMessage());
+ closeNeighborConnection(channel, identity, selector);
+ }
+ return false;
+ }
+
+ /**
+ *
+ * Handles the write readiness by the given channel by writing a message into its send buffer.
+ *
+ *
+ *
+ * If there was no message to send, then the channel is de-registered from write interests. If the channel would not
+ * be de-registered from write interests, the channel's write readiness would constantly fire this method even if
+ * there's nothing to send, causing high CPU usage. Re-registering the channel for write interest is implicitly done
+ * via the caller who's interested to send a message through the neighbor's implementation.
+ *
+ *
+ * In case the write fails with an IOException, the connection will be dropped.
+ *
+ *
+ * @param channel the associated channel for the given connection
+ * @param key the associated selection key associated with the given connection
+ * @param identity the identity of the connection/neighbor
+ * @param neighbor the neighbor associated with this connection
+ * @return whether the write operation was successful or not
+ */
+ private boolean handleWrite(SocketChannel channel, SelectionKey key, String identity, Neighbor neighbor) {
+ try {
+ switch (neighbor.write()) {
+ case 0:
+ // nothing was written, because no message was available to be sent.
+ // lets unregister this channel from write interests until at least
+ // one message is back available for sending.
+ key.interestOps(SelectionKey.OP_READ);
+ break;
+ case -1:
+ if (neighbor.getState() == NeighborState.HANDSHAKING) {
+ log.info("closing connection to {} as handshake packet couldn't be written", identity);
+ closeNeighborConnection(channel, null, selector);
+ } else {
+ closeNeighborConnection(channel, identity, selector);
+ }
+ return false;
+ default:
+ // bytes were written to the channel
+ }
+ return true;
+ } catch (IOException ex) {
+ log.warn("unable to write message to neighbor {}. reason: {}", identity, ex.getMessage());
+ closeNeighborConnection(channel, identity, selector);
+ addToReconnectPool(neighbor);
+ }
+ return false;
+ }
+
+ /**
+ *
+ * Handles the read readiness by the given channel by reading from the channel's receive buffer.
+ *
+ *
+ * In case the read fails with an IOException, the connection will be dropped.
+ *
+ *
+ * @param channel the associated channel for the given connection
+ * @param identity the identity of the connection/neighbor
+ * @param neighbor the neighbor associated with this connection
+ * @return whether the read operation was successful or not
+ */
+ private boolean handleRead(SocketChannel channel, String identity, Neighbor neighbor) {
+ try {
+ switch (neighbor.getState()) {
+ case READY_FOR_MESSAGES:
+ if (neighbor.read() == -1) {
+ closeNeighborConnection(channel, identity, selector);
+ return false;
+ }
+ break;
+ case HANDSHAKING:
+ if (finalizeHandshake(identity, neighbor, channel) && availableNeighborSlotsFilled()) {
+ // if all known neighbors or max neighbors are connected we are
+ // no longer interested in any incoming connections
+ // (as long as no neighbor dropped the connection)
+ SelectionKey srvKey = serverSocketChannel.keyFor(selector);
+ srvKey.interestOps(0);
+ }
+ default:
+ // do nothing
+ }
+ return true;
+ } catch (IOException ex) {
+ log.warn("unable to read message from neighbor {}. reason: {}", identity, ex.getMessage());
+ closeNeighborConnection(channel, identity, selector);
+ addToReconnectPool(neighbor);
+ }
+ return false;
+ }
+
+ /**
+ * Computes the digest of the given transaction data.
+ *
+ * @param txBytes The raw byte encoded transaction data
+ * @return The digest of the transaction data
+ */
+ public static long getTxCacheDigest(byte[] txBytes) {
+ return TX_CACHE_DIGEST_HASH_FUNC.hashBytes(txBytes);
+ }
+
+ /**
+ * Adjusts the given socket's configuration.
+ *
+ * @param socketChannel the socket to configure
+ * @throws IOException throw during adjusting the socket's configuration
+ */
+ private void configureSocket(SocketChannel socketChannel) throws IOException {
+ socketChannel.socket().setTcpNoDelay(true);
+ socketChannel.socket().setSoLinger(true, 0);
+ socketChannel.configureBlocking(false);
+ }
+
+ /**
+ * Adds the given neighbor to the 'reconnect pool' of neighbors which this node will try to reconnect to. If the
+ * domain of the neighbor was known when the connection was established, it will be used to re-establish the
+ * connection to the neighbor, otherwise the neighbor's current known IP address is used.
+ * The neighbor is only added to the 'reconnect pool' if the neighbor was ready to send/process messages.
+ *
+ * @param neighbor the neighbor to attempt to reconnect to
+ * @return whether the neighbor got added to the reconnect pool or not
+ */
+ private boolean addToReconnectPool(Neighbor neighbor) {
+ if (neighbor.getState() != NeighborState.READY_FOR_MESSAGES) {
+ return false;
+ }
+ // try to pull out the origin domain which was used to establish
+ // the connection with this neighbor
+ String domain = ipToDomainMapping.get(neighbor.getHostAddress());
+ URI reconnectURI;
+ if (domain != null) {
+ reconnectURI = URI
+ .create(String.format("%s%s:%d", PROTOCOL_PREFIX, domain, neighbor.getRemoteServerSocketPort()));
+ } else {
+ reconnectURI = URI.create(String.format("%s%s", PROTOCOL_PREFIX, neighbor.getHostAddressAndPort()));
+ }
+ log.info("adding {} to the reconnect pool", reconnectURI);
+ return reconnectPool.add(reconnectURI);
+ }
+
+ /**
+ * Ensures that the neighbor is removed from the reconnect pool by using the neighbor's IP address and domain
+ * identity.
+ *
+ * @param neighbor the neighbor to remove from the reconnect pool
+ * @return whether the neighbor was removed from the reconnect pool or not
+ */
+ private boolean removeFromReconnectPool(Neighbor neighbor) {
+ URI raw = URI.create(String.format("%s%s", PROTOCOL_PREFIX, neighbor.getHostAddressAndPort()));
+ boolean removedByURI = reconnectPool.remove(raw);
+ String domain = neighbor.getDomain();
+ if (domain != null) {
+ URI withDomain = URI
+ .create(String.format("%s%s:%d", PROTOCOL_PREFIX, domain, neighbor.getRemoteServerSocketPort()));
+ if (reconnectPool.remove(withDomain)) {
+ return true;
+ }
+ }
+ return removedByURI;
+ }
+
+ /**
+ * Finalizes the handshaking to a {@link Neighbor} by reading the handshaking packet.
+ * A faulty handshaking will drop the neighbor connection.
+ * The connection will be dropped when:
+ *
+ * - the handshaking is faulty, meaning that a non handshaking packet was sent
+ * - {@link BaseIotaConfig#getMaxNeighbors()} has been reached
+ * - the neighbor has a different coordinator address set as we do
+ * - the neighbor uses a different minimum weight magnitude than we do
+ * - a non matching server socket port was communicated in the handshaking packet
+ * - the neighbor is already connected (checked by the identity)
+ * - the identity is not known (missing in {@link NeighborRouter#allowedNeighbors})
+ *
+ *
+ * @param identity The identity of the neighbor
+ * @param neighbor The {@link Neighbor} to finalize the handshaking with
+ * @param channel The associated {@link SocketChannel} of the {@link Neighbor}
+ * @return whether the handshaking was successful
+ * @throws IOException thrown when reading the handshake packet fails
+ */
+ private boolean finalizeHandshake(String identity, Neighbor neighbor, SocketChannel channel) throws IOException {
+ Handshake handshake = neighbor.handshake();
+ switch (handshake.getState()) {
+ case INIT:
+ // not fully read handshake packet
+ return false;
+ case FAILED:
+ // faulty handshaking
+ log.error("dropping connection to neighbor {} as handshaking was faulty", identity);
+ closeNeighborConnection(channel, identity, selector);
+ return false;
+ default:
+ // do nothing
+ }
+
+ // drop the connection if in the meantime the available neighbor slots were filled
+ if (availableNeighborSlotsFilled()) {
+ log.error("dropping handshaked connection to neighbor {} as all neighbor slots are filled", identity);
+ closeNeighborConnection(channel, null, selector);
+ return false;
+ }
+
+ // check whether same MWM is used
+ if (handshake.getMWM() != protocolConfig.getMwm()) {
+ log.error("dropping handshaked connection to neighbor {} as it uses a different MWM ({} instead of {})",
+ identity, handshake.getMWM(), protocolConfig.getMwm());
+ closeNeighborConnection(channel, null, selector);
+ return false;
+ }
+
+ // check whether the neighbor actually uses the same coordinator address
+ if (!Arrays.equals(byteEncodedCooAddress, handshake.getByteEncodedCooAddress())) {
+ log.error("dropping handshaked connection to neighbor {} as it uses a different coordinator address",
+ identity);
+ closeNeighborConnection(channel, null, selector);
+ return false;
+ }
+
+ // check whether we support the supported protocol versions by the neighbor
+ int supportedVersion = handshake.getNeighborSupportedVersion(Protocol.SUPPORTED_PROTOCOL_VERSIONS);
+ if (supportedVersion <= 0) {
+ log.error(
+ "dropping handshaked connection to neighbor {} as its highest supported protocol version {} is not supported",
+ identity, Math.abs(supportedVersion));
+ closeNeighborConnection(channel, null, selector);
+ return false;
+ }
+ neighbor.setProtocolVersion(supportedVersion);
+
+ // after a successful handshake, the neighbor's server socket port is initialized
+ // and thereby the identity of the neighbor is now fully distinguishable
+
+ // check whether the remote server socket port from the origin URI
+ // actually corresponds to the port advertised in the handshake packet
+ int originPort = neighbor.getRemoteServerSocketPort();
+ int handshakePort = handshake.getServerSocketPort();
+ if (originPort != Neighbor.UNKNOWN_REMOTE_SERVER_SOCKET_PORT && originPort != handshakePort) {
+ log.warn("dropping handshaked connection from {} as neighbor advertised "
+ + "wrong server socket port (wanted {}, got {})", identity, originPort, handshakePort);
+ closeNeighborConnection(channel, null, selector);
+ return false;
+ }
+ neighbor.setRemoteServerSocketPort(handshakePort);
+
+ // check if neighbor is already connected
+ String newIdentity = neighbor.getHostAddressAndPort();
+ if (connectedNeighbors.containsKey(newIdentity)) {
+ log.info("dropping handshaked connection from {} as neighbor is already connected", newIdentity);
+ // pass just host address to not actually delete the already existing connection/neighbor
+ closeNeighborConnection(channel, null, selector);
+ return false;
+ }
+
+ // check if the given host + server socket port combination is actually defined in the config/wanted
+ if (!networkConfig.isAutoTetheringEnabled() && !allowedNeighbors.contains(newIdentity)) {
+ log.info("dropping handshaked connection as neighbor from {} is not allowed to connect", newIdentity);
+ closeNeighborConnection(channel, null, selector);
+ return false;
+ }
+
+ log.info("neighbor connection to {} is ready for messages [latency {} ms, protocol version {}]", newIdentity,
+ System.currentTimeMillis() - handshake.getSentTimestamp(), supportedVersion);
+
+ // the neighbor is now ready to process actual protocol messages
+ neighbor.setState(NeighborState.READY_FOR_MESSAGES);
+
+ // we finally add the neighbor to the connected neighbors map
+ // if the handshake was successful and we got the remote port
+ connectedNeighbors.put(neighbor.getHostAddressAndPort(), neighbor);
+
+ // prevent reconnect attempts from the 'reconnect pool'
+ // by constructing the source URI which was used for this neighbor
+ removeFromReconnectPool(neighbor);
+
+ return true;
+ }
+
+ /**
+ * Initializes connections to wanted neighbors which are neighbors added by
+ * {@link NeighborRouter#addNeighbor(String)} or defined in the configuration.
+ *
+ * A connection attempt is only made if the domain name of the neighbor could be resolved to its IP address.
+ * Reconnect attempts will continuously try to resolve the domain name until the neighbor is explicitly removed via
+ * {@link NeighborRouter#removeNeighbor(String)}.
+ */
+ private void connectToWantedNeighbors() {
+ if (reconnectPool.isEmpty()) {
+ return;
+ }
+ log.info("establishing connections to {} wanted neighbors {}", reconnectPool.size(), reconnectPool.toArray());
+ reconnectPool.forEach(neighborURI -> {
+ InetSocketAddress inetAddr = new InetSocketAddress(neighborURI.getHost(), neighborURI.getPort());
+ try {
+ // if in the meantime the neighbor connected to us, we don't need to reinitialize a connection.
+ if (!inetAddr.isUnresolved()) {
+ String ipAddress = inetAddr.getAddress().getHostAddress();
+ String identity = String.format("%s:%d", ipAddress, inetAddr.getPort());
+ if (connectedNeighbors.containsKey(identity)) {
+ log.info("skipping connecting to {} as neighbor is already connected", identity);
+ reconnectPool.remove(neighborURI);
+ return;
+ }
+ }
+
+ initNeighborConnection(neighborURI, inetAddr);
+ } catch (IOException e) {
+ log.warn("unable to build socket for neighbor {}. reason: {}", neighborURI, e.getMessage());
+ }
+ });
+ }
+
+ /**
+ * Initializes a new {@link SocketChannel} to the given neighbor.
+ * The IP address of the neighbor is removed from the blacklist, added to the whitelist and registered as an allowed
+ * neighbor by its identity.
+ *
+ * @param neighborURI The {@link URI} of the neighbor to connect to
+ * @param addr The {@link InetSocketAddress} extracted from the {@link URI}
+ * @throws IOException if initializing the {@link SocketChannel} fails
+ */
+ private void initNeighborConnection(URI neighborURI, InetSocketAddress addr) throws IOException {
+ if (addr.isUnresolved()) {
+ log.warn("unable to resolve neighbor {} to IP address, will attempt to reconnect later", neighborURI);
+ return;
+ }
+
+ String ipAddress = addr.getAddress().getHostAddress();
+
+ // we are overriding a blacklist entry as we are explicitly trying to create a connection
+ hostsBlacklist.remove(ipAddress);
+
+ // allow connections from the given remote IP address to us.
+ // this comes into place if our own initialized connection fails
+ // but afterwards the added neighbor builds a connection to us.
+ hostsWhitelist.add(ipAddress);
+
+ // map the ip address to the domain
+ ipToDomainMapping.put(ipAddress, addr.getHostString());
+
+ // make the identity of the newly added neighbor clear, so that it gets rejected during handshaking
+ // finalisation, in case the communicated server socket port is wrong.
+ allowedNeighbors.add(String.format("%s:%d", addr.getAddress().getHostAddress(), addr.getPort()));
+
+ // init new TCP socket channel
+ SocketChannel tcpChannel = SocketChannel.open();
+ configureSocket(tcpChannel);
+ tcpChannel.connect(addr);
+ Neighbor neighbor = new NeighborImpl<>(selector, tcpChannel, addr.getAddress().getHostAddress(), addr.getPort(),
+ txPipeline);
+ neighbor.setDomain(addr.getHostName());
+ tcpChannel.register(selector, SelectionKey.OP_CONNECT, neighbor);
+ }
+
+ /**
+ * Checks whether the given host is allowed to connect given its IP address.
+ * The connection is allowed when:
+ *
+ * - the IP address is not in the {@link NeighborRouter#hostsBlacklist}
+ * - {@link BaseIotaConfig#getMaxNeighbors()} has not been reached
+ * - is whitelisted in {@link NeighborRouter#hostsWhitelist} (if {@link BaseIotaConfig#isAutoTetheringEnabled()}
+ * is false)
+ *
+ * The IP address is blacklisted to mute it from subsequent connection attempts. The blacklisting is removed if the
+ * IP address is added through {@link NeighborRouter#addNeighbor(String)}.
+ *
+ * @param ipAddress The IP address
+ * @param newNeighborConn The {@link SocketChannel} to close if the connection is not allowed
+ * @return true if allowed, false if not
+ * @throws IOException if anything goes wrong closing the {@link SocketChannel}
+ */
+ private boolean okToConnect(String ipAddress, SocketChannel newNeighborConn) throws IOException {
+ if (hostsBlacklist.contains(ipAddress)) {
+ // silently drop connection
+ newNeighborConn.close();
+ return false;
+ }
+ if (availableNeighborSlotsFilled()) {
+ log.info("dropping new connection from {} as all neighbor slots are filled", ipAddress);
+ newNeighborConn.close();
+ return false;
+ }
+ boolean whitelisted = hostsWhitelist.contains(ipAddress);
+ if (!whitelisted) {
+ if (!networkConfig.isAutoTetheringEnabled()) {
+ log.info("blacklisting/dropping new connection as neighbor from {} is not defined in the config",
+ ipAddress);
+ hostsBlacklist.add(ipAddress);
+ newNeighborConn.close();
+ return false;
+ }
+ log.info("new auto-tethering connection from {}", ipAddress);
+ }
+ return true;
+ }
+
+ /**
+ * Closes the connection to the neighbor, re-registers the {@link ServerSocketChannel} for
+ * {@link SelectionKey#OP_CONNECT} in case neighbor slots will be available again and finally removes the neighbor
+ * from the connected neighbors map.
+ *
+ * @param channel {@link SocketChannel} to close
+ * @param identity The identity of the neighbor, null must be passed if the neighbor should not be marked as not
+ * connected.
+ * @param selector The used {@link Selector}
+ */
+ private void closeNeighborConnection(SelectableChannel channel, String identity, Selector selector) {
+ try {
+ channel.close();
+ } catch (IOException e) {
+ log.error("error while closing connection: {}", e.getMessage());
+ }
+ if (identity == null) {
+ return;
+ }
+ if (connectedNeighbors.remove(identity) != null) {
+ log.info("removed neighbor {} from connected neighbors", identity);
+ // re-register the server socket for incoming connections as we will have a new slot open
+ if (availableNeighborSlotsFilled()) {
+ serverSocketChannel.keyFor(selector).interestOps(SelectionKey.OP_ACCEPT);
+ }
+ }
+ }
+
+ private boolean availableNeighborSlotsFilled() {
+ // while this check is not thread-safe, initiated connections will be dropped
+ // when their handshaking was done but already all neighbor slots are filled
+ return connectedNeighbors.size() >= networkConfig.getMaxNeighbors();
+ }
+
+ /**
+     * Adds the given neighbor to the {@link NeighborRouter}. The {@link Selector} is woken up and an attempt to connect
+ * to wanted neighbors is initiated.
+ *
+ * @param rawURI The URI of the neighbor
+ * @return whether the neighbor was added or not
+ */
+ public NeighborMutOp addNeighbor(String rawURI) {
+ if (availableNeighborSlotsFilled()) {
+ return NeighborMutOp.SLOTS_FILLED;
+ }
+ Optional optUri = parseURI(rawURI);
+ if (!optUri.isPresent()) {
+ return NeighborMutOp.URI_INVALID;
+ }
+ URI neighborURI = optUri.get();
+ // add to wanted neighbors
+ reconnectPool.add(neighborURI);
+ // wake up the selector and let it build connections to wanted neighbors
+ forceReconnectAttempt.set(true);
+ selector.wakeup();
+ return NeighborMutOp.OK;
+ }
+
+ /**
+ * Removes the given neighbor from the {@link NeighborRouter} by marking it for "disconnect". The neighbor is
+ * disconnected as soon as the next selector loop is executed.
+ *
+ * @param uri The URI of the neighbor
+ * @return whether the neighbor was removed or not
+ */
+ public NeighborMutOp removeNeighbor(String uri) {
+ Optional optUri = parseURI(uri);
+ if (!optUri.isPresent()) {
+ return NeighborMutOp.URI_INVALID;
+ }
+
+ URI neighborURI = optUri.get();
+ InetSocketAddress inetAddr = new InetSocketAddress(neighborURI.getHost(), neighborURI.getPort());
+ if (inetAddr.isUnresolved()) {
+ log.warn("unable to remove neighbor {} as IP address couldn't be resolved", uri);
+ return NeighborMutOp.UNRESOLVED_DOMAIN;
+ }
+
+ // remove the neighbor from connection attempts
+ reconnectPool.remove(neighborURI);
+ URI rawURI = URI.create(String.format("%s%s:%d", PROTOCOL_PREFIX, inetAddr.getAddress().getHostAddress(),
+ neighborURI.getPort()));
+ reconnectPool.remove(rawURI);
+
+ String identity = String.format("%s:%d", inetAddr.getAddress().getHostAddress(), inetAddr.getPort());
+ Neighbor neighbor = connectedNeighbors.get(identity);
+ if (neighbor == null) {
+ return NeighborMutOp.UNKNOWN_NEIGHBOR;
+ }
+
+ // the neighbor will be disconnected inside the selector loop
+ neighbor.setState(NeighborState.MARKED_FOR_DISCONNECT);
+ return NeighborMutOp.OK;
+ }
+
+ /**
+     * Parses the given string to a URI. The URI must use "tcp://" as the protocol.
+ *
+ * @param uri The URI string to parse
+ * @return the parsed URI, if parsed correctly
+ */
+ public static Optional parseURI(final String uri) {
+ if (uri.isEmpty()) {
+ return Optional.empty();
+ }
+
+ URI neighborURI;
+ try {
+ neighborURI = new URI(uri);
+ } catch (URISyntaxException e) {
+ log.error("URI {} raised URI Syntax Exception. reason: {}", uri, e.getMessage());
+ return Optional.empty();
+ }
+ if (!isURIValid(neighborURI)) {
+ return Optional.empty();
+ }
+ return Optional.of(neighborURI);
+ }
+
+ /**
+ * Checks whether the given URI is valid. The URI is valid if it is not null and it uses TCP as the protocol.
+ *
+ * @param uri The URI to check
+ * @return true if the URI is valid, false if not
+ */
+ public static boolean isURIValid(final URI uri) {
+ if (!uri.getScheme().equals("tcp")) {
+ log.error("'{}' is not a valid URI schema, only TCP ({}) is supported", uri, PROTOCOL_PREFIX);
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Returns the {@link TransactionProcessingPipelineImpl}.
+ *
+ * @return the {@link TransactionProcessingPipelineImpl} used by the {@link NeighborRouter}
+ */
+ public TransactionProcessingPipeline getTransactionProcessingPipeline() {
+ return txPipeline;
+ }
+
+ /**
+ * Gets all neighbors the {@link NeighborRouter} currently sees as either connected or attempts to build connections
+ * to.
+ *
+ * @return The neighbors
+ */
+ public List getNeighbors() {
+ List neighbors = new ArrayList<>(connectedNeighbors.values());
+ reconnectPool.forEach(uri -> {
+ neighbors.add(new NeighborImpl<>(null, null, uri.getHost(), uri.getPort(), null));
+ });
+ return neighbors;
+ }
+
+ /**
+ * Gets the currently connected neighbors.
+ *
+ * @return The connected neighbors.
+ */
+ public Map getConnectedNeighbors() {
+ return Collections.unmodifiableMap(connectedNeighbors);
+ }
+
+ /**
+ * Gossips the given transaction to the given neighbor.
+ *
+ * @param neighbor The {@link Neighbor} to gossip the transaction to
+ * @param tvm The transaction to gossip
+ * @throws Exception thrown when loading a hash of transaction to request fails
+ */
+ public void gossipTransactionTo(Neighbor neighbor, TransactionViewModel tvm) throws Exception {
+ gossipTransactionTo(neighbor, tvm, false);
+ }
+
+ /**
+ * Gossips the given transaction to the given neighbor.
+ *
+ * @param neighbor The {@link Neighbor} to gossip the transaction to
+ * @param tvm The transaction to gossip
+ * @param useHashOfTVM Whether to use the hash of the given transaction as the requested transaction hash or not
+ * @throws Exception thrown when loading a hash of transaction to request fails
+ */
+ public void gossipTransactionTo(Neighbor neighbor, TransactionViewModel tvm, boolean useHashOfTVM)
+ throws Exception {
+ byte[] requestedHash = null;
+ if (!useHashOfTVM) {
+ Hash hash = txRequester.transactionToRequest(rnd.nextDouble() < protocolConfig.getpSelectMilestoneChild());
+ if (hash != null) {
+ requestedHash = hash.bytes();
+ }
+ }
+
+ if (requestedHash == null) {
+ requestedHash = tvm.getHash().bytes();
+ }
+
+ ByteBuffer packet = Protocol.createTransactionGossipPacket(tvm, requestedHash);
+ neighbor.send(packet);
+ // tx might actually not be sent, we are merely putting it into the send queue
+ // TODO: find a way to increment once we actually sent the txs into the channel
+ neighbor.getMetrics().incrSentTransactionsCount();
+ }
+
+ /**
+     * Shuts down the {@link NeighborRouter} and all currently open connections.
+ */
+ public void shutdown() {
+ shutdown.set(true);
+ executor.shutdownNow();
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/Node.java b/src/main/java/com/iota/iri/network/Node.java
deleted file mode 100644
index 2cb7bbc0e0..0000000000
--- a/src/main/java/com/iota/iri/network/Node.java
+++ /dev/null
@@ -1,894 +0,0 @@
-package com.iota.iri.network;
-
-import com.iota.iri.TransactionValidator;
-import com.iota.iri.conf.NodeConfig;
-import com.iota.iri.controllers.TipsViewModel;
-import com.iota.iri.controllers.TransactionViewModel;
-import com.iota.iri.crypto.SpongeFactory;
-import com.iota.iri.model.Hash;
-import com.iota.iri.model.HashFactory;
-import com.iota.iri.model.TransactionHash;
-import com.iota.iri.service.milestone.LatestMilestoneTracker;
-import com.iota.iri.service.snapshot.SnapshotProvider;
-import com.iota.iri.storage.Tangle;
-import net.openhft.hashing.LongHashFunction;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.*;
-import java.security.SecureRandom;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Class Node is the core class for handling IRI gossip protocol packets.
- * Both TCP and UDP receivers will pass incoming packets to this class's object.
- * It is also responsible for validating and storing the received transactions
- * into the Tangle Database.
- *
- * The Gossip protocol is specific to IRI nodes and is used for spamming and requesting
- * new transactions between IRI peers. Every message sent on Gossip protocol consists of two
- * parts - the transaction in binary encoded format followed by a hash of another transaction to
- * be requested. The receiving entity will save the newly received transaction into
- * its own database and will respond with the received requested transaction - if
- * available in its own storgage.
- *
- */
-public class Node {
-
- private static final Logger log = LoggerFactory.getLogger(Node.class);
- private final int reqHashSize;
-
-
- private int BROADCAST_QUEUE_SIZE;
- private int RECV_QUEUE_SIZE;
- private int REPLY_QUEUE_SIZE;
- private static final int PAUSE_BETWEEN_TRANSACTIONS = 1;
-
- private final AtomicBoolean shuttingDown = new AtomicBoolean(false);
-
- private final List neighbors = new CopyOnWriteArrayList<>();
- private final ConcurrentSkipListSet broadcastQueue = weightQueue();
- private final ConcurrentSkipListSet> receiveQueue = weightQueueTxPair();
- private final ConcurrentSkipListSet> replyQueue = weightQueueHashPair();
-
-
- private final DatagramPacket sendingPacket;
- private final DatagramPacket tipRequestingPacket;
-
- private final ExecutorService executor = Executors.newFixedThreadPool(5);
- private final NodeConfig configuration;
- private final Tangle tangle;
- private final SnapshotProvider snapshotProvider;
- private final TipsViewModel tipsViewModel;
- private final TransactionValidator transactionValidator;
- private final LatestMilestoneTracker latestMilestoneTracker;
- private final TransactionRequester transactionRequester;
-
- private static final SecureRandom rnd = new SecureRandom();
-
-
- private FIFOCache recentSeenBytes;
- private LongHashFunction recentSeenBytesHashFunction;
-
- private static AtomicLong recentSeenBytesMissCount = new AtomicLong(0L);
- private static AtomicLong recentSeenBytesHitCount = new AtomicLong(0L);
-
- private static long sendLimit = -1;
- private static AtomicLong sendPacketsCounter = new AtomicLong(0L);
- private static AtomicLong sendPacketsTimer = new AtomicLong(0L);
-
- public static final ConcurrentSkipListSet rejectedAddresses = new ConcurrentSkipListSet();
- private DatagramSocket udpSocket;
-
- /**
- * Constructs a Node class instance. The constructor is passed reference
- * of several other instances.
- *
- * @param tangle An instance of the Tangle storage interface
- * @param snapshotProvider data provider for the snapshots that are relevant for the node
- * @param transactionValidator makes sure transaction is not malformed.
- * @param transactionRequester Contains a set of transaction hashes to be requested from peers.
- * @param tipsViewModel Contains a hash of solid and non solid tips
- * @param latestMilestoneTracker Tracks milestones issued from the coordinator
- * @param configuration Contains all the config.
- *
- */
- public Node(final Tangle tangle, SnapshotProvider snapshotProvider, final TransactionValidator transactionValidator, final TransactionRequester transactionRequester, final TipsViewModel tipsViewModel, final LatestMilestoneTracker latestMilestoneTracker, final NodeConfig configuration
- ) {
- this.configuration = configuration;
- this.tangle = tangle;
- this.snapshotProvider = snapshotProvider ;
- this.transactionValidator = transactionValidator;
- this.transactionRequester = transactionRequester;
- this.tipsViewModel = tipsViewModel;
- this.latestMilestoneTracker = latestMilestoneTracker ;
- this.reqHashSize = configuration.getRequestHashSize();
- int packetSize = configuration.getTransactionPacketSize();
- this.sendingPacket = new DatagramPacket(new byte[packetSize], packetSize);
- this.tipRequestingPacket = new DatagramPacket(new byte[packetSize], packetSize);
-
- }
-
- /**
- * Intialize the operations by spawning all the worker threads.
- *
- */
- public void init() throws Exception {
-
- //TODO ask Alon
- sendLimit = (long) ((configuration.getSendLimit() * 1000000) / (configuration.getTransactionPacketSize() * 8));
-
- BROADCAST_QUEUE_SIZE = RECV_QUEUE_SIZE = REPLY_QUEUE_SIZE = configuration.getqSizeNode();
- recentSeenBytes = new FIFOCache<>(configuration.getCacheSizeBytes(), configuration.getpDropCacheEntry());
- recentSeenBytesHashFunction = LongHashFunction.xx();
-
- parseNeighborsConfig();
-
- executor.submit(spawnBroadcasterThread());
- executor.submit(spawnTipRequesterThread());
- executor.submit(spawnNeighborDNSRefresherThread());
- executor.submit(spawnProcessReceivedThread());
- executor.submit(spawnReplyToRequestThread());
-
- executor.shutdown();
- }
-
- /**
- * Keeps the passed UDP DatagramSocket reference from {@link UDPReceiver}.
- * This is currently only used in creating a new {@link UDPNeighbor}.
- *
- * @param {@link DatagramSocket} socket created by UDPReceiver
- */
- public void setUDPSocket(final DatagramSocket socket) {
- this.udpSocket = socket;
- }
-
- /**
- * Returns the stored UDP DatagramSocket reference from {@link UDPReceiver}.
- *
- * @return {@link DatagramSocket} socket created by UDPReceiver
- */
- public DatagramSocket getUdpSocket() {
- return udpSocket;
- }
-
- /**
- * Internal map used to keep track of neighbor's IP vs DNS name
- */
- private final Map neighborIpCache = new HashMap<>();
-
- /**
- * One of the problem of dynamic DNS is neighbor could reconnect and get assigned
- * a new IP address. This thread periodically resovles the DNS to make sure
- * the IP is updated in the quickest possible manner. Doing it fast will increase
- * the detection of change - however will generate lot of unnecessary DNS outbound
- * traffic - so a balance is sought between speed and resource utilization.
- *
- */
- private Runnable spawnNeighborDNSRefresherThread() {
- return () -> {
- if (configuration.isDnsResolutionEnabled()) {
- log.info("Spawning Neighbor DNS Refresher Thread");
-
- while (!shuttingDown.get()) {
- int dnsCounter = 0;
- log.info("Checking Neighbors' Ip...");
-
- try {
- neighbors.forEach(n -> {
- final String hostname = n.getAddress().getHostString();
- checkIp(hostname).ifPresent(ip -> {
- log.info("DNS Checker: Validating DNS Address '{}' with '{}'", hostname, ip);
- tangle.publish("dnscv %s %s", hostname, ip);
- final String neighborAddress = neighborIpCache.get(hostname);
-
- if (neighborAddress == null) {
- neighborIpCache.put(hostname, ip);
- } else {
- if (neighborAddress.equals(ip)) {
- log.info("{} seems fine.", hostname);
- tangle.publish("dnscc %s", hostname);
- } else {
- if (configuration.isDnsRefresherEnabled()) {
- log.info("IP CHANGED for {}! Updating...", hostname);
- tangle.publish("dnscu %s", hostname);
- String protocol = (n instanceof TCPNeighbor) ? "tcp://" : "udp://";
- String port = ":" + n.getAddress().getPort();
-
- uri(protocol + hostname + port).ifPresent(uri -> {
- removeNeighbor(uri, n.isFlagged());
-
- uri(protocol + ip + port).ifPresent(nuri -> {
- Neighbor neighbor = newNeighbor(nuri, n.isFlagged());
- addNeighbor(neighbor);
- neighborIpCache.put(hostname, ip);
- });
- });
- } else {
- log.info("IP CHANGED for {}! Skipping... DNS_REFRESHER_ENABLED is false.", hostname);
- }
- }
- }
- });
- });
-
- while (dnsCounter++ < 60 * 30 && !shuttingDown.get()) {
- Thread.sleep(1000);
- }
- } catch (final Exception e) {
- log.error("Neighbor DNS Refresher Thread Exception:", e);
- }
- }
- log.info("Shutting down Neighbor DNS Refresher Thread");
- } else {
- log.info("Ignoring DNS Refresher Thread... DNS_RESOLUTION_ENABLED is false");
- }
- };
- }
-
- /**
- * Checks whether the passed DNS is an IP address in string form or a DNS
- * hostname.
- *
- * @return An IP address (decimal form) in string resolved from the given DNS
- *
- */
- private Optional checkIp(final String dnsName) {
-
- if (StringUtils.isEmpty(dnsName)) {
- return Optional.empty();
- }
-
- InetAddress inetAddress;
- try {
- inetAddress = java.net.InetAddress.getByName(dnsName);
- } catch (UnknownHostException e) {
- return Optional.empty();
- }
-
- final String hostAddress = inetAddress.getHostAddress();
-
- if (StringUtils.equals(dnsName, hostAddress)) { // not a DNS...
- return Optional.empty();
- }
-
- return Optional.of(hostAddress);
- }
-
-
- /**
- * First Entry point for receiving any incoming transactions from TCP/UDP Receivers.
- * At this point, the transport protocol (UDP/TCP) is irrelevant. We check if we have
- * already received this packet by taking a hash of incoming payload and
- * comparing it against a saved hash set. If the packet is new, we construct
- * a {@link TransactionViewModel} object from it and perform some basic validation
- * on the received transaction via {@link TransactionValidator#runValidation}
- *
- * The packet is then added to {@link receiveQueue} for further processing.
- */
-
- public void preProcessReceivedData(byte[] receivedData, SocketAddress senderAddress, String uriScheme) {
- TransactionViewModel receivedTransactionViewModel = null;
- Hash receivedTransactionHash = null;
-
- boolean addressMatch = false;
- boolean cached = false;
- double pDropTransaction = configuration.getpDropTransaction();
-
- for (final Neighbor neighbor : getNeighbors()) {
- addressMatch = neighbor.matches(senderAddress);
- if (addressMatch) {
- //Validate transaction
- neighbor.incAllTransactions();
- if (rnd.nextDouble() < pDropTransaction) {
- //log.info("Randomly dropping transaction. Stand by... ");
- break;
- }
- try {
-
- //Transaction bytes
- long digest = getBytesDigest(receivedData);
-
- //check if cached
- synchronized (recentSeenBytes) {
- cached = (receivedTransactionHash = recentSeenBytes.get(digest)) != null;
- }
-
- if (!cached) {
- //if not, then validate
- receivedTransactionViewModel = new TransactionViewModel(receivedData, TransactionHash.calculate(receivedData, TransactionViewModel.TRINARY_SIZE, SpongeFactory.create(SpongeFactory.Mode.CURLP81)));
- receivedTransactionHash = receivedTransactionViewModel.getHash();
- transactionValidator.runValidation(receivedTransactionViewModel, transactionValidator.getMinWeightMagnitude());
-
- synchronized (recentSeenBytes) {
- recentSeenBytes.put(digest, receivedTransactionHash);
- }
-
- //if valid - add to receive queue (receivedTransactionViewModel, neighbor)
- addReceivedDataToReceiveQueue(receivedTransactionViewModel, neighbor);
-
- }
- } catch (final TransactionValidator.StaleTimestampException e) {
- log.debug(e.getMessage());
- try {
- transactionRequester.clearTransactionRequest(receivedTransactionHash);
- } catch (Exception e1) {
- log.error(e1.getMessage());
- }
- neighbor.incStaleTransactions();
- } catch (final RuntimeException e) {
- log.error(e.getMessage());
- log.error("Received an Invalid TransactionViewModel. Dropping it...");
- neighbor.incInvalidTransactions();
- break;
- }
-
- //Request bytes
-
- //add request to reply queue (requestedHash, neighbor)
- Hash requestedHash = HashFactory.TRANSACTION.create(receivedData, TransactionViewModel.SIZE, reqHashSize);
- if (requestedHash.equals(receivedTransactionHash)) {
- //requesting a random tip
- requestedHash = Hash.NULL_HASH;
- }
-
- addReceivedDataToReplyQueue(requestedHash, neighbor);
-
- //recentSeenBytes statistics
-
- if (log.isDebugEnabled()) {
- long hitCount, missCount;
- if (cached) {
- hitCount = recentSeenBytesHitCount.incrementAndGet();
- missCount = recentSeenBytesMissCount.get();
- } else {
- hitCount = recentSeenBytesHitCount.get();
- missCount = recentSeenBytesMissCount.incrementAndGet();
- }
- if (((hitCount + missCount) % 50000L == 0)) {
- log.info("RecentSeenBytes cache hit/miss ratio: " + hitCount + "/" + missCount);
- tangle.publish("hmr %d/%d", hitCount, missCount);
- recentSeenBytesMissCount.set(0L);
- recentSeenBytesHitCount.set(0L);
- }
- }
-
- break;
- }
- }
-
- if (!addressMatch && configuration.isTestnet()) {
- int maxPeersAllowed = configuration.getMaxPeers();
- String uriString = uriScheme + ":/" + senderAddress.toString();
- if (Neighbor.getNumPeers() < maxPeersAllowed) {
- log.info("Adding non-tethered neighbor: " + uriString);
- tangle.publish("antn %s", uriString);
- try {
- final URI uri = new URI(uriString);
- // 3rd parameter false (not tcp), 4th parameter true (configured tethering)
- final Neighbor newneighbor = newNeighbor(uri, false);
- if (!getNeighbors().contains(newneighbor)) {
- getNeighbors().add(newneighbor);
- Neighbor.incNumPeers();
- }
- } catch (URISyntaxException e) {
- log.error("Invalid URI string: " + uriString);
- }
- } else {
- if (rejectedAddresses.size() > 20) {
- // Avoid ever growing list in case of an attack.
- rejectedAddresses.clear();
- } else if (rejectedAddresses.add(uriString)) {
- tangle.publish("rntn %s %s", uriString, String.valueOf(maxPeersAllowed));
- log.info("Refused non-tethered neighbor: " + uriString +
- " (max-peers = " + String.valueOf(maxPeersAllowed) + ")");
- }
- }
- }
- }
-
- /**
- * Adds incoming transactions to the {@link receiveQueue} to be processed later.
- */
- public void addReceivedDataToReceiveQueue(TransactionViewModel receivedTransactionViewModel, Neighbor neighbor) {
- receiveQueue.add(new ImmutablePair<>(receivedTransactionViewModel, neighbor));
- if (receiveQueue.size() > RECV_QUEUE_SIZE) {
- receiveQueue.pollLast();
- }
-
- }
-
- /**
- * Adds incoming transactions to the {@link replyQueue} to be processed later
- */
- public void addReceivedDataToReplyQueue(Hash requestedHash, Neighbor neighbor) {
- replyQueue.add(new ImmutablePair<>(requestedHash, neighbor));
- if (replyQueue.size() > REPLY_QUEUE_SIZE) {
- replyQueue.pollLast();
- }
- }
-
- /**
- * Picks up a transaction and neighbor pair from receive queue. Calls
- * {@link processReceivedData} on the pair.
- */
- public void processReceivedDataFromQueue() {
- final Pair receivedData = receiveQueue.pollFirst();
- if (receivedData != null) {
- processReceivedData(receivedData.getLeft(), receivedData.getRight());
- }
- }
-
- /**
- * Picks up a transaction hash and neighbor pair from reply queue. Calls
- * {@link replyToRequest} on the pair.
- */
- public void replyToRequestFromQueue() {
- final Pair receivedData = replyQueue.pollFirst();
- if (receivedData != null) {
- replyToRequest(receivedData.getLeft(), receivedData.getRight());
- }
- }
-
- /**
- * This is second step of incoming transaction processing. The newly received
- * and validated transactions are stored in {@link receiveQueue}. This function
- * picks up these transaction and stores them into the {@link Tangle} Database. The
- * transaction is then added to the broadcast queue, to be fruther spammed to the neighbors.
- */
- public void processReceivedData(TransactionViewModel receivedTransactionViewModel, Neighbor neighbor) {
-
- boolean stored = false;
-
- //store new transaction
- try {
- stored = receivedTransactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot());
- } catch (Exception e) {
- log.error("Error accessing persistence store.", e);
- neighbor.incInvalidTransactions();
- }
-
- //if new, then broadcast to all neighbors
- if (stored) {
- receivedTransactionViewModel.setArrivalTime(System.currentTimeMillis());
- try {
- transactionValidator.updateStatus(receivedTransactionViewModel);
- receivedTransactionViewModel.updateSender(neighbor.getAddress().toString());
- receivedTransactionViewModel.update(tangle, snapshotProvider.getInitialSnapshot(), "arrivalTime|sender");
- } catch (Exception e) {
- log.error("Error updating transactions.", e);
- }
- neighbor.incNewTransactions();
- broadcast(receivedTransactionViewModel);
- }
-
- }
-
- /**
- * This is second step of incoming transaction processing. The newly received
- * and validated transactions are stored in {@link receiveQueue}. This function
- * picks up these transaction and stores them into the {@link Tangle} Database. The
- * transaction is then added to the broadcast queue, to be fruther spammed to the neighbors.
- */
- public void replyToRequest(Hash requestedHash, Neighbor neighbor) {
-
- TransactionViewModel transactionViewModel = null;
- Hash transactionPointer;
-
- //retrieve requested transaction
- if (requestedHash.equals(Hash.NULL_HASH)) {
- //Random Tip Request
- try {
- if (transactionRequester.numberOfTransactionsToRequest() > 0
- && rnd.nextDouble() < configuration.getpReplyRandomTip()) {
- neighbor.incRandomTransactionRequests();
- transactionPointer = getRandomTipPointer();
- transactionViewModel = TransactionViewModel.fromHash(tangle, transactionPointer);
- } else {
- //no tx to request, so no random tip will be sent as a reply.
- return;
- }
- } catch (Exception e) {
- log.error("Error getting random tip.", e);
- }
- } else {
- //find requested trytes
- try {
- //transactionViewModel = TransactionViewModel.find(Arrays.copyOf(requestedHash.bytes(), TransactionRequester.REQUEST_HASH_SIZE));
- transactionViewModel = TransactionViewModel.fromHash(tangle, HashFactory.TRANSACTION.create(requestedHash.bytes(), 0, reqHashSize));
- //log.debug("Requested Hash: " + requestedHash + " \nFound: " + transactionViewModel.getHash());
- } catch (Exception e) {
- log.error("Error while searching for transaction.", e);
- }
- }
-
- if (transactionViewModel != null && transactionViewModel.getType() == TransactionViewModel.FILLED_SLOT) {
- //send trytes back to neighbor
- try {
- sendPacket(sendingPacket, transactionViewModel, neighbor);
-
- long digest = getBytesDigest(transactionViewModel.getBytes());
- synchronized (recentSeenBytes) {
- recentSeenBytes.put(digest, transactionViewModel.getHash());
- }
- } catch (Exception e) {
- log.error("Error fetching transaction to request.", e);
- }
- } else {
- //trytes not found
- if (!requestedHash.equals(Hash.NULL_HASH) && rnd.nextDouble() < configuration.getpPropagateRequest()) {
- //request is an actual transaction and missing in request queue add it.
- try {
- transactionRequester.requestTransaction(requestedHash, false);
-
- } catch (Exception e) {
- log.error("Error adding transaction to request.", e);
- }
-
- }
- }
-
- }
-
- private Hash getRandomTipPointer() throws Exception {
- Hash tip = rnd.nextDouble() < configuration.getpSendMilestone() ? latestMilestoneTracker.getLatestMilestoneHash() : tipsViewModel.getRandomSolidTipHash();
- return tip == null ? Hash.NULL_HASH : tip;
- }
-
- /**
- * Sends a Datagram to the neighbour. Also appends a random hash request
- * to the outgoing packet. Note that this is only used for UDP handling. For TCP
- * the outgoing packets are sent by {@link ReplicatorSinkProcessor}
- *
- * @param {@link DatagramPacket} sendingPacket the UDP payload buffer
- * @param {@link TransactionViewModel} transactionViewModel which should be sent.
- * @praram {@link Neighbor} the neighbor where this should be sent.
- *
- */
- public void sendPacket(DatagramPacket sendingPacket, TransactionViewModel transactionViewModel, Neighbor neighbor) throws Exception {
-
- //limit amount of sends per second
- long now = System.currentTimeMillis();
- if ((now - sendPacketsTimer.get()) > 1000L) {
- //reset counter every second
- sendPacketsCounter.set(0);
- sendPacketsTimer.set(now);
- }
- if (sendLimit >= 0 && sendPacketsCounter.get() > sendLimit) {
- //if exceeded limit - don't send
- //log.info("exceeded limit - don't send - {}",sendPacketsCounter.get());
- return;
- }
-
- synchronized (sendingPacket) {
- System.arraycopy(transactionViewModel.getBytes(), 0, sendingPacket.getData(), 0, TransactionViewModel.SIZE);
- Hash hash = transactionRequester.transactionToRequest(rnd.nextDouble() < configuration.getpSelectMilestoneChild());
- System.arraycopy(hash != null ? hash.bytes() : transactionViewModel.getHash().bytes(), 0,
- sendingPacket.getData(), TransactionViewModel.SIZE, reqHashSize);
- neighbor.send(sendingPacket);
- }
-
- sendPacketsCounter.getAndIncrement();
- }
-
- /**
- * Does the same as {@link #sendPacket(DatagramPacket, TransactionViewModel, Neighbor)} but defaults to using the
- * same internal {@link #sendingPacket} as all the other methods in this class, which allows external callers to
- * send packets that are in "sync" (sending is synchronized over the packet object) with the rest of the methods
- * used in this class.
- *
- * @param transactionViewModel the transaction that shall be sent
- * @param neighbor the neighbor that should receive the packet
- * @throws Exception if anything unexpected happens during the sending of the packet
- */
- public void sendPacket(TransactionViewModel transactionViewModel, Neighbor neighbor) throws Exception {
- sendPacket(sendingPacket, transactionViewModel, neighbor);
- }
-
- /**
- * This thread picks up a new transaction from the broadcast queue and
- * spams it to all of the neigbors. Sadly, this also includes the neigbor who
- * originally sent us the transaction. This could be improved in future.
- *
- */
- private Runnable spawnBroadcasterThread() {
- return () -> {
-
- log.info("Spawning Broadcaster Thread");
-
- while (!shuttingDown.get()) {
-
- try {
- final TransactionViewModel transactionViewModel = broadcastQueue.pollFirst();
- if (transactionViewModel != null) {
-
- for (final Neighbor neighbor : neighbors) {
- try {
- sendPacket(sendingPacket, transactionViewModel, neighbor);
- } catch (final Exception e) {
- // ignore
- }
- }
- }
- Thread.sleep(PAUSE_BETWEEN_TRANSACTIONS);
- } catch (final Exception e) {
- log.error("Broadcaster Thread Exception:", e);
- }
- }
- log.info("Shutting down Broadcaster Thread");
- };
- }
-
- /**
- * We send a tip request packet (transaction corresponding to the latest milestone)
- * to all of our neighbors periodically.
- */
- private Runnable spawnTipRequesterThread() {
- return () -> {
-
- log.info("Spawning Tips Requester Thread");
- long lastTime = 0;
- while (!shuttingDown.get()) {
-
- try {
- final TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(tangle, latestMilestoneTracker.getLatestMilestoneHash());
- System.arraycopy(transactionViewModel.getBytes(), 0, tipRequestingPacket.getData(), 0, TransactionViewModel.SIZE);
- System.arraycopy(transactionViewModel.getHash().bytes(), 0, tipRequestingPacket.getData(), TransactionViewModel.SIZE,
- reqHashSize);
- //Hash.SIZE_IN_BYTES);
-
- neighbors.forEach(n -> n.send(tipRequestingPacket));
-
- long now = System.currentTimeMillis();
- if ((now - lastTime) > 10000L) {
- lastTime = now;
- tangle.publish("rstat %d %d %d %d %d",
- getReceiveQueueSize(), getBroadcastQueueSize(),
- transactionRequester.numberOfTransactionsToRequest(), getReplyQueueSize(),
- TransactionViewModel.getNumberOfStoredTransactions(tangle));
- log.info("toProcess = {} , toBroadcast = {} , toRequest = {} , toReply = {} / totalTransactions = {}",
- getReceiveQueueSize(), getBroadcastQueueSize(),
- transactionRequester.numberOfTransactionsToRequest(), getReplyQueueSize(),
- TransactionViewModel.getNumberOfStoredTransactions(tangle));
- }
-
- Thread.sleep(5000);
- } catch (final Exception e) {
- log.error("Tips Requester Thread Exception:", e);
- }
- }
- log.info("Shutting down Requester Thread");
- };
- }
-
- private Runnable spawnProcessReceivedThread() {
- return () -> {
-
- log.info("Spawning Process Received Data Thread");
-
- while (!shuttingDown.get()) {
-
- try {
- processReceivedDataFromQueue();
- Thread.sleep(1);
- } catch (final Exception e) {
- log.error("Process Received Data Thread Exception:", e);
- }
- }
- log.info("Shutting down Process Received Data Thread");
- };
- }
-
- private Runnable spawnReplyToRequestThread() {
- return () -> {
-
- log.info("Spawning Reply To Request Thread");
-
- while (!shuttingDown.get()) {
-
- try {
- replyToRequestFromQueue();
- Thread.sleep(1);
- } catch (final Exception e) {
- log.error("Reply To Request Thread Exception:", e);
- }
- }
- log.info("Shutting down Reply To Request Thread");
- };
- }
-
-
- private static ConcurrentSkipListSet weightQueue() {
- return new ConcurrentSkipListSet<>((transaction1, transaction2) -> {
- if (transaction1.weightMagnitude == transaction2.weightMagnitude) {
- for (int i = Hash.SIZE_IN_BYTES; i-- > 0; ) {
- if (transaction1.getHash().bytes()[i] != transaction2.getHash().bytes()[i]) {
- return transaction2.getHash().bytes()[i] - transaction1.getHash().bytes()[i];
- }
- }
- return 0;
- }
- return transaction2.weightMagnitude - transaction1.weightMagnitude;
- });
- }
-
- //TODO generalize these weightQueues
- private static ConcurrentSkipListSet> weightQueueHashPair() {
- return new ConcurrentSkipListSet>((transaction1, transaction2) -> {
- Hash tx1 = transaction1.getLeft();
- Hash tx2 = transaction2.getLeft();
-
- for (int i = Hash.SIZE_IN_BYTES; i-- > 0; ) {
- if (tx1.bytes()[i] != tx2.bytes()[i]) {
- return tx2.bytes()[i] - tx1.bytes()[i];
- }
- }
- return 0;
-
- });
- }
-
- private static ConcurrentSkipListSet> weightQueueTxPair() {
- return new ConcurrentSkipListSet>((transaction1, transaction2) -> {
- TransactionViewModel tx1 = transaction1.getLeft();
- TransactionViewModel tx2 = transaction2.getLeft();
-
- if (tx1.weightMagnitude == tx2.weightMagnitude) {
- for (int i = Hash.SIZE_IN_BYTES; i-- > 0; ) {
- if (tx1.getHash().bytes()[i] != tx2.getHash().bytes()[i]) {
- return tx2.getHash().bytes()[i] - tx1.getHash().bytes()[i];
- }
- }
- return 0;
- }
- return tx2.weightMagnitude - tx1.weightMagnitude;
- });
- }
-
-
- public void broadcast(final TransactionViewModel transactionViewModel) {
- broadcastQueue.add(transactionViewModel);
- if (broadcastQueue.size() > BROADCAST_QUEUE_SIZE) {
- broadcastQueue.pollLast();
- }
- }
-
- public void shutdown() throws InterruptedException {
- shuttingDown.set(true);
- executor.awaitTermination(6, TimeUnit.SECONDS);
- }
-
- private long getBytesDigest(byte[] receivedData) {
- return recentSeenBytesHashFunction.hashBytes(receivedData, 0, TransactionViewModel.SIZE);
- }
-
- // helpers methods
-
- public boolean removeNeighbor(final URI uri, boolean isConfigured) {
- final Neighbor neighbor = newNeighbor(uri, isConfigured);
- if (uri.getScheme().equals("tcp")) {
- neighbors.stream().filter(n -> n instanceof TCPNeighbor)
- .map(n -> ((TCPNeighbor) n))
- .filter(n -> n.equals(neighbor))
- .forEach(TCPNeighbor::clear);
- }
- return neighbors.remove(neighbor);
- }
-
- public boolean addNeighbor(Neighbor neighbor) {
- return !getNeighbors().contains(neighbor) && getNeighbors().add(neighbor);
- }
-
- public boolean isUriValid(final URI uri) {
- if (uri != null) {
- if (uri.getScheme().equals("tcp") || uri.getScheme().equals("udp")) {
- if ((new InetSocketAddress(uri.getHost(), uri.getPort()).getAddress() != null)) {
- return true;
- }
- }
- log.error("'{}' is not a valid uri schema or resolvable address.", uri);
- return false;
- }
- log.error("Cannot read uri schema, please check neighbor config!");
- return false;
- }
-
- public Neighbor newNeighbor(final URI uri, boolean isConfigured) {
- if (isUriValid(uri)) {
- if (uri.getScheme().equals("tcp")) {
- return new TCPNeighbor(new InetSocketAddress(uri.getHost(), uri.getPort()), isConfigured);
- }
- if (uri.getScheme().equals("udp")) {
- return new UDPNeighbor(new InetSocketAddress(uri.getHost(), uri.getPort()), udpSocket, isConfigured);
- }
- }
- throw new RuntimeException(uri.toString());
- }
-
- public static Optional uri(final String uri) {
- try {
- return Optional.of(new URI(uri));
- } catch (URISyntaxException e) {
- log.error("Uri {} raised URI Syntax Exception", uri);
- }
- return Optional.empty();
- }
-
- private void parseNeighborsConfig() {
- configuration.getNeighbors().stream().distinct()
- .filter(s -> !s.isEmpty())
- .map(Node::uri).map(Optional::get)
- .filter(u -> isUriValid(u))
- .map(u -> newNeighbor(u, true))
- .peek(u -> {
- log.info("-> Adding neighbor : {} ", u.getAddress());
- tangle.publish("-> Adding Neighbor : %s", u.getAddress());
- }).forEach(neighbors::add);
- }
-
- public int queuedTransactionsSize() {
- return broadcastQueue.size();
- }
-
- public int howManyNeighbors() {
- return getNeighbors().size();
- }
-
- public List getNeighbors() {
- return neighbors;
- }
-
- public int getBroadcastQueueSize() {
- return broadcastQueue.size();
- }
-
- public int getReceiveQueueSize() {
- return receiveQueue.size();
- }
-
- public int getReplyQueueSize() {
- return replyQueue.size();
- }
-
- public class FIFOCache {
-
- private final int capacity;
- private final double dropRate;
- private LinkedHashMap map;
- private final SecureRandom rnd = new SecureRandom();
-
- public FIFOCache(int capacity, double dropRate) {
- this.capacity = capacity;
- this.dropRate = dropRate;
- this.map = new LinkedHashMap<>();
- }
-
- public V get(K key) {
- V value = this.map.get(key);
- if (value != null && (rnd.nextDouble() < this.dropRate)) {
- this.map.remove(key);
- return null;
- }
- return value;
- }
-
- public V put(K key, V value) {
- if (this.map.containsKey(key)) {
- return value;
- }
- if (this.map.size() >= this.capacity) {
- Iterator it = this.map.keySet().iterator();
- it.next();
- it.remove();
- }
- return this.map.put(key, value);
- }
- }
-
-}
diff --git a/src/main/java/com/iota/iri/network/TCPNeighbor.java b/src/main/java/com/iota/iri/network/TCPNeighbor.java
deleted file mode 100644
index d29eb7f828..0000000000
--- a/src/main/java/com/iota/iri/network/TCPNeighbor.java
+++ /dev/null
@@ -1,122 +0,0 @@
-package com.iota.iri.network;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.DatagramPacket;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.nio.ByteBuffer;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Created by paul on 4/15/17.
- */
-
- /**
- * This class Extends {@link Neighbor} base class with TCP specific functionality.
- * It keeps reference of Source and Sink while maintaining a sendQueue for keeping
- * outgoing requests.
- *
- */
-public class TCPNeighbor extends Neighbor {
- private static final Logger log = LoggerFactory.getLogger(Neighbor.class);
- private int tcpPort;
-
- private final ArrayBlockingQueue sendQueue = new ArrayBlockingQueue<>(10);
- private boolean stopped = false;
-
- public TCPNeighbor(InetSocketAddress address, boolean isConfigured) {
- super(address, isConfigured);
- this.tcpPort = address.getPort();
- }
-
- private Socket source = null;
-
- public Socket getSource() {
- return source;
- }
-
- public void clear() {
- setSource(null);
- setSink(null);
- this.stopped = true;
- }
-
- public boolean isStopped() {
- return stopped;
- }
-
- public void setSource(Socket source) {
- if (source == null) {
- if (this.source != null && !this.source.isClosed()) {
- try {
- this.source.close();
- log.info("Source {} closed", this.getHostAddress());
- } catch (IOException e) {
- log.error("Source {} close failure", this.getHostAddress(), e);
- }
- }
- }
- this.source = source;
- }
-
- private Socket sink = null;
-
- public Socket getSink() {
- return sink;
- }
-
- public void setSink(Socket sink) {
- if (sink == null) {
- if (this.sink != null && !this.sink.isClosed()) {
- try {
- this.sink.close();
- log.info("Sink {} closed", this.getHostAddress());
- } catch (IOException e) {
- log.error("Sink {} close failure: {}", this.getHostAddress(), e.toString());
- }
- }
- }
- this.sink = sink;
- }
-
- /**
- * This is a non-blocking write and that makes it necessary to make a defensive copy of the sent data.
- *
- * @param packet the data to be queued for sending.
- */
- @Override
- public void send(DatagramPacket packet) {
- synchronized (sendQueue) {
- if (sendQueue.remainingCapacity() == 0) {
- sendQueue.poll();
- log.debug("Sendqueue full...dropped 1 tx");
- }
- byte[] bytes = packet.getData().clone();
- sendQueue.add(ByteBuffer.wrap(bytes));
- }
-
- }
-
- @Override
- public int getPort() {
- return tcpPort;
- }
-
- @Override
- public String connectionType() {
- return "tcp";
- }
-
- public void setTcpPort(int tcpPort) {
- this.tcpPort = tcpPort;
- }
-
- public ByteBuffer getNextMessage() throws InterruptedException {
- return (this.sendQueue.poll(10000, TimeUnit.MILLISECONDS));
- }
-
-}
diff --git a/src/main/java/com/iota/iri/network/TipsRequester.java b/src/main/java/com/iota/iri/network/TipsRequester.java
new file mode 100644
index 0000000000..3233c34fef
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/TipsRequester.java
@@ -0,0 +1,24 @@
+package com.iota.iri.network;
+
+/**
+ * The {@link TipsRequester} requests tips from all neighbors in a given interval.
+ */
+public interface TipsRequester {
+
+ /**
+ * Issues random tip requests to all connected neighbors.
+ */
+ void requestTips();
+
+ /**
+ * Starts the background worker that automatically calls {@link #requestTips()} periodically to request
+ * tips from neighbors.
+ */
+ void start();
+
+ /**
+ * Stops the background worker that requests tips from the neighbors.
+ */
+ void shutdown();
+
+}
diff --git a/src/main/java/com/iota/iri/network/UDPNeighbor.java b/src/main/java/com/iota/iri/network/UDPNeighbor.java
deleted file mode 100644
index c86ff72131..0000000000
--- a/src/main/java/com/iota/iri/network/UDPNeighbor.java
+++ /dev/null
@@ -1,56 +0,0 @@
-package com.iota.iri.network;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.DatagramPacket;
-import java.net.DatagramSocket;
-import java.net.InetSocketAddress;
-
-/**
- * Created by paul on 4/15/17.
- */
-
- /**
- * This class Extends {@link Neighbor} base class with UDP specific functionality.
- * It keeps reference of socket and doesnt maintains any queue for UDP outgoing packets.
- *
- */
-public class UDPNeighbor extends Neighbor {
-
- private static final Logger log = LoggerFactory.getLogger(UDPNeighbor.class);
-
- private final DatagramSocket socket;
-
- UDPNeighbor(final InetSocketAddress address, final DatagramSocket socket, final boolean isConfigured) {
- super(address, isConfigured);
- this.socket = socket;
- }
-
- /**
- * This is a blocking write and it is not necessary to copy the sent data.
- *
- * @param packet the packet to be sent immediately.
- */
- @Override
- public void send(DatagramPacket packet) {
- try {
- packet.setSocketAddress(getAddress());
- socket.send(packet);
- incSentTransactions();
- } catch (final Exception e) {
- log.error("Error sending UDP packet to [{}]: {}", getAddress(), e.toString());
- }
- }
-
- @Override
- public int getPort() {
- return getAddress().getPort();
- }
-
- @Override
- public String connectionType() {
- return "udp";
- }
-
-}
\ No newline at end of file
diff --git a/src/main/java/com/iota/iri/network/UDPReceiver.java b/src/main/java/com/iota/iri/network/UDPReceiver.java
deleted file mode 100644
index d2b57f637b..0000000000
--- a/src/main/java/com/iota/iri/network/UDPReceiver.java
+++ /dev/null
@@ -1,128 +0,0 @@
-package com.iota.iri.network;
-
-import com.iota.iri.conf.NodeConfig;
-import com.iota.iri.crypto.Sponge;
-import com.iota.iri.crypto.SpongeFactory;
-import com.iota.iri.model.Hash;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.DatagramPacket;
-import java.net.DatagramSocket;
-import java.net.SocketAddress;
-import java.util.Arrays;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Created by paul on 4/16/17.
- */
-public class UDPReceiver {
- private static final Logger log = LoggerFactory.getLogger(UDPReceiver.class);
-
- private final DatagramPacket receivingPacket;
-
- private final AtomicBoolean shuttingDown = new AtomicBoolean(false);
- private final int port;
- private final Node node;
- private final int packetSize;
-
- private DatagramSocket socket;
-
- private final int PROCESSOR_THREADS = Math.max(1, Runtime.getRuntime().availableProcessors() * 4 );
-
- private final ExecutorService processor = new ThreadPoolExecutor(PROCESSOR_THREADS, PROCESSOR_THREADS, 5000L,
- TimeUnit.MILLISECONDS, new ArrayBlockingQueue(PROCESSOR_THREADS, true),
- new ThreadPoolExecutor.AbortPolicy());
-
- private Thread receivingThread;
-
- public UDPReceiver(Node node, NodeConfig config) {
- this.node = node;
- this.port = config.getUdpReceiverPort();
- this.packetSize = config.getTransactionPacketSize();
- this.receivingPacket = new DatagramPacket(new byte[packetSize], packetSize);
- }
-
- public void init() throws Exception {
-
- socket = new DatagramSocket(port);
- node.setUDPSocket(socket);
- log.info("UDP replicator is accepting connections on udp port " + port);
-
- receivingThread = new Thread(spawnReceiverThread(), "UDP receiving thread");
- receivingThread.start();
- }
-
- private Runnable spawnReceiverThread() {
- return () -> {
-
-
- log.info("Spawning Receiver Thread");
-
- final Sponge curl = SpongeFactory.create(SpongeFactory.Mode.CURLP81);
- final byte[] requestedTransaction = new byte[Hash.SIZE_IN_BYTES];
-
- int processed = 0, dropped = 0;
-
- while (!shuttingDown.get()) {
-
- if (((processed + dropped) % 50000 == 49999)) {
- log.info("Receiver thread processed/dropped ratio: "+processed+"/"+dropped);
- processed = 0;
- dropped = 0;
- }
-
- try {
- socket.receive(receivingPacket);
-
- if (receivingPacket.getLength() == packetSize) {
-
- byte[] bytes = Arrays.copyOf(receivingPacket.getData(), receivingPacket.getLength());
- SocketAddress address = receivingPacket.getSocketAddress();
-
- processor.submit(() -> node.preProcessReceivedData(bytes, address, "udp"));
- processed++;
-
- Thread.yield();
-
- } else {
- receivingPacket.setLength(packetSize);
- }
- } catch (final RejectedExecutionException e) {
- //no free thread, packet dropped
- dropped++;
-
- } catch (final Exception e) {
- log.error("Receiver Thread Exception:", e);
- }
- }
- log.info("Shutting down spawning Receiver Thread");
- };
- }
-
- public void send(final DatagramPacket packet) {
- try {
- if (socket != null) {
- socket.send(packet);
- }
- } catch (IOException e) {
- // ignore
- }
- }
-
- public void shutdown() throws InterruptedException {
- shuttingDown.set(true);
- processor.shutdown();
- processor.awaitTermination(6, TimeUnit.SECONDS);
- try {
- receivingThread.join(6000L);
- }
- catch (Exception e) {
- // ignore
- }
- }
-
-}
diff --git a/src/main/java/com/iota/iri/network/impl/TipsRequesterImpl.java b/src/main/java/com/iota/iri/network/impl/TipsRequesterImpl.java
new file mode 100644
index 0000000000..f1e714293f
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/impl/TipsRequesterImpl.java
@@ -0,0 +1,104 @@
+package com.iota.iri.network.impl;
+
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.network.NeighborRouter;
+import com.iota.iri.network.TipsRequester;
+import com.iota.iri.network.TransactionRequester;
+import com.iota.iri.network.neighbor.Neighbor;
+import com.iota.iri.network.pipeline.TransactionProcessingPipeline;
+import com.iota.iri.service.milestone.LatestMilestoneTracker;
+import com.iota.iri.storage.Tangle;
+import com.iota.iri.utils.thread.DedicatedScheduledExecutorService;
+import com.iota.iri.utils.thread.SilentScheduledExecutorService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * The {@link TipsRequesterImpl} requests tips from all neighbors in a given interval.
+ */
+public class TipsRequesterImpl implements TipsRequester {
+
+ private static final Logger log = LoggerFactory.getLogger(TipsRequesterImpl.class);
+ private static final int REQUESTER_THREAD_INTERVAL = 5000;
+
+ private final SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService(
+ "Tips Requester", log);
+
+ private NeighborRouter neighborRouter;
+ private Tangle tangle;
+ private TransactionRequester txRequester;
+ private LatestMilestoneTracker latestMilestoneTracker;
+
+ private long lastIterationTime = 0;
+
+ /**
+ * Initializes the dependencies.
+ *
+ * @param neighborRouter the {@link NeighborRouter} to use
+ * @param tangle the {@link Tangle} database to load the latest milestone from
+ * @param latestMilestoneTracker the {@link LatestMilestoneTracker} to get the latest milestone hash from
+ * @param txRequester the {@link TransactionRequester} to get the current number of requested
+ * transactions from
+ */
+ public void init(NeighborRouter neighborRouter, Tangle tangle, LatestMilestoneTracker latestMilestoneTracker,
+ TransactionRequester txRequester) {
+ this.neighborRouter = neighborRouter;
+ this.tangle = tangle;
+ this.latestMilestoneTracker = latestMilestoneTracker;
+ this.txRequester = txRequester;
+ }
+
+ /**
+ * Starts a dedicated thread for the {@link TipsRequesterImpl} and then starts requesting of tips.
+ */
+ public void start() {
+ executorService.silentScheduleWithFixedDelay(this::requestTips, 0, REQUESTER_THREAD_INTERVAL,
+ TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Performs a single round of tip requesting: sends the latest milestone transaction to all connected neighbors.
+ */
+ public void requestTips() {
+ try {
+ final TransactionViewModel msTVM = TransactionViewModel.fromHash(tangle,
+ latestMilestoneTracker.getLatestMilestoneHash());
+
+ if (msTVM.getBytes().length > 0) {
+ for (Neighbor neighbor : neighborRouter.getConnectedNeighbors().values()) {
+ if(Thread.currentThread().isInterrupted()){
+ return;
+ }
+ try {
+ neighborRouter.gossipTransactionTo(neighbor, msTVM, true);
+ } catch (Exception e) {
+ log.error("error while sending tip request to neighbor {}. reason: {}", neighbor.getHostAddressAndPort(), e.getMessage());
+ }
+ }
+ }
+
+ long now = System.currentTimeMillis();
+ if ((now - lastIterationTime) > 10_000L) {
+ lastIterationTime = now;
+ TransactionProcessingPipeline txPipeline = neighborRouter.getTransactionProcessingPipeline();
+ log.info(
+ "toProcess = {} , toBroadcast = {} , toRequest = {} , toReply = {} / totalTransactions = {}",
+ txPipeline.getReceivedStageQueue().size(), txPipeline.getBroadcastStageQueue().size(),
+ txRequester.numberOfTransactionsToRequest(), txPipeline.getReplyStageQueue().size(),
+ TransactionViewModel.getNumberOfStoredTransactions(tangle));
+ }
+ } catch (final Exception e) {
+ log.error("Tips Requester Thread Exception:", e);
+ }
+ }
+
+ /**
+ * Shuts down the {@link TipsRequesterImpl}.
+ */
+ public void shutdown() {
+ executorService.shutdownNow();
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java b/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java
index 18b403ad69..378628b450 100644
--- a/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java
+++ b/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java
@@ -4,10 +4,10 @@
import com.iota.iri.controllers.TipsViewModel;
import com.iota.iri.controllers.TransactionViewModel;
import com.iota.iri.model.Hash;
-import com.iota.iri.network.Neighbor;
-import com.iota.iri.network.Node;
import com.iota.iri.network.TransactionRequester;
import com.iota.iri.network.TransactionRequesterWorker;
+import com.iota.iri.network.NeighborRouter;
+import com.iota.iri.network.neighbor.Neighbor;
import com.iota.iri.storage.Tangle;
import com.iota.iri.utils.thread.DedicatedScheduledExecutorService;
import com.iota.iri.utils.thread.SilentScheduledExecutorService;
@@ -63,7 +63,7 @@ public class TransactionRequesterWorkerImpl implements TransactionRequesterWorke
/**
* The network manager of the node.
*/
- private Node node;
+ private NeighborRouter neighborRouter;
/**
* The manager of the background task.
@@ -87,16 +87,16 @@ public class TransactionRequesterWorkerImpl implements TransactionRequesterWorke
* @param tangle Tangle object which acts as a database interface
* @param transactionRequester manager for the requested transactions
* @param tipsViewModel the manager for the tips
- * @param node the network manager of the node
+ * @param neighborRouter the network manager of the node
* @return the initialized instance itself to allow chaining
*/
public TransactionRequesterWorkerImpl init(Tangle tangle, TransactionRequester transactionRequester,
- TipsViewModel tipsViewModel, Node node) {
+ TipsViewModel tipsViewModel, NeighborRouter neighborRouter) {
this.tangle = tangle;
this.transactionRequester = transactionRequester;
this.tipsViewModel = tipsViewModel;
- this.node = node;
+ this.neighborRouter = neighborRouter;
return this;
}
@@ -126,10 +126,10 @@ public boolean processRequestQueue() {
}
private void sendToNodes(TransactionViewModel transaction) {
- for (Neighbor neighbor : node.getNeighbors()) {
+ for (Neighbor neighbor : neighborRouter.getConnectedNeighbors().values()) {
try {
// automatically adds the hash of a requested transaction when sending a packet
- node.sendPacket(transaction, neighbor);
+ neighborRouter.gossipTransactionTo(neighbor, transaction);
} catch (Exception e) {
log.error("unexpected error while sending request to neighbour", e);
}
diff --git a/src/main/java/com/iota/iri/network/neighbor/Neighbor.java b/src/main/java/com/iota/iri/network/neighbor/Neighbor.java
new file mode 100644
index 0000000000..1a7a2cd613
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/neighbor/Neighbor.java
@@ -0,0 +1,126 @@
+package com.iota.iri.network.neighbor;
+
+import com.iota.iri.network.protocol.Handshake;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * A {@link Neighbor} is a peer to/from which messages are sent/read from.
+ */
+public interface Neighbor {
+
+ /**
+ * Defines not knowing yet what server socket port a neighbor is using.
+ */
+ int UNKNOWN_REMOTE_SERVER_SOCKET_PORT = -1;
+
+ /**
+ * Instructs the {@link Neighbor} to read from its source channel.
+ *
+ * @return the amount of bytes read
+ * @throws IOException thrown when reading from the source channel fails
+ */
+ int read() throws IOException;
+
+ /**
+ * Instructs the {@link Neighbor} to write to its destination channel.
+ *
+ * @return the amount of bytes written
+ * @throws IOException thrown when writing to the destination channel fails
+ */
+ int write() throws IOException;
+
+ /**
+ * Instructs the {@link Neighbor} to read from its source channel a {@link Handshake} packet.
+ *
+ * @return the {@link Handshake} object defining the state of the handshaking
+ * @throws IOException thrown when reading from the source channels fails
+ */
+ Handshake handshake() throws IOException;
+
+ /**
+ * Instructs the {@link Neighbor} to send the given {@link ByteBuffer} to its destination channel.
+ *
+ * @param buf the {@link ByteBuffer} containing the message to send
+ */
+ void send(ByteBuffer buf);
+
+ /**
+ * Gets the host address.
+ *
+ * @return the host address of the neighbor (always the IP address, never a domain name)
+ */
+ String getHostAddress();
+
+ /**
+ * Sets the domain name.
+ *
+ * @param domain the domain to set
+ */
+ void setDomain(String domain);
+
+ /**
+ * Gets the domain name.
+ *
+ * @return the domain name
+ */
+ String getDomain();
+
+ /**
+ * Gets the server socket port.
+ *
+ * @return the server socket port
+ */
+ int getRemoteServerSocketPort();
+
+ /**
+ * Sets the server socket port.
+ *
+ * @param port the port number to set
+ */
+ void setRemoteServerSocketPort(int port);
+
+ /**
+ * Gets the host and port which also defines the identity of the {@link Neighbor}.
+ *
+ * @return the host and port
+ */
+ String getHostAddressAndPort();
+
+ /**
+ * Gets the current state of the {@link Neighbor}.
+ *
+ * @return the state
+ */
+ NeighborState getState();
+
+ /**
+ * Sets the state of the {@link Neighbor}.
+ *
+ * @param state the state to set
+ */
+ void setState(NeighborState state);
+
+ /**
+ * Gets the metrics of the {@link Neighbor}.
+ *
+ * @return the metrics
+ */
+ NeighborMetrics getMetrics();
+
+ /**
+ * Sets the protocol version to use to communicate with this {@link Neighbor}.
+ *
+ * @param version the protocol version to use
+ */
+ void setProtocolVersion(int version);
+
+ /**
+ * The protocol version used to communicate with the {@link Neighbor}.
+ *
+ * @return the protocol version
+ */
+ int getProtocolVersion();
+
+}
diff --git a/src/main/java/com/iota/iri/network/neighbor/NeighborMetrics.java b/src/main/java/com/iota/iri/network/neighbor/NeighborMetrics.java
new file mode 100644
index 0000000000..735f0ec52c
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/neighbor/NeighborMetrics.java
@@ -0,0 +1,105 @@
+package com.iota.iri.network.neighbor;
+
+/**
+ * Defines the metrics of a {@link Neighbor}.
+ */
+public interface NeighborMetrics {
+
+ /**
+ * Returns the number of all transactions.
+ *
+ * @return the number of all transactions
+ */
+ long getAllTransactionsCount();
+
+ /**
+ * Increments the all transactions count.
+ *
+ * @return the number of all transactions
+ */
+ long incrAllTransactionsCount();
+
+ /**
+ * Gets the number of invalid transactions.
+ *
+ * @return the number of invalid transactions
+ */
+ long getInvalidTransactionsCount();
+
+ /**
+ * Increments the invalid transaction count.
+ *
+ * @return the number of invalid transactions
+ */
+ long incrInvalidTransactionsCount();
+
+ /**
+ * Gets the number of stale transactions.
+ *
+ * @return the number of stale transactions
+ */
+ long getStaleTransactionsCount();
+
+ /**
+ * Increments the number of stale transactions.
+ *
+ * @return the number of stale transactions
+ */
+ long incrStaleTransactionsCount();
+
+ /**
+ * Gets the number of new transactions.
+ *
+ * @return the number of new transactions
+ */
+ long getNewTransactionsCount();
+
+ /**
+ * Increments the new transactions count.
+ *
+ * @return the number of new transactions
+ */
+ long incrNewTransactionsCount();
+
+ /**
+ * Gets the number of random transactions.
+ *
+ * @return the number of random transactions
+ */
+ long getRandomTransactionRequestsCount();
+
+ /**
+ * Increments the random transactions count.
+ *
+ * @return the number of random transactions
+ */
+ long incrRandomTransactionRequestsCount();
+
+ /**
+ * Gets the number of sent transactions.
+ *
+ * @return the number of sent transactions
+ */
+ long getSentTransactionsCount();
+
+ /**
+ * Increments the sent transactions count.
+ *
+ * @return the number of sent transactions
+ */
+ long incrSentTransactionsCount();
+
+ /**
+ * Gets the number of packets dropped from the neighbor's send queue.
+ *
+ * @return the number of packets dropped from the neighbor's send queue
+ */
+ long getDroppedSendPacketsCount();
+
+ /**
+ * Increments the number of packets dropped from the neighbor's send queue.
+ *
+ * @return the number of packets dropped from the neighbor's send queue
+ */
+ long incrDroppedSendPacketsCount();
+}
diff --git a/src/main/java/com/iota/iri/network/neighbor/NeighborState.java b/src/main/java/com/iota/iri/network/neighbor/NeighborState.java
new file mode 100644
index 0000000000..065f29c82b
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/neighbor/NeighborState.java
@@ -0,0 +1,10 @@
+package com.iota.iri.network.neighbor;
+
+/**
+ * Defines the different states a {@link Neighbor} can be in.
+ */
+public enum NeighborState {
+ HANDSHAKING, // initial state: the handshake packet exchange has not yet completed
+ READY_FOR_MESSAGES, // NOTE(review): presumably entered once the handshake succeeded — confirm at call sites
+ MARKED_FOR_DISCONNECT, // terminal: once set, further setState() calls are ignored (see NeighborImpl#setState)
+}
diff --git a/src/main/java/com/iota/iri/network/neighbor/impl/NeighborImpl.java b/src/main/java/com/iota/iri/network/neighbor/impl/NeighborImpl.java
new file mode 100644
index 0000000000..56a4621db4
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/neighbor/impl/NeighborImpl.java
@@ -0,0 +1,264 @@
+package com.iota.iri.network.neighbor.impl;
+
+import com.iota.iri.network.neighbor.Neighbor;
+import com.iota.iri.network.neighbor.NeighborMetrics;
+import com.iota.iri.network.neighbor.NeighborState;
+import com.iota.iri.network.pipeline.TransactionProcessingPipeline;
+import com.iota.iri.network.protocol.*;
+import com.iota.iri.network.protocol.message.MessageReader;
+import com.iota.iri.network.protocol.message.MessageReaderFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.*;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+/**
+ * {@link NeighborImpl} is an implementation of {@link Neighbor} using a {@link ByteChannel} as the source and
+ * destination of data.
+ *
+ * @param <T> the channel type; it must be a {@link SelectableChannel} (so write interest can be re-registered
+ * on the {@link Selector}) which is also a {@link ByteChannel} for reading/writing
+ */
+public class NeighborImpl<T extends SelectableChannel & ByteChannel> implements Neighbor {
+
+ private static final Logger log = LoggerFactory.getLogger(NeighborImpl.class);
+
+ /**
+ * The current state whether the neighbor is parsing a header or reading a message.
+ */
+ private enum ReadState {
+ PARSE_HEADER, HANDLE_MESSAGE
+ }
+
+ // next stage in the processing of incoming data
+ private TransactionProcessingPipeline txPipeline;
+
+ // data to be written out to the neighbor
+ private BlockingQueue<ByteBuffer> sendQueue = new ArrayBlockingQueue<>(100);
+ private ByteBuffer currentToWrite;
+
+ private NeighborState state = NeighborState.HANDSHAKING;
+ private ReadState readState = ReadState.PARSE_HEADER;
+
+ // ident
+ private String domain;
+ private String hostAddress;
+ private int remoteServerSocketPort;
+ private int protocolVersion;
+
+ // we need the reference to the channel in order to register it for
+ // write interests once messages to send are available.
+ private T channel;
+ private Selector selector;
+
+ private NeighborMetrics metrics = new NeighborMetricsImpl();
+ private MessageReader msgReader;
+ private Handshake handshake = new Handshake();
+
+ /**
+ * Creates a new {@link NeighborImpl} using the given channel.
+ *
+ * @param selector the {@link Selector} which is associated with passed in channel
+ * @param channel the channel to use to read and write bytes from/to.
+ * @param hostAddress the host address (IP address) of the neighbor
+ * @param remoteServerSocketPort the server socket port of the neighbor
+ * @param txPipeline the transaction processing pipeline to submit newly received transactions to
+ */
+ public NeighborImpl(Selector selector, T channel, String hostAddress, int remoteServerSocketPort,
+ TransactionProcessingPipeline txPipeline) {
+ this.hostAddress = hostAddress;
+ this.remoteServerSocketPort = remoteServerSocketPort;
+ this.selector = selector;
+ this.channel = channel;
+ this.txPipeline = txPipeline;
+ this.msgReader = MessageReaderFactory.create(ProtocolMessage.HEADER, ProtocolMessage.HEADER.getMaxLength());
+ }
+
+ @Override
+ public Handshake handshake() throws IOException {
+ if (read() == -1) {
+ handshake.setState(Handshake.State.FAILED);
+ }
+ return handshake;
+ }
+
+ @Override
+ public int read() throws IOException {
+ int bytesRead = msgReader.readMessage(channel);
+ if (!msgReader.ready()) {
+ return bytesRead;
+ }
+ ByteBuffer msg = msgReader.getMessage();
+ msg.flip();
+ switch (readState) {
+ case PARSE_HEADER:
+ if (!parseHeader(msg)) {
+ return -1;
+ }
+ // execute another read as we likely already have the message in the network buffer
+ return read();
+
+ case HANDLE_MESSAGE:
+ handleMessage(msg);
+ break;
+ default:
+ // do nothing
+ }
+ return bytesRead;
+ }
+
+ /**
+ * Parses the header in the given {@link ByteBuffer} and sets up the message reader to read the bytes for a message
+ * of the advertised type/size.
+ *
+ * @param msg the {@link ByteBuffer} containing the header
+ * @return whether the parsing was successful or not
+ */
+ private boolean parseHeader(ByteBuffer msg) {
+ ProtocolHeader protocolHeader;
+ try {
+ protocolHeader = Protocol.parseHeader(msg);
+ } catch (UnknownMessageTypeException e) {
+ log.error("unknown message type received from {}, closing connection", getHostAddressAndPort());
+ return false;
+ } catch (InvalidProtocolMessageLengthException e) {
+ log.error("{} is trying to send a message with an invalid length for the given message type, "
+ + "closing connection", getHostAddressAndPort());
+ return false;
+ }
+
+ // if we are handshaking, then we must have a handshaking packet as the initial packet
+ if (state == NeighborState.HANDSHAKING && protocolHeader.getMessageType() != ProtocolMessage.HANDSHAKE) {
+ log.error("neighbor {}'s initial packet is not a handshaking packet, closing connection",
+ getHostAddressAndPort());
+ return false;
+ }
+
+ // we got the header, now we want to read/handle the message
+ readState = ReadState.HANDLE_MESSAGE;
+ msgReader = MessageReaderFactory.create(protocolHeader.getMessageType(), protocolHeader.getMessageLength());
+ return true;
+ }
+
+ /**
+ * Relays the message to the component in charge of handling this message.
+ *
+ * @param msg the {@link ByteBuffer} containing the message (without header)
+ */
+ private void handleMessage(ByteBuffer msg) {
+ switch (msgReader.getMessageType()) {
+ case HANDSHAKE:
+ handshake = Handshake.fromByteBuffer(msg);
+ break;
+ case TRANSACTION_GOSSIP:
+ txPipeline.process(this, msg);
+ break;
+ default:
+ // do nothing
+ }
+ // reset
+ readState = ReadState.PARSE_HEADER;
+ msgReader = MessageReaderFactory.create(ProtocolMessage.HEADER, ProtocolMessage.HEADER.getMaxLength());
+ }
+
+ @Override
+ public int write() throws IOException {
+ // previous message wasn't fully sent yet
+ if (currentToWrite != null) {
+ return writeMsg();
+ }
+
+ currentToWrite = sendQueue.poll();
+ if (currentToWrite == null) {
+ return 0;
+ }
+ return writeMsg();
+ }
+
+ private int writeMsg() throws IOException {
+ int written = channel.write(currentToWrite);
+ if (!currentToWrite.hasRemaining()) {
+ currentToWrite = null;
+ }
+ return written;
+ }
+
+ @Override
+ public void send(ByteBuffer buf) {
+ // re-register write interest
+ SelectionKey key = channel.keyFor(selector);
+ if (key != null && key.isValid() && (key.interestOps() & SelectionKey.OP_WRITE) == 0) {
+ key.interestOps(SelectionKey.OP_READ | SelectionKey.OP_WRITE);
+ selector.wakeup();
+ }
+
+ if (!sendQueue.offer(buf)) {
+ metrics.incrDroppedSendPacketsCount();
+ }
+ }
+
+ @Override
+ public String getHostAddressAndPort() {
+ if (remoteServerSocketPort == Neighbor.UNKNOWN_REMOTE_SERVER_SOCKET_PORT) {
+ return hostAddress;
+ }
+ return String.format("%s:%d", hostAddress, remoteServerSocketPort);
+ }
+
+ @Override
+ public String getHostAddress() {
+ return hostAddress;
+ }
+
+ @Override
+ public void setDomain(String domain) {
+ this.domain = domain;
+ }
+
+ @Override
+ public String getDomain() {
+ return domain;
+ }
+
+ @Override
+ public int getRemoteServerSocketPort() {
+ return remoteServerSocketPort;
+ }
+
+ @Override
+ public void setRemoteServerSocketPort(int port) {
+ remoteServerSocketPort = port;
+ }
+
+ @Override
+ public NeighborState getState() {
+ return state;
+ }
+
+ @Override
+ public void setState(NeighborState state) {
+ if (this.state == NeighborState.MARKED_FOR_DISCONNECT) {
+ return;
+ }
+ this.state = state;
+ }
+
+ @Override
+ public NeighborMetrics getMetrics() {
+ return metrics;
+ }
+
+ @Override
+ public void setProtocolVersion(int protocolVersion) {
+ this.protocolVersion = protocolVersion;
+ }
+
+ @Override
+ public int getProtocolVersion() {
+ return protocolVersion;
+ }
+
+}
diff --git a/src/main/java/com/iota/iri/network/neighbor/impl/NeighborMetricsImpl.java b/src/main/java/com/iota/iri/network/neighbor/impl/NeighborMetricsImpl.java
new file mode 100644
index 0000000000..9b5c71f307
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/neighbor/impl/NeighborMetricsImpl.java
@@ -0,0 +1,89 @@
+package com.iota.iri.network.neighbor.impl;
+
+import com.iota.iri.network.neighbor.NeighborMetrics;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Implements {@link NeighborMetrics} using {@link AtomicLong}s.
+ */
+public class NeighborMetricsImpl implements NeighborMetrics {
+
+ private AtomicLong allTxsCount = new AtomicLong();
+ private AtomicLong invalidTxsCount = new AtomicLong();
+ private AtomicLong staleTxsCount = new AtomicLong();
+ private AtomicLong randomTxsCount = new AtomicLong();
+ private AtomicLong sentTxsCount = new AtomicLong();
+ private AtomicLong newTxsCount = new AtomicLong();
+ private AtomicLong droppedSendPacketsCount = new AtomicLong();
+
+ @Override
+ public long getAllTransactionsCount() {
+ return allTxsCount.get();
+ }
+
+ @Override
+ public long incrAllTransactionsCount() {
+ return allTxsCount.incrementAndGet();
+ }
+
+ @Override
+ public long getInvalidTransactionsCount() {
+ return invalidTxsCount.get();
+ }
+
+ @Override
+ public long incrInvalidTransactionsCount() {
+ return invalidTxsCount.incrementAndGet();
+ }
+
+ @Override
+ public long getStaleTransactionsCount() {
+ return staleTxsCount.get();
+ }
+
+ @Override
+ public long incrStaleTransactionsCount() {
+ return staleTxsCount.incrementAndGet();
+ }
+
+ @Override
+ public long getNewTransactionsCount() {
+ return newTxsCount.get();
+ }
+
+ @Override
+ public long incrNewTransactionsCount() {
+ return newTxsCount.incrementAndGet();
+ }
+
+ @Override
+ public long getRandomTransactionRequestsCount() {
+ return randomTxsCount.get();
+ }
+
+ @Override
+ public long incrRandomTransactionRequestsCount() {
+ return randomTxsCount.incrementAndGet();
+ }
+
+ @Override
+ public long getSentTransactionsCount() {
+ return sentTxsCount.get();
+ }
+
+ @Override
+ public long incrSentTransactionsCount() {
+ return sentTxsCount.incrementAndGet();
+ }
+
+ @Override
+ public long getDroppedSendPacketsCount() {
+ return droppedSendPacketsCount.get();
+ }
+
+ @Override
+ public long incrDroppedSendPacketsCount() {
+ return droppedSendPacketsCount.incrementAndGet();
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/BroadcastPayload.java b/src/main/java/com/iota/iri/network/pipeline/BroadcastPayload.java
new file mode 100644
index 0000000000..7da95d4ed6
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/BroadcastPayload.java
@@ -0,0 +1,48 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.network.neighbor.Neighbor;
+
+/**
+ * Defines a payload which gets submitted to the {@link BroadcastStage}.
+ */
+public class BroadcastPayload extends Payload {
+
+ private Neighbor originNeighbor;
+ private TransactionViewModel tvm;
+
+ /**
+ * Creates a new {@link BroadcastPayload} with the given neighbor and transaction.
+ *
+ * @param originNeighbor The neighbor from which the transaction originated from
+ * @param tvm The transaction
+ */
+ public BroadcastPayload(Neighbor originNeighbor, TransactionViewModel tvm) {
+ this.originNeighbor = originNeighbor;
+ this.tvm = tvm;
+ }
+
+ /**
+ * Gets the origin neighbor.
+ *
+ * @return the origin neighbor
+ */
+ public Neighbor getOriginNeighbor() {
+ return originNeighbor;
+ }
+
+ /**
+ * Gets the transaction
+ *
+ * @return the transaction
+ */
+ public TransactionViewModel getTransactionViewModel() {
+ return tvm;
+ }
+
+ @Override
+ public String toString() {
+ return "BroadcastPayload{" + "originNeighbor=" + originNeighbor.getHostAddressAndPort() + ", tvm="
+ + tvm.getHash() + '}';
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/BroadcastStage.java b/src/main/java/com/iota/iri/network/pipeline/BroadcastStage.java
new file mode 100644
index 0000000000..4f26765362
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/BroadcastStage.java
@@ -0,0 +1,60 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.network.NeighborRouter;
+import com.iota.iri.network.neighbor.Neighbor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+/**
+ * The {@link BroadcastStage} takes care of broadcasting newly received transactions to all neighbors except the
+ * neighbor from which the transaction originated from.
+ */
+public class BroadcastStage implements Stage {
+
+ private static final Logger log = LoggerFactory.getLogger(BroadcastStage.class);
+
+ private NeighborRouter neighborRouter;
+
+ /**
+ * Creates a new {@link BroadcastStage}.
+ *
+ * @param neighborRouter The {@link NeighborRouter} instance to use to broadcast
+ */
+ public BroadcastStage(NeighborRouter neighborRouter) {
+ this.neighborRouter = neighborRouter;
+ }
+
+ /**
+ * Extracts the transaction and then broadcasts it to all neighbors. If the transaction originated from a neighbor,
+ * it is not sent to that given neighbor.
+ *
+ * @param ctx the broadcast stage {@link ProcessingContext}
+ * @return the same ctx as passed in
+ */
+ @Override
+ public ProcessingContext process(ProcessingContext ctx) {
+ BroadcastPayload payload = (BroadcastPayload) ctx.getPayload();
+ Neighbor originNeighbor = payload.getOriginNeighbor();
+ TransactionViewModel tvm = payload.getTransactionViewModel();
+
+ // racy: the set of connected neighbors may change while we iterate over it
+ Map<String, Neighbor> currentlyConnectedNeighbors = neighborRouter.getConnectedNeighbors();
+ for (Neighbor neighbor : currentlyConnectedNeighbors.values()) {
+ // don't send back to origin neighbor
+ if (neighbor.equals(originNeighbor)) {
+ continue;
+ }
+ try {
+ neighborRouter.gossipTransactionTo(neighbor, tvm);
+ } catch (Exception e) {
+ // log the stack trace too, otherwise gossip failures are hard to diagnose
+ log.error(e.getMessage(), e);
+ }
+ }
+
+ ctx.setNextStage(TransactionProcessingPipeline.Stage.FINISH);
+ return ctx;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/HashingPayload.java b/src/main/java/com/iota/iri/network/pipeline/HashingPayload.java
new file mode 100644
index 0000000000..0e96a3fc1f
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/HashingPayload.java
@@ -0,0 +1,43 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.crypto.batched.HashRequest;
+import com.iota.iri.model.Hash;
+import com.iota.iri.network.neighbor.Neighbor;
+
+/**
+ * Defines a payload which gets submitted to the {@link HashingStage}.
+ */
+public class HashingPayload extends ValidationPayload {
+
+ // the batched-hashing request associated with this payload; assigned after construction via the setter
+ private HashRequest hashRequest;
+
+ /**
+ * Creates a new {@link HashingPayload}.
+ *
+ * @param neighbor The neighbor from which the transaction originated from
+ * @param txTrits The transaction trits
+ * @param txDigest The transaction bytes digest
+ * @param hashOfRequestedTx The hash of the requested transaction
+ */
+ public HashingPayload(Neighbor neighbor, byte[] txTrits, Long txDigest, Hash hashOfRequestedTx) {
+ // NOTE(review): the third super() argument is intentionally null — confirm against ValidationPayload's ctor
+ super(neighbor, txTrits, null, txDigest, hashOfRequestedTx);
+ }
+
+ /**
+ * Sets the {@link HashRequest}.
+ *
+ * @param hashRequest the {@link HashRequest} to set
+ */
+ public void setHashRequest(HashRequest hashRequest) {
+ this.hashRequest = hashRequest;
+ }
+
+ /**
+ * Gets the {@link HashRequest}.
+ *
+ * @return the {@link HashRequest}
+ */
+ public HashRequest getHashRequest() {
+ return hashRequest;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/HashingStage.java b/src/main/java/com/iota/iri/network/pipeline/HashingStage.java
new file mode 100644
index 0000000000..3b2b44a57c
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/HashingStage.java
@@ -0,0 +1,36 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.crypto.batched.BatchedHasher;
+import com.iota.iri.crypto.batched.HashRequest;
+
+/**
+ * The {@link HashingStage} batches up transaction trits and then hashes them using a {@link BatchedHasher} in one go.
+ */
+public class HashingStage implements Stage {
+
+ // batches multiple hashing requests into fewer hash invocations
+ private BatchedHasher hasher;
+
+ /**
+ * Creates a new {@link HashingStage} backed by the given {@link BatchedHasher}.
+ *
+ * @param batchedHasher The {@link BatchedHasher} to use
+ */
+ public HashingStage(BatchedHasher batchedHasher) {
+ this.hasher = batchedHasher;
+ }
+
+ /**
+ * Extracts the {@link HashRequest} from the context and hands it to the {@link BatchedHasher}. The
+ * {@link com.iota.iri.crypto.batched.HashRequest}'s callback must be setup to submit the result to the
+ * {@link ValidationStage}.
+ *
+ * @param ctx the hashing stage {@link ProcessingContext}
+ * @return the same ctx as passed in
+ */
+ @Override
+ public ProcessingContext process(ProcessingContext ctx) {
+ HashingPayload hashingPayload = (HashingPayload) ctx.getPayload();
+ hasher.submitHashingRequest(hashingPayload.getHashRequest());
+ return ctx;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/MultiStagePayload.java b/src/main/java/com/iota/iri/network/pipeline/MultiStagePayload.java
new file mode 100644
index 0000000000..01b280f7b6
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/MultiStagePayload.java
@@ -0,0 +1,52 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.network.neighbor.Neighbor;
+
+/**
+ * A payload which contains processing contexts for two stages.
+ */
+public class MultiStagePayload extends Payload {
+
+ private final ProcessingContext leftCtx;
+ private final ProcessingContext rightCtx;
+
+ /**
+ * Creates a new {@link MultiStagePayload} with the given left and right assigned contexts.
+ *
+ * @param left the left assigned context
+ * @param right the right assigned context
+ */
+ public MultiStagePayload(ProcessingContext left, ProcessingContext right) {
+ this.leftCtx = left;
+ this.rightCtx = right;
+ }
+
+ /**
+ * Returns the left assigned context.
+ *
+ * @return the left assigned context
+ */
+ public ProcessingContext getLeft() {
+ return leftCtx;
+ }
+
+ /**
+ * Returns the right assigned context.
+ *
+ * @return the right assigned context
+ */
+ public ProcessingContext getRight() {
+ return rightCtx;
+ }
+
+ /**
+ * {@inheritDoc} The origin neighbor is taken from the left context's payload.
+ */
+ @Override
+ public Neighbor getOriginNeighbor() {
+ return leftCtx.getPayload().getOriginNeighbor();
+ }
+
+ @Override
+ public String toString() {
+ return "MultiStagePayload{left=" + leftCtx.getPayload().toString() + ", right="
+ + rightCtx.getPayload().toString() + "}";
+ }
+}
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/Payload.java b/src/main/java/com/iota/iri/network/pipeline/Payload.java
new file mode 100644
index 0000000000..28f58faba8
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/Payload.java
@@ -0,0 +1,17 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.network.neighbor.Neighbor;
+
+/**
+ * Defines a payload which is given to a {@link ProcessingContext} for processing within a {@link Stage}.
+ */
+public abstract class Payload {
+
+ /**
+ * Gets the origin neighbor from which a given transaction originated from.
+ * Can be null if the transaction did not originate from a neighbor.
+ *
+ * @return the origin neighbor, or null when the transaction did not come from a neighbor
+ */
+ public abstract Neighbor getOriginNeighbor();
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/PreProcessPayload.java b/src/main/java/com/iota/iri/network/pipeline/PreProcessPayload.java
new file mode 100644
index 0000000000..8237e75411
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/PreProcessPayload.java
@@ -0,0 +1,55 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.network.neighbor.Neighbor;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Defines the payload which gets submitted to the {@link PreProcessStage}.
+ */
+public class PreProcessPayload extends Payload {
+
+ private Neighbor neighbor;
+ private final ByteBuffer gossipData;
+
+ /**
+ * Creates a new {@link PreProcessPayload} from the given origin neighbor and raw gossip data.
+ *
+ * @param neighbor The origin neighbor
+ * @param data The gossip transaction data
+ */
+ public PreProcessPayload(Neighbor neighbor, ByteBuffer data) {
+ this.neighbor = neighbor;
+ this.gossipData = data;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Neighbor getOriginNeighbor() {
+ return neighbor;
+ }
+
+ /**
+ * Sets the {@link Neighbor}.
+ *
+ * @param neighbor the {@link Neighbor}
+ */
+ public void setNeighbor(Neighbor neighbor) {
+ this.neighbor = neighbor;
+ }
+
+ /**
+ * Gets the transaction gossip data.
+ *
+ * @return the transaction gossip data
+ */
+ public ByteBuffer getData() {
+ return gossipData;
+ }
+
+ @Override
+ public String toString() {
+ return "PreProcessPayload{neighbor=" + neighbor.getHostAddressAndPort() + "}";
+ }
+}
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/PreProcessStage.java b/src/main/java/com/iota/iri/network/pipeline/PreProcessStage.java
new file mode 100644
index 0000000000..b926f356e8
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/PreProcessStage.java
@@ -0,0 +1,93 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.model.Hash;
+import com.iota.iri.model.HashFactory;
+import com.iota.iri.network.FIFOCache;
+import com.iota.iri.network.NeighborRouter;
+import com.iota.iri.network.protocol.Protocol;
+import com.iota.iri.utils.Converter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+
+/**
+ * The {@link PreProcessStage} expands truncated transaction gossip payloads, computes the digest of the payload and
+ * converts the transaction to its trits representation.
+ */
+public class PreProcessStage implements Stage {
+
+ private static final Logger log = LoggerFactory.getLogger(PreProcessStage.class);
+ // maps the digest of a transaction's bytes to its hash; used to detect already-seen transactions
+ private FIFOCache<Long, Hash> recentlySeenBytesCache;
+
+ /**
+ * Creates a new {@link PreProcessStage}.
+ *
+ * @param recentlySeenBytesCache The cache to use for checking whether a transaction is known
+ */
+ public PreProcessStage(FIFOCache<Long, Hash> recentlySeenBytesCache) {
+ this.recentlySeenBytesCache = recentlySeenBytesCache;
+ }
+
+ /**
+ * Extracts the transaction gossip payload, expands it, computes the digest and then creates a new
+ * {@link ProcessingContext} to the appropriate stage. If the transaction is not known, the transaction payload is
+ * also converted to its trits representation.
+ *
+ * @param ctx the pre process stage {@link ProcessingContext}
+ * @return a {@link ProcessingContext} which either redirects to the {@link ReplyStage} or {@link HashingStage}
+ * depending on whether the transaction is known
+ */
+ @Override
+ public ProcessingContext process(ProcessingContext ctx) {
+ PreProcessPayload payload = (PreProcessPayload) ctx.getPayload();
+ ByteBuffer packetData = payload.getData();
+ byte[] data = packetData.array();
+
+ // expand received tx data
+ byte[] txDataBytes = Protocol.expandTx(data);
+ // copy requested tx hash
+ byte[] reqHashBytes = Protocol.extractRequestedTxHash(data);
+
+ // increment all txs count
+ payload.getOriginNeighbor().getMetrics().incrAllTransactionsCount();
+
+ // compute digest of tx bytes data
+ long txDigest = NeighborRouter.getTxCacheDigest(txDataBytes);
+
+ Hash receivedTxHash = recentlySeenBytesCache.get(txDigest);
+ Hash requestedHash = HashFactory.TRANSACTION.create(reqHashBytes, 0,
+ Protocol.GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH);
+
+ // log cache hit/miss ratio every 50k get()s
+ if (log.isDebugEnabled()) {
+ long hits = recentlySeenBytesCache.getCacheHits();
+ long misses = recentlySeenBytesCache.getCacheMisses();
+ if ((hits + misses) % 50000L == 0) {
+ log.debug("recently seen bytes cache hit/miss ratio: {}/{}", hits, misses);
+ recentlySeenBytesCache.resetCacheStats();
+ }
+ }
+
+ // received tx is known, therefore we can submit to the reply stage directly.
+ if (receivedTxHash != null) {
+ // reply with a random tip by setting the request hash to the null hash
+ requestedHash = requestedHash.equals(receivedTxHash) ? Hash.NULL_HASH : requestedHash;
+ ctx.setNextStage(TransactionProcessingPipeline.Stage.REPLY);
+ ctx.setPayload(new ReplyPayload(payload.getOriginNeighbor(), requestedHash));
+ return ctx;
+ }
+
+ // convert tx byte data into trits representation once
+ byte[] txTrits = new byte[TransactionViewModel.TRINARY_SIZE];
+ Converter.getTrits(txDataBytes, txTrits);
+
+ // submit to hashing stage.
+ ctx.setNextStage(TransactionProcessingPipeline.Stage.HASHING);
+ HashingPayload hashingStagePayload = new HashingPayload(payload.getOriginNeighbor(), txTrits, txDigest,
+ requestedHash);
+ ctx.setPayload(hashingStagePayload);
+ return ctx;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/ProcessingContext.java b/src/main/java/com/iota/iri/network/pipeline/ProcessingContext.java
new file mode 100644
index 0000000000..4630a6fc52
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/ProcessingContext.java
@@ -0,0 +1,67 @@
+package com.iota.iri.network.pipeline;
+
+/**
+ * A {@link ProcessingContext} defines a context within the {@link TransactionProcessingPipelineImpl} of processing a
+ * transaction. It holds the information to which stage to be submitted next and the associated payload.
+ */
+public class ProcessingContext {
+
+ private TransactionProcessingPipelineImpl.Stage nextStage;
+ private Payload payload;
+
+ /**
+ * Creates a new {@link ProcessingContext} whose next stage is not yet determined.
+ *
+ * @param payload The payload
+ */
+ public ProcessingContext(Payload payload) {
+ this(null, payload);
+ }
+
+ /**
+ * Creates a new {@link ProcessingContext} destined for the given stage.
+ *
+ * @param nextStage The next stage
+ * @param payload The payload for the next stage
+ */
+ public ProcessingContext(TransactionProcessingPipelineImpl.Stage nextStage, Payload payload) {
+ this.nextStage = nextStage;
+ this.payload = payload;
+ }
+
+ /**
+ * Gets the next stage.
+ *
+ * @return the next stage to submit this {@link ProcessingContext} to
+ */
+ public TransactionProcessingPipelineImpl.Stage getNextStage() {
+ return nextStage;
+ }
+
+ /**
+ * Sets the next stage.
+ *
+ * @param nextStage the stage to set as the next stage
+ */
+ public void setNextStage(TransactionProcessingPipelineImpl.Stage nextStage) {
+ this.nextStage = nextStage;
+ }
+
+ /**
+ * Gets the payload.
+ *
+ * @return the payload
+ */
+ public Payload getPayload() {
+ return payload;
+ }
+
+ /**
+ * Sets the payload.
+ *
+ * @param payload the payload to set
+ */
+ public void setPayload(Payload payload) {
+ this.payload = payload;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/ReceivedPayload.java b/src/main/java/com/iota/iri/network/pipeline/ReceivedPayload.java
new file mode 100644
index 0000000000..db9b9a58d5
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/ReceivedPayload.java
@@ -0,0 +1,45 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.network.neighbor.Neighbor;
+
+/**
+ * Defines a payload which gets submitted to the {@link ReceivedStage}.
+ */
+public class ReceivedPayload extends Payload {
+
+ private Neighbor neighbor;
+ private TransactionViewModel tvm;
+
+ /**
+ * Creates a new {@link ReceivedPayload}.
+ *
+ * @param neighbor the {@link Neighbor} from which the transaction originated from (can be null)
+ * @param tvm the transaction
+ */
+ public ReceivedPayload(Neighbor neighbor, TransactionViewModel tvm) {
+ this.neighbor = neighbor;
+ this.tvm = tvm;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Neighbor getOriginNeighbor() {
+ return neighbor;
+ }
+
+ /**
+ * Gets the transaction.
+ *
+ * @return the transaction
+ */
+ public TransactionViewModel getTransactionViewModel() {
+ return tvm;
+ }
+
+ @Override
+ public String toString() {
+ return "ReceivedPayload{" + "neighbor=" + neighbor.getHostAddressAndPort() + ", tvm=" + tvm.getHash() + '}';
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/ReceivedStage.java b/src/main/java/com/iota/iri/network/pipeline/ReceivedStage.java
new file mode 100644
index 0000000000..68042171a5
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/ReceivedStage.java
@@ -0,0 +1,83 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.TransactionValidator;
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.network.neighbor.Neighbor;
+import com.iota.iri.service.snapshot.SnapshotProvider;
+import com.iota.iri.storage.Tangle;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The {@link ReceivedStage} stores the given transaction in the database, updates the arrival time and sender and then
+ * submits to the {@link BroadcastStage}.
+ */
+public class ReceivedStage implements Stage {
+
+    private static final Logger log = LoggerFactory.getLogger(ReceivedStage.class);
+
+    private Tangle tangle;
+    private TransactionValidator txValidator;
+    private SnapshotProvider snapshotProvider;
+
+    /**
+     * Creates a new {@link ReceivedStage}.
+     *
+     * @param tangle The {@link Tangle} database used to store/update the transaction
+     * @param txValidator The {@link TransactionValidator} used to store/update the transaction
+     * @param snapshotProvider The {@link SnapshotProvider} used to store/update the transaction
+     */
+    public ReceivedStage(Tangle tangle, TransactionValidator txValidator, SnapshotProvider snapshotProvider) {
+        this.txValidator = txValidator;
+        this.tangle = tangle;
+        this.snapshotProvider = snapshotProvider;
+    }
+
+    /**
+     * Stores the given transaction in the database, updates its status
+     * ({@link TransactionValidator#updateStatus(TransactionViewModel)}) and updates the sender.
+     *
+     * @param ctx the received stage {@link ProcessingContext}
+     * @return a {@link ProcessingContext} which redirects to the {@link BroadcastStage}
+     */
+    @Override
+    public ProcessingContext process(ProcessingContext ctx) {
+        ReceivedPayload receivedPayload = (ReceivedPayload) ctx.getPayload();
+        Neighbor sender = receivedPayload.getOriginNeighbor();
+        TransactionViewModel transaction = receivedPayload.getTransactionViewModel();
+
+        boolean isNewTransaction;
+        try {
+            isNewTransaction = transaction.store(tangle, snapshotProvider.getInitialSnapshot());
+        } catch (Exception e) {
+            // persisting failed: count it against the neighbor (if any) and abort the pipeline
+            log.error("error persisting newly received tx", e);
+            if (sender != null) {
+                sender.getMetrics().incrInvalidTransactionsCount();
+            }
+            ctx.setNextStage(TransactionProcessingPipeline.Stage.ABORT);
+            return ctx;
+        }
+
+        if (isNewTransaction) {
+            transaction.setArrivalTime(System.currentTimeMillis());
+            try {
+                txValidator.updateStatus(transaction);
+                // sender might be null because the tx came from a broadcastTransaction command
+                if (sender != null) {
+                    transaction.updateSender(sender.getHostAddressAndPort());
+                }
+                transaction.update(tangle, snapshotProvider.getInitialSnapshot(), "arrivalTime|sender");
+            } catch (Exception e) {
+                log.error("error updating newly received tx", e);
+            }
+            if (sender != null) {
+                sender.getMetrics().incrNewTransactionsCount();
+            }
+        }
+
+        // hand the freshly stored tx over to the broadcast stage for gossiping to other neighbors
+        ctx.setNextStage(TransactionProcessingPipeline.Stage.BROADCAST);
+        ctx.setPayload(new BroadcastPayload(sender, transaction));
+        return ctx;
+    }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/ReplyPayload.java b/src/main/java/com/iota/iri/network/pipeline/ReplyPayload.java
new file mode 100644
index 0000000000..fcbe022e71
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/ReplyPayload.java
@@ -0,0 +1,55 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.model.Hash;
+import com.iota.iri.network.neighbor.Neighbor;
+
+/**
+ * Defines a payload which gets submitted to the {@link ReplyStage}.
+ */
+public class ReplyPayload extends Payload {
+
+    private Neighbor neighbor;
+    private Hash hashOfRequestedTx;
+
+    /**
+     * Creates a new {@link ReplyPayload}.
+     *
+     * @param neighbor the neighbor from which the request came from
+     * @param hashOfRequestedTx the hash of the requested transaction
+     */
+    public ReplyPayload(Neighbor neighbor, Hash hashOfRequestedTx) {
+        this.neighbor = neighbor;
+        this.hashOfRequestedTx = hashOfRequestedTx;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public Neighbor getOriginNeighbor() {
+        return neighbor;
+    }
+
+    /**
+     * Sets the {@link Neighbor}.
+     *
+     * @param neighbor the neighbor to set
+     */
+    public void setNeighbor(Neighbor neighbor) {
+        this.neighbor = neighbor;
+    }
+
+    /**
+     * Gets the hash of the requested transaction.
+     *
+     * @return the hash of the requested transaction
+     */
+    public Hash getHashOfRequestedTx() {
+        return hashOfRequestedTx;
+    }
+
+    @Override
+    public String toString() {
+        return "ReplyPayload{" + "neighbor=" + neighbor.getHostAddressAndPort() + ", hashOfRequestedTx="
+                + hashOfRequestedTx.toString() + '}';
+    }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/ReplyStage.java b/src/main/java/com/iota/iri/network/pipeline/ReplyStage.java
new file mode 100644
index 0000000000..a8eca4ee82
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/ReplyStage.java
@@ -0,0 +1,167 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.conf.NodeConfig;
+import com.iota.iri.controllers.TipsViewModel;
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.model.Hash;
+import com.iota.iri.model.HashFactory;
+import com.iota.iri.network.FIFOCache;
+import com.iota.iri.network.NeighborRouter;
+import com.iota.iri.network.neighbor.Neighbor;
+import com.iota.iri.network.protocol.Protocol;
+import com.iota.iri.service.milestone.LatestMilestoneTracker;
+import com.iota.iri.service.snapshot.SnapshotProvider;
+import com.iota.iri.storage.Tangle;
+
+import java.security.SecureRandom;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The {@link ReplyStage} replies to the neighbor which supplied the given hash of the requested transaction. If a
+ * {@link Hash#NULL_HASH} is supplied, then a random tip is replied back to the neighbor. A neighbor indicates to
+ * receive a random tip, when the requested transaction hash is the same as the transaction hash of the transaction in
+ * the gossip payload.
+ */
+public class ReplyStage implements Stage {
+
+    private static final Logger log = LoggerFactory.getLogger(ReplyStage.class);
+
+    private NeighborRouter neighborRouter;
+    private Tangle tangle;
+    private NodeConfig config;
+    private TipsViewModel tipsViewModel;
+    private LatestMilestoneTracker latestMilestoneTracker;
+    private SnapshotProvider snapshotProvider;
+    private FIFOCache recentlySeenBytesCache;
+    // default source of randomness, used when the caller does not inject one
+    private SecureRandom rnd = new SecureRandom();
+
+    /**
+     * Creates a new {@link ReplyStage}.
+     *
+     * @param neighborRouter the {@link NeighborRouter} to use to send the requested transaction
+     * @param config the {@link NodeConfig}
+     * @param tangle the {@link Tangle} database to load the request transaction from
+     * @param tipsViewModel the {@link TipsViewModel} to load the random tips from
+     * @param latestMilestoneTracker the {@link LatestMilestoneTracker} to load the latest milestone from
+     * @param snapshotProvider the {@link SnapshotProvider} to check the latest solid milestone from
+     * @param recentlySeenBytesCache the {@link FIFOCache} to use to cache the replied transaction
+     * @param rnd the {@link SecureRandom} used to get random values to randomize chances for not
+     *                               replying at all or not requesting a not stored requested transaction from neighbors
+     */
+    public ReplyStage(NeighborRouter neighborRouter, NodeConfig config, Tangle tangle, TipsViewModel tipsViewModel,
+            LatestMilestoneTracker latestMilestoneTracker, SnapshotProvider snapshotProvider,
+            FIFOCache recentlySeenBytesCache, SecureRandom rnd) {
+        this.neighborRouter = neighborRouter;
+        this.config = config;
+        this.tangle = tangle;
+        this.tipsViewModel = tipsViewModel;
+        this.latestMilestoneTracker = latestMilestoneTracker;
+        this.snapshotProvider = snapshotProvider;
+        this.recentlySeenBytesCache = recentlySeenBytesCache;
+        this.rnd = rnd;
+    }
+
+    /**
+     * Creates a new {@link ReplyStage} using an internally created {@link SecureRandom}.
+     *
+     * @param neighborRouter the {@link NeighborRouter} to use to send the requested transaction
+     * @param config the {@link NodeConfig}
+     * @param tangle the {@link Tangle} database to load the request transaction from
+     * @param tipsViewModel the {@link TipsViewModel} to load the random tips from
+     * @param latestMilestoneTracker the {@link LatestMilestoneTracker} to load the latest milestone from
+     * @param snapshotProvider the {@link SnapshotProvider} to check the latest solid milestone from
+     * @param recentlySeenBytesCache the {@link FIFOCache} to use to cache the replied transaction
+     */
+    public ReplyStage(NeighborRouter neighborRouter, NodeConfig config, Tangle tangle, TipsViewModel tipsViewModel,
+            LatestMilestoneTracker latestMilestoneTracker, SnapshotProvider snapshotProvider,
+            FIFOCache recentlySeenBytesCache) {
+        this.neighborRouter = neighborRouter;
+        this.config = config;
+        this.tangle = tangle;
+        this.tipsViewModel = tipsViewModel;
+        this.latestMilestoneTracker = latestMilestoneTracker;
+        this.snapshotProvider = snapshotProvider;
+        this.recentlySeenBytesCache = recentlySeenBytesCache;
+    }
+
+    /**
+     * Loads the requested transaction from the database and replies it back to the neighbor who requested it. If the
+     * {@link Hash#NULL_HASH} is supplied, then a random tip is replied with.
+     *
+     * @param ctx the reply stage {@link ProcessingContext}
+     * @return the same {@link ProcessingContext} as passed in
+     */
+    @Override
+    public ProcessingContext process(ProcessingContext ctx) {
+        ReplyPayload payload = (ReplyPayload) ctx.getPayload();
+        Neighbor neighbor = payload.getOriginNeighbor();
+        Hash hashOfRequestedTx = payload.getHashOfRequestedTx();
+
+        TransactionViewModel tvm = null;
+
+        if (hashOfRequestedTx.equals(Hash.NULL_HASH)) {
+            try {
+                // don't reply to random tip requests if we are synchronized with a max delta of one
+                // to the newest milestone
+                if (snapshotProvider.getLatestSnapshot().getIndex() >= latestMilestoneTracker.getLatestMilestoneIndex()
+                        - 1) {
+                    ctx.setNextStage(TransactionProcessingPipeline.Stage.FINISH);
+                    return ctx;
+                }
+                // retrieve random tx
+                neighbor.getMetrics().incrRandomTransactionRequestsCount();
+                Hash transactionPointer = getRandomTipPointer();
+                tvm = TransactionViewModel.fromHash(tangle, transactionPointer);
+            } catch (Exception e) {
+                log.error("error loading random tip for reply", e);
+                ctx.setNextStage(TransactionProcessingPipeline.Stage.ABORT);
+                return ctx;
+            }
+        } else {
+            try {
+                // retrieve requested tx
+                tvm = TransactionViewModel.fromHash(tangle, HashFactory.TRANSACTION.create(hashOfRequestedTx.bytes(), 0,
+                        Protocol.GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH));
+            } catch (Exception e) {
+                log.error("error while searching for explicitly asked for tx", e);
+                ctx.setNextStage(TransactionProcessingPipeline.Stage.ABORT);
+                return ctx;
+            }
+        }
+
+        if (tvm != null && tvm.getType() == TransactionViewModel.FILLED_SLOT) {
+            try {
+                // send the requested tx data to the requester
+                neighborRouter.gossipTransactionTo(neighbor, tvm);
+                // cache the replied with tx
+                long txDigest = NeighborRouter.getTxCacheDigest(tvm.getBytes());
+                recentlySeenBytesCache.put(txDigest, tvm.getHash());
+            } catch (Exception e) {
+                log.error("error adding reply tx to neighbor's send queue", e);
+            }
+            ctx.setNextStage(TransactionProcessingPipeline.Stage.ABORT);
+            return ctx;
+        }
+
+        // we didn't have the requested transaction (random or explicit) from the neighbor but we will immediately reply
+        // with the latest known milestone and a needed transaction hash, to keep up the ping-pong
+        try {
+            final TransactionViewModel msTVM = TransactionViewModel.fromHash(tangle,
+                    latestMilestoneTracker.getLatestMilestoneHash());
+            neighborRouter.gossipTransactionTo(neighbor, msTVM, false);
+        } catch (Exception e) {
+            // use the class logger instead of printing the stack trace to stderr,
+            // consistent with the other error paths in this stage
+            log.error("error adding latest milestone tx to neighbor's send queue", e);
+        }
+
+        ctx.setNextStage(TransactionProcessingPipeline.Stage.FINISH);
+        return ctx;
+    }
+
+    private Hash getRandomTipPointer() {
+        Hash tip = rnd.nextDouble() < config.getpSendMilestone() ? latestMilestoneTracker.getLatestMilestoneHash()
+                : tipsViewModel.getRandomSolidTipHash();
+        return tip == null ? Hash.NULL_HASH : tip;
+    }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/Stage.java b/src/main/java/com/iota/iri/network/pipeline/Stage.java
new file mode 100644
index 0000000000..b463fdd39d
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/Stage.java
@@ -0,0 +1,16 @@
+package com.iota.iri.network.pipeline;
+
+/**
+ * Defines a stage in the {@link TransactionProcessingPipelineImpl} which processes a {@link ProcessingContext} and its
+ * payload and then mutates the given context with the information for the next stage.
+ */
+public interface Stage {
+
+    /**
+     * Processes the given context and adjusts it with the payload needed for the next stage (if any),
+     * including setting the next stage the context should be submitted to.
+     *
+     * @param ctx the context to process
+     * @return the mutated context (usually the same context as the passed in context)
+     */
+    ProcessingContext process(ProcessingContext ctx);
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/TransactionProcessingPipeline.java b/src/main/java/com/iota/iri/network/pipeline/TransactionProcessingPipeline.java
new file mode 100644
index 0000000000..82249bfa8e
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/TransactionProcessingPipeline.java
@@ -0,0 +1,137 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.TransactionValidator;
+import com.iota.iri.conf.NodeConfig;
+import com.iota.iri.controllers.TipsViewModel;
+import com.iota.iri.network.NeighborRouter;
+import com.iota.iri.network.neighbor.Neighbor;
+import com.iota.iri.service.milestone.LatestMilestoneTracker;
+import com.iota.iri.service.snapshot.SnapshotProvider;
+import com.iota.iri.storage.Tangle;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.BlockingQueue;
+
+/**
+ * A pipeline using stages to process incoming transaction data from neighbors and API calls.
+ */
+public interface TransactionProcessingPipeline {
+
+    /**
+     * Defines the different stages of the {@link TransactionProcessingPipelineImpl}.
+     */
+    enum Stage {
+        PRE_PROCESS, HASHING, VALIDATION, REPLY, RECEIVED, BROADCAST, MULTIPLE, ABORT, FINISH,
+    }
+
+    /**
+     * Initializes the dependencies of the {@link TransactionProcessingPipeline}.
+     *
+     * @param neighborRouter The {@link NeighborRouter} to use for broadcasting transactions
+     * @param config The config to set cache sizes and other options
+     * @param txValidator The transaction validator to validate incoming transactions with
+     * @param tangle The {@link Tangle} database to use to store and load transactions.
+     * @param snapshotProvider The {@link SnapshotProvider} to use to store transactions with.
+     * @param tipsViewModel The {@link TipsViewModel} to load tips from in the reply stage
+     * @param latestMilestoneTracker The {@link LatestMilestoneTracker} to load the latest milestone hash from in the
+     *                               reply stage
+     */
+    void init(NeighborRouter neighborRouter, NodeConfig config, TransactionValidator txValidator, Tangle tangle,
+            SnapshotProvider snapshotProvider, TipsViewModel tipsViewModel,
+            LatestMilestoneTracker latestMilestoneTracker);
+
+    /**
+     * Kicks off the pipeline by assembling the pipeline and starting all threads.
+     */
+    void start();
+
+    /**
+     * Gets the received stage queue.
+     *
+     * @return the received stage queue
+     */
+    BlockingQueue getReceivedStageQueue();
+
+    /**
+     * Gets the broadcast stage queue.
+     *
+     * @return the broadcast stage queue.
+     */
+    BlockingQueue getBroadcastStageQueue();
+
+    /**
+     * Gets the reply stage queue.
+     *
+     * @return the reply stage queue
+     */
+    BlockingQueue getReplyStageQueue();
+
+    /**
+     * Gets the validation stage queue.
+     *
+     * @return the validation stage queue
+     */
+    BlockingQueue getValidationStageQueue();
+
+    /**
+     * Submits the given data from the given neighbor into the pre processing stage of the pipeline.
+     *
+     * @param neighbor the {@link Neighbor} from which the data originated from
+     * @param data the data to process
+     */
+    void process(Neighbor neighbor, ByteBuffer data);
+
+    /**
+     * Submits the given transactions trits into the hashing stage of the pipeline.
+     *
+     * @param txTrits the transaction trits
+     */
+    void process(byte[] txTrits);
+
+    /**
+     * Shuts down the pipeline by shutting down all stages.
+     */
+    void shutdown();
+
+    /**
+     * Sets the pre process stage. This method should only be used for injecting mocked objects.
+     *
+     * @param preProcessStage the {@link PreProcessStage} to use
+     */
+    void setPreProcessStage(PreProcessStage preProcessStage);
+
+    /**
+     * Sets the received stage. This method should only be used for injecting mocked objects.
+     *
+     * @param receivedStage the {@link ReceivedStage} to use
+     */
+    void setReceivedStage(ReceivedStage receivedStage);
+
+    /**
+     * Sets the validation stage. This method should only be used for injecting mocked objects.
+     *
+     * @param validationStage the {@link ValidationStage} to use
+     */
+    void setValidationStage(ValidationStage validationStage);
+
+    /**
+     * Sets the reply stage. This method should only be used for injecting mocked objects.
+     *
+     * @param replyStage the {@link ReplyStage} to use
+     */
+    void setReplyStage(ReplyStage replyStage);
+
+    /**
+     * Sets the broadcast stage. This method should only be used for injecting mocked objects.
+     *
+     * @param broadcastStage the {@link BroadcastStage} to use
+     */
+    void setBroadcastStage(BroadcastStage broadcastStage);
+
+    /**
+     * Sets the hashing stage. This method should only be used for injecting mocked objects.
+     *
+     * @param hashingStage the {@link HashingStage} to use
+     */
+    void setHashingStage(HashingStage hashingStage);
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/TransactionProcessingPipelineImpl.java b/src/main/java/com/iota/iri/network/pipeline/TransactionProcessingPipelineImpl.java
new file mode 100644
index 0000000000..c8996feb09
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/TransactionProcessingPipelineImpl.java
@@ -0,0 +1,238 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.TransactionValidator;
+import com.iota.iri.conf.NodeConfig;
+import com.iota.iri.controllers.TipsViewModel;
+import com.iota.iri.crypto.batched.BatchedHasher;
+import com.iota.iri.crypto.batched.BatchedHasherFactory;
+import com.iota.iri.crypto.batched.HashRequest;
+import com.iota.iri.model.Hash;
+import com.iota.iri.model.persistables.Transaction;
+import com.iota.iri.network.FIFOCache;
+import com.iota.iri.network.NeighborRouter;
+import com.iota.iri.network.neighbor.Neighbor;
+import com.iota.iri.service.milestone.LatestMilestoneTracker;
+import com.iota.iri.service.snapshot.SnapshotProvider;
+import com.iota.iri.storage.Tangle;
+import com.iota.iri.utils.Converter;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The {@link TransactionProcessingPipelineImpl} processes transactions which either came from {@link Neighbor} instances or
+ * were submitted via {@link com.iota.iri.service.API#broadcastTransactionsStatement(List)}.
+ * The pipeline splits the processing of transactions into different stages which run concurrently:
+ *
+ * - PreProcess: expands transaction payloads, computes the digest of transactions received by
+ * {@link Neighbor} instances and converts the transaction payload to its trits representation.
+ * Submits to the hashing stage if the transaction payload is not known or to the reply stage if already known.
+ * - Hashing: hashes transaction trits using a {@link BatchedHasher} and then submits it further to
+ * the validation stage.
+ * - Validation: validates the newly received transaction payload and adds it to the known bytes
+ * cache.
+ * If the transaction originated from a {@link com.iota.iri.service.API#broadcastTransactionsStatement(List)}, then the
+ * transaction is submitted to the received stage, otherwise it is both submitted to the reply and received stage.
+ * - Reply: replies to the given neighbor with the requested transaction or a random tip.
+ * - Received: stores the newly received and validated transaction and then submits it to the
+ * broadcast stage.
+ * - Broadcast: broadcasts the given transaction to all connected {@link Neighbor} instances except
+ * the neighbor from which the transaction originated from.
+ *
+ */
+public class TransactionProcessingPipelineImpl implements TransactionProcessingPipeline {
+
+    private static final Logger log = LoggerFactory.getLogger(TransactionProcessingPipelineImpl.class);
+    // one thread per long-running stage loop plus the batched hasher
+    private ExecutorService stagesThreadPool = Executors.newFixedThreadPool(6);
+
+    // stages of the pipeline
+    private PreProcessStage preProcessStage;
+    private ReceivedStage receivedStage;
+    private ValidationStage validationStage;
+    private ReplyStage replyStage;
+    private BroadcastStage broadcastStage;
+    private BatchedHasher batchedHasher;
+    private HashingStage hashingStage;
+
+    private BlockingQueue preProcessStageQueue = new ArrayBlockingQueue<>(100);
+    private BlockingQueue validationStageQueue = new ArrayBlockingQueue<>(100);
+    private BlockingQueue receivedStageQueue = new ArrayBlockingQueue<>(100);
+    private BlockingQueue broadcastStageQueue = new ArrayBlockingQueue<>(100);
+    private BlockingQueue replyStageQueue = new ArrayBlockingQueue<>(100);
+
+    @Override
+    public void init(NeighborRouter neighborRouter, NodeConfig config, TransactionValidator txValidator, Tangle tangle,
+            SnapshotProvider snapshotProvider, TipsViewModel tipsViewModel,
+            LatestMilestoneTracker latestMilestoneTracker) {
+        // the recently-seen-bytes cache is shared between the pre-process, reply and validation stages
+        FIFOCache recentlySeenBytesCache = new FIFOCache<>(config.getCacheSizeBytes());
+        this.preProcessStage = new PreProcessStage(recentlySeenBytesCache);
+        this.replyStage = new ReplyStage(neighborRouter, config, tangle, tipsViewModel, latestMilestoneTracker,
+                snapshotProvider, recentlySeenBytesCache);
+        this.broadcastStage = new BroadcastStage(neighborRouter);
+        this.validationStage = new ValidationStage(txValidator, recentlySeenBytesCache);
+        this.receivedStage = new ReceivedStage(tangle, txValidator, snapshotProvider);
+        this.batchedHasher = BatchedHasherFactory.create(BatchedHasherFactory.Type.BCTCURL81, 20);
+        this.hashingStage = new HashingStage(batchedHasher);
+    }
+
+    @Override
+    public void start() {
+        stagesThreadPool.submit(batchedHasher);
+        addStage("pre-process", preProcessStageQueue, preProcessStage);
+        addStage("validation", validationStageQueue, validationStage);
+        addStage("reply", replyStageQueue, replyStage);
+        addStage("received", receivedStageQueue, receivedStage);
+        addStage("broadcast", broadcastStageQueue, broadcastStage);
+    }
+
+    /**
+     * Adds the given stage to the processing pipeline.
+     *
+     * @param name the name of the stage
+     * @param queue the queue from which contexts are taken to process within the stage
+     * @param stage the stage with the processing logic
+     */
+    private void addStage(String name, BlockingQueue queue,
+            com.iota.iri.network.pipeline.Stage stage) {
+        stagesThreadPool.submit(() -> {
+            // name the executing pool thread after the stage; note that the previous approach of
+            // submitting a 'new Thread(runnable, name)' never applied the name, because the
+            // executor only invokes run() on one of its own pool threads
+            Thread.currentThread().setName(String.format("%s-stage", name));
+            try {
+                while (!Thread.currentThread().isInterrupted()) {
+                    ProcessingContext ctx = stage.process(queue.take());
+                    switch (ctx.getNextStage()) {
+                        case REPLY:
+                            replyStageQueue.put(ctx);
+                            break;
+                        case HASHING:
+                            hashAndValidate(ctx);
+                            break;
+                        case RECEIVED:
+                            receivedStageQueue.put(ctx);
+                            break;
+                        case MULTIPLE:
+                            // a validation stage result which diverges into reply and received stage
+                            MultiStagePayload payload = (MultiStagePayload) ctx.getPayload();
+                            replyStageQueue.put(payload.getLeft());
+                            receivedStageQueue.put(payload.getRight());
+                            break;
+                        case BROADCAST:
+                            broadcastStageQueue.put(ctx);
+                            break;
+                        case ABORT:
+                            break;
+                        case FINISH:
+                            break;
+                        default:
+                            // do nothing
+                    }
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            } finally {
+                log.info("{}-stage shutdown", name);
+            }
+        });
+    }
+
+    @Override
+    public BlockingQueue getReceivedStageQueue() {
+        return receivedStageQueue;
+    }
+
+    @Override
+    public BlockingQueue getBroadcastStageQueue() {
+        return broadcastStageQueue;
+    }
+
+    @Override
+    public BlockingQueue getReplyStageQueue() {
+        return replyStageQueue;
+    }
+
+    @Override
+    public BlockingQueue getValidationStageQueue() {
+        return validationStageQueue;
+    }
+
+    @Override
+    public void process(Neighbor neighbor, ByteBuffer data) {
+        try {
+            preProcessStageQueue.put(new ProcessingContext(new PreProcessPayload(neighbor, data)));
+        } catch (InterruptedException e) {
+            // log instead of printing to stderr and preserve the interrupt flag for the caller
+            log.error("unable to put data into pre-process stage queue", e);
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    @Override
+    public void process(byte[] txTrits) {
+        byte[] txBytes = new byte[Transaction.SIZE];
+        Converter.bytes(txTrits, txBytes);
+        long txDigest = NeighborRouter.getTxCacheDigest(txBytes);
+        // no origin neighbor and no requested tx hash: the tx came from an API call
+        HashingPayload payload = new HashingPayload(null, txTrits, txDigest, null);
+        hashAndValidate(new ProcessingContext(payload));
+    }
+
+    /**
+     * Sets up the given hashing stage {@link ProcessingContext} so that up on success, it will submit further to the
+     * validation stage.
+     *
+     * @param ctx the hashing stage {@link ProcessingContext}
+     */
+    private void hashAndValidate(ProcessingContext ctx) {
+        // the hashing already runs in its own thread,
+        // the callback will submit the data to the validation stage
+        HashingPayload hashingStagePayload = (HashingPayload) ctx.getPayload();
+        hashingStagePayload.setHashRequest(new HashRequest(hashingStagePayload.getTxTrits(), hashTrits -> {
+            try {
+                hashingStagePayload.setHashTrits(hashTrits);
+                // the validation stage takes care of submitting a payload to the reply stage.
+                ctx.setNextStage(TransactionProcessingPipeline.Stage.VALIDATION);
+                validationStageQueue.put(ctx);
+            } catch (InterruptedException e) {
+                // the put() targets the validation stage queue (the previous message wrongly
+                // mentioned the hashing stage)
+                log.error("unable to put processing context into validation stage. reason: {}", e.getMessage());
+                Thread.currentThread().interrupt();
+            }
+        }));
+        hashingStage.process(ctx);
+    }
+
+    @Override
+    public void shutdown() {
+        stagesThreadPool.shutdownNow();
+    }
+
+    @Override
+    public void setPreProcessStage(PreProcessStage preProcessStage) {
+        this.preProcessStage = preProcessStage;
+    }
+
+    @Override
+    public void setReceivedStage(ReceivedStage receivedStage) {
+        this.receivedStage = receivedStage;
+    }
+
+    @Override
+    public void setValidationStage(ValidationStage validationStage) {
+        this.validationStage = validationStage;
+    }
+
+    @Override
+    public void setReplyStage(ReplyStage replyStage) {
+        this.replyStage = replyStage;
+    }
+
+    @Override
+    public void setBroadcastStage(BroadcastStage broadcastStage) {
+        this.broadcastStage = broadcastStage;
+    }
+
+    @Override
+    public void setHashingStage(HashingStage hashingStage) {
+        this.hashingStage = hashingStage;
+    }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/ValidationPayload.java b/src/main/java/com/iota/iri/network/pipeline/ValidationPayload.java
new file mode 100644
index 0000000000..19f17367a9
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/ValidationPayload.java
@@ -0,0 +1,95 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.model.Hash;
+import com.iota.iri.network.neighbor.Neighbor;
+
+import java.util.Arrays;
+
+/**
+ * Defines a payload whic gets submitted to the {@link ValidationStage}.
+ */
+public class ValidationPayload extends Payload {
+
+ private Neighbor neighbor;
+ private byte[] txTrits;
+ private byte[] hashTrits;
+ private Long txBytesDigest;
+ private Hash hashOfRequestedTx;
+
+ /**
+ * Creates a new {@link ValidationStage}.
+ *
+ * @param neighbor the {@link Neighbor} from which the transaction originated from (can be null)
+ * @param txTrits the trits representation of the transaction
+ * @param hashTrits the hash of the transaction in trits representation
+ * @param txBytesDigest the digest of the tansaction payload
+ * @param hashOfRequestedTx the hash of the requested transaction
+ */
+ public ValidationPayload(Neighbor neighbor, byte[] txTrits, byte[] hashTrits, Long txBytesDigest,
+ Hash hashOfRequestedTx) {
+ this.neighbor = neighbor;
+ this.txBytesDigest = txBytesDigest;
+ this.txTrits = txTrits;
+ this.hashTrits = hashTrits;
+ this.hashOfRequestedTx = hashOfRequestedTx;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Neighbor getOriginNeighbor() {
+ return neighbor;
+ }
+
+ /**
+ * Gets the transaction trits.
+ *
+ * @return the transaction trits
+ */
+ public byte[] getTxTrits() {
+ return txTrits;
+ }
+
+ /**
+ * Gets the transaction payload digest.
+ *
+ * @return the transaction payload digest
+ */
+ public Long getTxBytesDigest() {
+ return txBytesDigest;
+ }
+
+ /**
+ * Gets the hash of the requested transaction.
+ *
+ * @return the hash of the requested transaction.
+ */
+ public Hash getHashOfRequestedTx() {
+ return hashOfRequestedTx;
+ }
+
+ /**
+ * Gets the hash of the transaction.
+ *
+ * @return the hash of the transaction.
+ */
+ public byte[] getHashTrits() {
+ return hashTrits;
+ }
+
+ /**
+ * Sets the transaction hash trits.
+ *
+ * @param hashTrits the hash trits to set
+ */
+ public void setHashTrits(byte[] hashTrits) {
+ this.hashTrits = hashTrits;
+ }
+
+ @Override
+ public String toString() {
+ return "ValidationPayload{" + "neighbor=" + neighbor.getHostAddressAndPort() + ", hashTrits="
+ + Arrays.toString(hashTrits) + ", txBytesDigest=" + txBytesDigest + ", hashOfRequestedTx="
+ + hashOfRequestedTx.toString() + '}';
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/pipeline/ValidationStage.java b/src/main/java/com/iota/iri/network/pipeline/ValidationStage.java
new file mode 100644
index 0000000000..a139210eb4
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/pipeline/ValidationStage.java
@@ -0,0 +1,97 @@
+package com.iota.iri.network.pipeline;
+
+import com.iota.iri.TransactionValidator;
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.model.Hash;
+import com.iota.iri.model.HashFactory;
+import com.iota.iri.model.TransactionHash;
+import com.iota.iri.network.FIFOCache;
+import com.iota.iri.network.neighbor.Neighbor;
+
+import static com.iota.iri.model.Hash.SIZE_IN_TRITS;
+
+/**
+ * The {@link ValidationStage} validates the given transaction, caches it as recently seen and then either submits
+ * further to the {@link ReceivedStage} or/and {@link ReplyStage} depending on whether the transaction originated from a
+ * neighbor or not.
+ */
+public class ValidationStage implements Stage {
+
+    private TransactionValidator txValidator;
+    private FIFOCache recentlySeenBytesCache;
+
+    /**
+     * Creates a new {@link ValidationStage}.
+     *
+     * @param txValidator the {@link TransactionValidator} to use to validate the transaction
+     * @param recentlySeenBytesCache the {@link FIFOCache} to cache the validated transaction as recently seen
+     */
+    public ValidationStage(TransactionValidator txValidator, FIFOCache recentlySeenBytesCache) {
+        this.txValidator = txValidator;
+        this.recentlySeenBytesCache = recentlySeenBytesCache;
+    }
+
+    /**
+     * Validates the transaction and caches it as 'recently seen'.
+     *
+     * @param ctx the validation stage {@link ProcessingContext}
+     * @return a {@link ProcessingContext} either directing to only the {@link ReceivedStage} or both
+     *         {@link ReceivedStage} and {@link ReplyStage}, depending on whether the transaction came from a neighbor
+     *         or not
+     */
+    @Override
+    public ProcessingContext process(ProcessingContext ctx) {
+        ValidationPayload validationPayload = (ValidationPayload) ctx.getPayload();
+        Neighbor sender = validationPayload.getOriginNeighbor();
+        Long bytesDigest = validationPayload.getTxBytesDigest();
+        Hash requestedHash = validationPayload.getHashOfRequestedTx();
+
+        // build the transaction hash and model from the computed hash trits and the raw tx trits
+        TransactionHash computedHash = (TransactionHash) HashFactory.TRANSACTION
+                .create(validationPayload.getHashTrits(), 0, SIZE_IN_TRITS);
+        TransactionViewModel transaction = new TransactionViewModel(validationPayload.getTxTrits(), computedHash);
+
+        try {
+            txValidator.runValidation(transaction, txValidator.getMinWeightMagnitude());
+        } catch (TransactionValidator.StaleTimestampException ex) {
+            if (sender != null) {
+                sender.getMetrics().incrStaleTransactionsCount();
+            }
+            ctx.setNextStage(TransactionProcessingPipeline.Stage.ABORT);
+            return ctx;
+        } catch (Exception ex) {
+            if (sender != null) {
+                sender.getMetrics().incrInvalidTransactionsCount();
+            }
+            ctx.setNextStage(TransactionProcessingPipeline.Stage.ABORT);
+            return ctx;
+        }
+
+        // remember the tx hash under its payload digest
+        if (bytesDigest != null && bytesDigest != 0) {
+            recentlySeenBytesCache.put(bytesDigest, computedHash);
+        }
+
+        ReceivedPayload receivedStagePayload = new ReceivedPayload(sender, transaction);
+
+        // transactions which didn't originate from a neighbor skip the reply stage entirely
+        if (requestedHash == null || sender == null) {
+            ctx.setNextStage(TransactionProcessingPipeline.Stage.RECEIVED);
+            ctx.setPayload(receivedStagePayload);
+            return ctx;
+        }
+
+        // a neighbor requesting the hash of the very tx it sent indicates a random tip request
+        Hash replyHash = requestedHash.equals(computedHash) ? Hash.NULL_HASH : requestedHash;
+
+        // diverge flow to received and reply stage
+        ProcessingContext replyCtx = new ProcessingContext(TransactionProcessingPipeline.Stage.REPLY,
+                new ReplyPayload(sender, replyHash));
+        ProcessingContext receivedCtx = new ProcessingContext(TransactionProcessingPipeline.Stage.RECEIVED,
+                receivedStagePayload);
+        ctx.setNextStage(TransactionProcessingPipeline.Stage.MULTIPLE);
+        ctx.setPayload(new MultiStagePayload(replyCtx, receivedCtx));
+        return ctx;
+    }
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/Handshake.java b/src/main/java/com/iota/iri/network/protocol/Handshake.java
new file mode 100644
index 0000000000..f4e81cf563
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/Handshake.java
@@ -0,0 +1,238 @@
+package com.iota.iri.network.protocol;
+
+import com.iota.iri.network.neighbor.Neighbor;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Defines information exchanged upon a new connection with a {@link Neighbor}.
+ */
+public class Handshake {
+
+ /**
+ * The amount of bytes used for the coo address sent in a handshake packet.
+ */
+ public final static int BYTE_ENCODED_COO_ADDRESS_BYTES_LENGTH = 49;
+
+ /**
+ * The state of the handshaking.
+ */
+ public enum State {
+ INIT, FAILED, OK,
+ }
+
+ private int serverSocketPort;
+ private long sentTimestamp;
+ private byte[] byteEncodedCooAddress;
+ private int mwm;
+ private State state = State.INIT;
+ private byte[] supportedVersions;
+
+ /**
+ * Creates a new handshake packet.
+ *
+ * @param ownSourcePort the node's own server socket port number
+ * @return a {@link ByteBuffer} containing the handshake packet
+ */
+ public static ByteBuffer createHandshakePacket(char ownSourcePort, byte[] ownByteEncodedCooAddress,
+ byte ownUsedMWM) {
+ short maxLength = ProtocolMessage.HANDSHAKE.getMaxLength();
+ final short payloadLengthBytes = (short) (maxLength - (maxLength - 60) + Protocol.SUPPORTED_PROTOCOL_VERSIONS.length);
+ ByteBuffer buf = ByteBuffer.allocate(ProtocolMessage.HEADER.getMaxLength() + payloadLengthBytes);
+ Protocol.addProtocolHeader(buf, ProtocolMessage.HANDSHAKE, payloadLengthBytes);
+ buf.putChar(ownSourcePort);
+ buf.putLong(System.currentTimeMillis());
+ buf.put(ownByteEncodedCooAddress);
+ buf.put(ownUsedMWM);
+ buf.put(Protocol.SUPPORTED_PROTOCOL_VERSIONS);
+ buf.flip();
+ return buf;
+ }
+
+ /**
+ * Parses the given message into a {@link Handshake} object.
+ *
+ * @param msg the buffer containing the handshake info
+ * @return the {@link Handshake} object
+ */
+ public static Handshake fromByteBuffer(ByteBuffer msg) {
+ Handshake handshake = new Handshake();
+ handshake.setServerSocketPort((int) msg.getChar());
+ handshake.setSentTimestamp(msg.getLong());
+ byte[] byteEncodedCooAddress = new byte[BYTE_ENCODED_COO_ADDRESS_BYTES_LENGTH];
+ msg.get(byteEncodedCooAddress);
+ handshake.setByteEncodedCooAddress(byteEncodedCooAddress);
+ handshake.setMWM(msg.get());
+ handshake.setState(Handshake.State.OK);
+ // extract supported versions
+ byte[] supportedVersions = new byte[msg.remaining()];
+ msg.get(supportedVersions);
+ handshake.setSupportedVersions(supportedVersions);
+ return handshake;
+ }
+
+ /**
+ * Gets the state of the handshaking.
+ *
+ * @return the state
+ */
+ public State getState() {
+ return state;
+ }
+
+ /**
+ * Sets the state of the handshaking.
+ *
+ * @param state the state to set
+ */
+ public void setState(State state) {
+ this.state = state;
+ }
+
+ /**
+ * Sets the server socket port number.
+ *
+ * @param serverSocketPort the number to set
+ */
+ public void setServerSocketPort(int serverSocketPort) {
+ this.serverSocketPort = serverSocketPort;
+ }
+
+ /**
+ * Gets the server socket port number.
+ *
+ * @return the server socket port number.
+ */
+ public int getServerSocketPort() {
+ return serverSocketPort;
+ }
+
+ /**
+ * Sets the sent timestamp.
+ *
+ * @param sentTimestamp the timestamp
+ */
+ public void setSentTimestamp(long sentTimestamp) {
+ this.sentTimestamp = sentTimestamp;
+ }
+
+ /**
+ * Gets the sent timestamp.
+ *
+ * @return the sent timestamp
+ */
+ public long getSentTimestamp() {
+ return sentTimestamp;
+ }
+
+ /**
+ * Gets the byte encoded coordinator address.
+ *
+ * @return the byte encoded coordinator address
+ */
+ public byte[] getByteEncodedCooAddress() {
+ return byteEncodedCooAddress;
+ }
+
+ /**
+ * Sets the byte encoded coordinator address.
+ *
+ * @param byteEncodedCooAddress the byte encoded coordinator to set
+ */
+ public void setByteEncodedCooAddress(byte[] byteEncodedCooAddress) {
+ this.byteEncodedCooAddress = byteEncodedCooAddress;
+ }
+
+ /**
+ * Gets the MWM.
+ *
+ * @return the mwm
+ */
+ public int getMWM() {
+ return mwm;
+ }
+
+ /**
+ * Sets the mwm.
+ *
+ * @param mwm the mwm to set
+ */
+ public void setMWM(int mwm) {
+ this.mwm = mwm;
+ }
+
+ /**
+ * Gets the supported versions.
+ *
+ * @return the supported versions
+ */
+ public byte[] getSupportedVersions() {
+ return supportedVersions;
+ }
+
+ /**
+ * Sets the supported versions.
+ *
+ * @param supportedVersions the supported versions to set
+ */
+ public void setSupportedVersions(byte[] supportedVersions) {
+ this.supportedVersions = supportedVersions;
+ }
+
+ /**
+ * Returns the highest supported protocol version by the neighbor or a negative number indicating the highest
+ * protocol version the neighbor would have supported but which our node doesn't.
+ *
+ * @param ownSupportedVersions the versions our own node supports
+ * @return a positive integer defining the highest supported protocol version and a negative integer indicating the
+ * highest supported version by the given neighbor but which is not supported by us
+ */
+ public int getNeighborSupportedVersion(byte[] ownSupportedVersions) {
+ int highestSupportedVersion = 0;
+ for (int i = 0; i < ownSupportedVersions.length; i++) {
+ // max check up to advertised versions by the neighbor
+ if (i > supportedVersions.length - 1) {
+ break;
+ }
+
+ // get versions matched by both
+ byte supported = (byte) (supportedVersions[i] & ownSupportedVersions[i]);
+
+ // none supported
+ if (supported == 0) {
+ continue;
+ }
+
+ // iterate through all bits and find highest (more to the left is higher)
+ int highest = 0;
+ for (int j = 0; j < 8; j++) {
+ if (((supported >> j) & 1) == 1) {
+ highest = j + 1;
+ }
+ }
+ highestSupportedVersion = highest + (i * 8);
+ }
+
+ // if the highest version is still 0, it means that we don't support
+ // any protocol version the neighbor supports
+ if (highestSupportedVersion == 0) {
+ // grab last byte denoting the highest versions.
+ // a node will only hold version bytes if at least one version in that
+ // byte is supported, therefore it's safe to assume, that the last byte contains
+ // the highest supported version of a given node.
+ byte lastVersionsByte = supportedVersions[supportedVersions.length - 1];
+ // find highest version
+ int highest = 0;
+ for (int j = 0; j < 8; j++) {
+ if (((lastVersionsByte >> j) & 1) == 1) {
+ highest = j + 1;
+ }
+ }
+ int highestSupportedVersionByNeighbor = highest + ((supportedVersions.length - 1) * 8);
+ // negate to indicate that we don't actually support it
+ return -highestSupportedVersionByNeighbor;
+ }
+
+ return highestSupportedVersion;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/InvalidProtocolMessageLengthException.java b/src/main/java/com/iota/iri/network/protocol/InvalidProtocolMessageLengthException.java
new file mode 100644
index 0000000000..bd56cadfbb
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/InvalidProtocolMessageLengthException.java
@@ -0,0 +1,16 @@
+package com.iota.iri.network.protocol;
+
+/**
+ * Thrown when a packet advertises a message length which is invalid for the given {@link ProtocolMessage} type.
+ */
+public class InvalidProtocolMessageLengthException extends Exception {
+
+ /**
+     * Constructs a new exception for invalid protocol message lengths.
+ *
+ * @param msg the message for this exception
+ */
+ public InvalidProtocolMessageLengthException(String msg) {
+ super(msg);
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/Protocol.java b/src/main/java/com/iota/iri/network/protocol/Protocol.java
new file mode 100644
index 0000000000..7d865deda3
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/Protocol.java
@@ -0,0 +1,193 @@
+package com.iota.iri.network.protocol;
+
+import com.iota.iri.controllers.TransactionViewModel;
+import com.iota.iri.model.persistables.Transaction;
+
+import java.nio.ByteBuffer;
+
+/**
+ * The IRI protocol uses a 4 bytes header denoting the version, type and length of a packet.
+ */
+public class Protocol {
+
+ /**
+ * The protocol version used by this node.
+ */
+ public final static byte PROTOCOL_VERSION = 1;
+ /**
+ *
+ * The supported protocol versions by this node. Bitmasks are used to denote what protocol version this node
+ * supports in its implementation. The LSB acts as a starting point. Up to 32 bytes are supported in the handshake
+ * packet, limiting the amount of supported denoted protocol versions to 256.
+ *
+ *
+ * Examples:
+ *
+ *
+ * - [00000001] denotes that this node supports protocol version 1.
+ * - [00000111] denotes that this node supports protocol versions 1, 2 and 3.
+ * - [01101110] denotes that this node supports protocol versions 2, 3, 4, 6 and 7.
+ * - [01101110, 01010001] denotes that this node supports protocol versions 2, 3, 4, 6, 7, 9, 13 and 15.
+ * - [01101110, 01010001, 00010001] denotes that this node supports protocol versions 2, 3, 4, 6, 7, 9, 13, 15,
+ * 17 and 21.
+ *
+ */
+ public final static byte[] SUPPORTED_PROTOCOL_VERSIONS = {
+ /* supports protocol version(s): 1 */
+ (byte) 0b00000001,
+ };
+ /**
+ * The amount of bytes dedicated for the message type in the packet header.
+ */
+ public final static byte HEADER_TLV_TYPE_BYTES_LENGTH = 1;
+ /**
+ * The amount of bytes dedicated for the message length denotation in the packet header.
+ */
+ public final static byte HEADER_TLV_LENGTH_BYTES_LENGTH = 2;
+ /**
+ * The amount of bytes making up the protocol packet header.
+ */
+ public final static byte PROTOCOL_HEADER_BYTES_LENGTH = HEADER_TLV_LENGTH_BYTES_LENGTH
+ + HEADER_TLV_TYPE_BYTES_LENGTH;
+
+ /**
+ * The amount of bytes used for the requested transaction hash.
+ */
+ public final static int GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH = 49;
+ /**
+ * The amount of bytes making up the non signature message fragment part of a transaction gossip payload.
+ */
+ public final static int NON_SIG_TX_PART_BYTES_LENGTH = 292;
+ /**
+ * The max amount of bytes a signature message fragment is made up from.
+ */
+ public final static int SIG_DATA_MAX_BYTES_LENGTH = 1312;
+
+ /**
+ * Parses the given buffer into a {@link ProtocolHeader}.
+ *
+ * @param buf the buffer to parse
+ * @return the parsed {@link ProtocolHeader}
+ * @throws UnknownMessageTypeException thrown when the advertised message type is unknown
+ * @throws InvalidProtocolMessageLengthException thrown when the advertised message length is invalid
+ */
+ public static ProtocolHeader parseHeader(ByteBuffer buf)
+ throws UnknownMessageTypeException, InvalidProtocolMessageLengthException {
+
+ // extract type of message
+ byte type = buf.get();
+ ProtocolMessage protoMsg = ProtocolMessage.fromTypeID(type);
+ if (protoMsg == null) {
+ throw new UnknownMessageTypeException(String.format("got unknown message type in protocol: %d", type));
+ }
+
+ // extract length of message
+ short advertisedMsgLength = buf.getShort();
+ if ((advertisedMsgLength > protoMsg.getMaxLength())
+ || (!protoMsg.supportsDynamicLength() && advertisedMsgLength < protoMsg.getMaxLength())) {
+ throw new InvalidProtocolMessageLengthException(String.format(
+ "advertised length: %d bytes; max length: %d bytes", advertisedMsgLength, protoMsg.getMaxLength()));
+ }
+
+ return new ProtocolHeader(protoMsg, advertisedMsgLength);
+ }
+
+ /**
+ * Creates a new transaction gossip packet.
+ *
+ * @param tvm The transaction to add into the packet
+ * @param requestedHash The hash of the requested transaction
+ * @return a {@link ByteBuffer} containing the transaction gossip packet.
+ */
+ public static ByteBuffer createTransactionGossipPacket(TransactionViewModel tvm, byte[] requestedHash) {
+ byte[] truncatedTx = truncateTx(tvm.getBytes());
+ final short payloadLengthBytes = (short) (truncatedTx.length + GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH);
+ ByteBuffer buf = ByteBuffer.allocate(ProtocolMessage.HEADER.getMaxLength() + payloadLengthBytes);
+ addProtocolHeader(buf, ProtocolMessage.TRANSACTION_GOSSIP, payloadLengthBytes);
+ buf.put(truncatedTx);
+ buf.put(requestedHash, 0, GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH);
+ buf.flip();
+ return buf;
+ }
+
+ /**
+ * Adds the protocol header to the given {@link ByteBuffer}.
+ *
+ * @param buf the {@link ByteBuffer} to write into.
+ * @param protoMsg the message type which will be sent
+ */
+ public static void addProtocolHeader(ByteBuffer buf, ProtocolMessage protoMsg) {
+ addProtocolHeader(buf, protoMsg, protoMsg.getMaxLength());
+ }
+
+ /**
+ * Adds the protocol header to the given {@link ByteBuffer}.
+ *
+ * @param buf the {@link ByteBuffer} to write into.
+ * @param protoMsg the message type which will be sent
+ * @param payloadLengthBytes the message length
+ */
+ public static void addProtocolHeader(ByteBuffer buf, ProtocolMessage protoMsg, short payloadLengthBytes) {
+ buf.put(protoMsg.getTypeID());
+ buf.putShort(payloadLengthBytes);
+ }
+
+ /**
+ * Expands a truncated bytes encoded transaction payload.
+ *
+ * @param data the source data
+ */
+ public static byte[] expandTx(byte[] data) {
+ byte[] txDataBytes = new byte[Transaction.SIZE];
+ // we need to expand the tx data (signature message fragment) as
+ // it could have been truncated for transmission
+ int numOfBytesOfSigMsgFragToExpand = ProtocolMessage.TRANSACTION_GOSSIP.getMaxLength() - data.length;
+ byte[] sigMsgFragPadding = new byte[numOfBytesOfSigMsgFragToExpand];
+ int sigMsgFragBytesToCopy = data.length - Protocol.GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH
+ - Protocol.NON_SIG_TX_PART_BYTES_LENGTH;
+
+ // build up transaction payload. empty signature message fragment equals padding with 1312x 0 bytes
+ System.arraycopy(data, 0, txDataBytes, 0, sigMsgFragBytesToCopy);
+ System.arraycopy(sigMsgFragPadding, 0, txDataBytes, sigMsgFragBytesToCopy, sigMsgFragPadding.length);
+ System.arraycopy(data, sigMsgFragBytesToCopy, txDataBytes, Protocol.SIG_DATA_MAX_BYTES_LENGTH,
+ Protocol.NON_SIG_TX_PART_BYTES_LENGTH);
+ return txDataBytes;
+ }
+
+ /**
+ * Truncates the given bytes encoded transaction data.
+ *
+ * @param txBytes the transaction bytes to truncate
+ * @return an array containing the truncated transaction data
+ */
+ public static byte[] truncateTx(byte[] txBytes) {
+ // check how many bytes from the signature can be truncated
+ int bytesToTruncate = 0;
+ for (int i = SIG_DATA_MAX_BYTES_LENGTH - 1; i >= 0; i--) {
+ if (txBytes[i] != 0) {
+ break;
+ }
+ bytesToTruncate++;
+ }
+ // allocate space for truncated tx
+ byte[] truncatedTx = new byte[SIG_DATA_MAX_BYTES_LENGTH - bytesToTruncate + NON_SIG_TX_PART_BYTES_LENGTH];
+ System.arraycopy(txBytes, 0, truncatedTx, 0, SIG_DATA_MAX_BYTES_LENGTH - bytesToTruncate);
+ System.arraycopy(txBytes, SIG_DATA_MAX_BYTES_LENGTH, truncatedTx, SIG_DATA_MAX_BYTES_LENGTH - bytesToTruncate,
+ NON_SIG_TX_PART_BYTES_LENGTH);
+ return truncatedTx;
+ }
+
+ /**
+ * Copies the requested transaction hash from the given source data byte array into the given destination byte
+ * array.
+ *
+ * @param source the transaction gossip packet data
+ */
+ public static byte[] extractRequestedTxHash(byte[] source) {
+ byte[] reqHashBytes = new byte[Protocol.GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH];
+ System.arraycopy(source, source.length - Protocol.GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH, reqHashBytes, 0,
+ Protocol.GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH);
+ return reqHashBytes;
+ }
+
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/ProtocolHeader.java b/src/main/java/com/iota/iri/network/protocol/ProtocolHeader.java
new file mode 100644
index 0000000000..f1c8e6cb8a
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/ProtocolHeader.java
@@ -0,0 +1,39 @@
+package com.iota.iri.network.protocol;
+
+/**
+ * The {@link ProtocolHeader} denotes the protocol version used by the node and the TLV of the packet.
+ */
+public class ProtocolHeader {
+
+ private ProtocolMessage protoMsg;
+ private short messageLength;
+
+ /**
+ * Creates a new protocol header.
+ *
+ * @param protoMsg the message type
+ * @param messageLength the message length
+ */
+ public ProtocolHeader(ProtocolMessage protoMsg, short messageLength) {
+ this.protoMsg = protoMsg;
+ this.messageLength = messageLength;
+ }
+
+ /**
+ * Gets the denoted message type.
+ *
+ * @return the denoted message type
+ */
+ public ProtocolMessage getMessageType() {
+ return protoMsg;
+ }
+
+ /**
+ * Gets the denoted message length.
+ *
+ * @return the denoted message length
+ */
+ public short getMessageLength() {
+ return messageLength;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/ProtocolMessage.java b/src/main/java/com/iota/iri/network/protocol/ProtocolMessage.java
new file mode 100644
index 0000000000..e0d2361cea
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/ProtocolMessage.java
@@ -0,0 +1,87 @@
+package com.iota.iri.network.protocol;
+
+/**
+ * Defines the different message types supported by the protocol and their characteristics.
+ */
+public enum ProtocolMessage {
+ /**
+ * The message header sent in each message denoting the TLV fields.
+ */
+ HEADER((byte) 0, (short) Protocol.PROTOCOL_HEADER_BYTES_LENGTH, false),
+ /**
+     * The initial handshake packet sent over the wire upon a new neighbor connection.
+ * Made up of:
+ * - own server socket port (2 bytes)
+ * - time at which the packet was sent (8 bytes)
+ * - own used byte encoded coordinator address (49 bytes)
+ * - own used MWM (1 byte)
+ * - supported protocol versions. we need up to 32 bytes to represent 256 possible protocol
+ * versions. only up to N bytes are used to communicate the highest supported version.
+ */
+ HANDSHAKE((byte) 1, (short) 92, true),
+ /**
+     * The transaction payload + requested transaction hash gossiping packet. In reality, most of these packets won't
+ * take up their full 1604 bytes as the signature message fragment of the tx is truncated.
+ */
+ TRANSACTION_GOSSIP((byte) 2, (short) (Protocol.GOSSIP_REQUESTED_TX_HASH_BYTES_LENGTH + Protocol.NON_SIG_TX_PART_BYTES_LENGTH
+ + Protocol.SIG_DATA_MAX_BYTES_LENGTH), true);
+
+ private static final ProtocolMessage[] lookup = new ProtocolMessage[256];
+
+ private byte typeID;
+ private short maxLength;
+ private boolean supportsDynamicLength;
+
+ ProtocolMessage(byte typeID, short maxLength, boolean supportsDynamicLength) {
+ this.typeID = typeID;
+ this.maxLength = maxLength;
+ this.supportsDynamicLength = supportsDynamicLength;
+ }
+
+ static {
+ lookup[0] = HEADER;
+ lookup[1] = HANDSHAKE;
+ lookup[2] = TRANSACTION_GOSSIP;
+ }
+
+ /**
+ * Gets the {@link ProtocolMessage} corresponding to the given type id.
+ *
+ * @param typeID the type id of the message
+ * @return the {@link ProtocolMessage} corresponding to the given type id or null
+ */
+ public static ProtocolMessage fromTypeID(byte typeID) {
+ if (typeID >= lookup.length) {
+ return null;
+ }
+ return lookup[typeID];
+ }
+
+ /**
+ * Gets the type id of the message.
+ *
+ * @return the type id of the message
+ */
+ public byte getTypeID() {
+ return typeID;
+ }
+
+ /**
+ * Gets the maximum length of the message.
+ *
+ * @return the maximum length of the message
+ */
+ public short getMaxLength() {
+ return maxLength;
+ }
+
+ /**
+ * Whether this message type supports dynamic length.
+ *
+ * @return whether this message type supports dynamic length
+ */
+ public boolean supportsDynamicLength() {
+ return supportsDynamicLength;
+ }
+
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/UnknownMessageTypeException.java b/src/main/java/com/iota/iri/network/protocol/UnknownMessageTypeException.java
new file mode 100644
index 0000000000..21f4623630
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/UnknownMessageTypeException.java
@@ -0,0 +1,15 @@
+package com.iota.iri.network.protocol;
+
+/**
+ * Thrown when an unknown {@link ProtocolMessage} type is advertised in a packet.
+ */
+public class UnknownMessageTypeException extends Exception {
+
+ /**
+ * Creates a new exception for when an unknown message type is advertised.
+ * @param msg the message for this exception
+ */
+ public UnknownMessageTypeException(String msg) {
+ super(msg);
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/message/MessageReader.java b/src/main/java/com/iota/iri/network/protocol/message/MessageReader.java
new file mode 100644
index 0000000000..fc673fed51
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/message/MessageReader.java
@@ -0,0 +1,44 @@
+package com.iota.iri.network.protocol.message;
+
+import com.iota.iri.network.protocol.ProtocolMessage;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.ReadableByteChannel;
+
+/**
+ * A {@link MessageReader} reads up to N defined bytes from a {@link ReadableByteChannel}.
+ */
+public interface MessageReader {
+
+ /**
+ * Checks whether the underlying {@link ByteBuffer} is ready.
+ *
+ * @return whether the {@link MessageReader}'s {@link ByteBuffer} is ready
+ */
+ boolean ready();
+
+ /**
+ * Reads bytes from the given channel into the {@link ByteBuffer}.
+ *
+ * @param channel the channel to read from
+ * @return how many bytes have been read into the buffer.
+ * @throws IOException thrown when reading from the channel fails
+ */
+ int readMessage(ReadableByteChannel channel) throws IOException;
+
+ /**
+ * Gets the {@link ByteBuffer} holding the message.
+ *
+ * @return the {@link ByteBuffer} holding the message.
+ */
+ ByteBuffer getMessage();
+
+ /**
+ * Gets the message type this {@link MessageReader} is expecting.
+ *
+ * @return the message type this {@link MessageReader} is expecting
+ */
+ ProtocolMessage getMessageType();
+
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/message/MessageReaderFactory.java b/src/main/java/com/iota/iri/network/protocol/message/MessageReaderFactory.java
new file mode 100644
index 0000000000..8b08e18b0f
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/message/MessageReaderFactory.java
@@ -0,0 +1,42 @@
+package com.iota.iri.network.protocol.message;
+
+import com.iota.iri.network.protocol.ProtocolMessage;
+import com.iota.iri.network.protocol.UnknownMessageTypeException;
+import com.iota.iri.network.protocol.message.impl.MessageReaderImpl;
+
+/**
+ * {@link MessageReaderFactory} provides methods to easily construct a {@link MessageReader}.
+ */
+public class MessageReaderFactory {
+
+ /**
+ * Creates a new {@link MessageReader} for the given message type.
+ *
+ * @param protoMsg the message type
+ * @return a {@link MessageReader} for the given message type
+ * @throws UnknownMessageTypeException when the message type is not known
+ */
+ public static MessageReader create(ProtocolMessage protoMsg) throws UnknownMessageTypeException {
+ switch (protoMsg) {
+ case HEADER:
+ case HANDSHAKE:
+ case TRANSACTION_GOSSIP:
+ return create(protoMsg, protoMsg.getMaxLength());
+ // there might be message types in the future which need a separate message reader implementation
+ default:
+ throw new UnknownMessageTypeException("can't construct MessageReaderImpl for unknown message type");
+ }
+ }
+
+ /**
+ * Creates a new {@link MessageReader} with an explicit length to read.
+ *
+ * @param protoMsg the message type
+ * @param messageLength the max bytes to read
+ * @return a {@link MessageReader} for the given message type
+ */
+ public static MessageReader create(ProtocolMessage protoMsg, short messageLength) {
+ return new MessageReaderImpl(protoMsg, messageLength);
+ }
+
+}
diff --git a/src/main/java/com/iota/iri/network/protocol/message/impl/MessageReaderImpl.java b/src/main/java/com/iota/iri/network/protocol/message/impl/MessageReaderImpl.java
new file mode 100644
index 0000000000..787dada451
--- /dev/null
+++ b/src/main/java/com/iota/iri/network/protocol/message/impl/MessageReaderImpl.java
@@ -0,0 +1,47 @@
+package com.iota.iri.network.protocol.message.impl;
+
+import com.iota.iri.network.protocol.ProtocolMessage;
+import com.iota.iri.network.protocol.message.MessageReader;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.ReadableByteChannel;
+
+/**
+ * A {@link MessageReaderImpl} is a {@link MessageReader}.
+ */
+public class MessageReaderImpl implements MessageReader {
+
+ private ByteBuffer msgBuf;
+ private ProtocolMessage protoMsg;
+
+ /**
+ * Creates a new {@link MessageReaderImpl}.
+ * @param protoMsg the message type
+ * @param msgLength the message length
+ */
+ public MessageReaderImpl(ProtocolMessage protoMsg, short msgLength) {
+ this.protoMsg = protoMsg;
+ this.msgBuf = ByteBuffer.allocate(msgLength);
+ }
+
+ @Override
+ public boolean ready() {
+ return !msgBuf.hasRemaining();
+ }
+
+ @Override
+ public int readMessage(ReadableByteChannel channel) throws IOException {
+ return channel.read(msgBuf);
+ }
+
+ @Override
+ public ByteBuffer getMessage() {
+ return msgBuf;
+ }
+
+ @Override
+ public ProtocolMessage getMessageType() {
+ return protoMsg;
+ }
+}
diff --git a/src/main/java/com/iota/iri/network/replicator/Replicator.java b/src/main/java/com/iota/iri/network/replicator/Replicator.java
deleted file mode 100644
index 2a5c59daba..0000000000
--- a/src/main/java/com/iota/iri/network/replicator/Replicator.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package com.iota.iri.network.replicator;
-
-import com.iota.iri.conf.NodeConfig;
-import com.iota.iri.network.Node;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * This class manages a set of Source and Sink pool workers {@link ReplicatorSourceProcessor}
- * and {@link ReplicatorSinkProcessor}. The workers are managed by the Pool manager threads
- * {@link ReplicatorSourcePool} and {@link ReplicatorSinkPool}
- *
- * A **Sink** is basically a received end point for TCP where all the peers send
- * trnasactions to. Since there is only one global endpoint for all the peers, we need
- * only a single thread to manage all the incoming messages.
- *
- * A **Source** is a single peer which sends packets to us, therefore we need multiple worker
- * threads to manage sending transaction to multiple peers.
- *
- */
-
-public class Replicator {
-
- public static final int NUM_THREADS = 32;
-
- private static final Logger log = LoggerFactory.getLogger(Replicator.class);
- private final ReplicatorSinkPool replicatorSinkPool;
- private final int port;
- private ReplicatorSourcePool replicatorSourcePool;
-
- public Replicator(Node node, NodeConfig configuration) {
- this.port = configuration.getTcpReceiverPort();
- replicatorSinkPool = new ReplicatorSinkPool(node, port, configuration.getTransactionPacketSize());
- replicatorSourcePool = new ReplicatorSourcePool(replicatorSinkPool, node, configuration.getMaxPeers(),
- configuration.isTestnet());
- }
-
- public void init() {
- new Thread(replicatorSinkPool).start();
- new Thread(replicatorSourcePool.init(port)).start();
- log.info("Started ReplicatorSourcePool");
- }
-
- public void shutdown() throws InterruptedException {
- // TODO
- replicatorSourcePool.shutdown();
- replicatorSinkPool.shutdown();
- }
-
-}
diff --git a/src/main/java/com/iota/iri/network/replicator/ReplicatorSinkPool.java b/src/main/java/com/iota/iri/network/replicator/ReplicatorSinkPool.java
deleted file mode 100644
index 3ebce699c7..0000000000
--- a/src/main/java/com/iota/iri/network/replicator/ReplicatorSinkPool.java
+++ /dev/null
@@ -1,98 +0,0 @@
-package com.iota.iri.network.replicator;
-
-import java.io.IOException;
-import java.net.Socket;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import com.iota.iri.network.TCPNeighbor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.iota.iri.network.Neighbor;
-import com.iota.iri.network.Node;
-
-public class ReplicatorSinkPool implements Runnable {
-
- private static final Logger log = LoggerFactory.getLogger(ReplicatorSinkPool.class);
- private final int port;
- private int transactionPacketSize;
- private final Node node;
-
- private ExecutorService sinkPool;
-
- public boolean shutdown = false;
-
- public final static int PORT_BYTES = 10;
-
- public ReplicatorSinkPool(Node node, int port, int transactionPacketSize) {
- this.node = node;
- this.port = port;
- this.transactionPacketSize = transactionPacketSize;
- }
-
- @Override
- public void run() {
-
- sinkPool = Executors.newFixedThreadPool(Replicator.NUM_THREADS);
- {
- List neighbors = node.getNeighbors();
- // wait until list is populated
- int loopcnt = 10;
- while ((loopcnt-- > 0) && neighbors.size() == 0) {
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- log.error("Interrupted");
- }
- }
- neighbors.stream().filter(n -> n instanceof TCPNeighbor && n.isFlagged())
- .map(n -> ((TCPNeighbor) n))
- .forEach(this::createSink);
- }
-
- while (!Thread.interrupted()) {
- // Restart attempt for neighbors that are in the configuration.
- try {
- Thread.sleep(30000);
- } catch (InterruptedException e) {
- log.debug("Interrupted: ", e);
- }
- List neighbors = node.getNeighbors();
- neighbors.stream()
- .filter(n -> n instanceof TCPNeighbor && n.isFlagged())
- .map(n -> ((TCPNeighbor) n))
- .filter(n -> n.getSink() == null)
- .forEach(this::createSink);
- }
- }
-
- public void createSink(TCPNeighbor neighbor) {
- Runnable proc = new ReplicatorSinkProcessor( neighbor, this, port, transactionPacketSize);
- sinkPool.submit(proc);
- }
-
- public void shutdownSink(TCPNeighbor neighbor) {
- Socket socket = neighbor.getSink();
- if (socket != null) {
- if (!socket.isClosed()) {
- try {
- socket.close();
- log.info("Sink {} closed", neighbor.getHostAddress());
- } catch (IOException e) {
- // TODO
- }
- }
- }
- neighbor.setSink(null);
- }
-
-
- public void shutdown() throws InterruptedException {
- shutdown = true;
- sinkPool.shutdown();
- sinkPool.awaitTermination(6, TimeUnit.SECONDS);
- }
-}
diff --git a/src/main/java/com/iota/iri/network/replicator/ReplicatorSinkProcessor.java b/src/main/java/com/iota/iri/network/replicator/ReplicatorSinkProcessor.java
deleted file mode 100644
index f64f9267f3..0000000000
--- a/src/main/java/com/iota/iri/network/replicator/ReplicatorSinkProcessor.java
+++ /dev/null
@@ -1,140 +0,0 @@
-package com.iota.iri.network.replicator;
-
-import com.iota.iri.network.TCPNeighbor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.nio.ByteBuffer;
-import java.util.zip.CRC32;
-
-class ReplicatorSinkProcessor implements Runnable {
-
- private static final Logger log = LoggerFactory.getLogger(ReplicatorSinkProcessor.class);
-
- private final TCPNeighbor neighbor;
-
- public final static int CRC32_BYTES = 16;
- private final ReplicatorSinkPool replicatorSinkPool;
- private final int port;
- private int transactionPacketSize;
-
- public ReplicatorSinkProcessor(final TCPNeighbor neighbor,
- final ReplicatorSinkPool replicatorSinkPool,
- final int port, int transactionPacketSize) {
- this.neighbor = neighbor;
- this.replicatorSinkPool = replicatorSinkPool;
- this.port = port;
- this.transactionPacketSize = transactionPacketSize;
- }
-
- @Override
- public void run() {
- try {
- Thread.sleep(1000);
- }
- catch (InterruptedException e) {
- log.info("Interrupted");
- }
-
- String remoteAddress = neighbor.getHostAddress();
-
- try {
- Socket socket;
- synchronized (neighbor) {
- Socket sink = neighbor.getSink();
- if ( sink == null ) {
- log.info("Opening sink {}", remoteAddress);
- socket = new Socket();
- socket.setSoLinger(true, 0);
- socket.setSoTimeout(30000);
- neighbor.setSink(socket);
- }
- else {
- // Sink already created
- log.info("Sink {} already created", remoteAddress);
- return;
- }
- }
-
- if (socket != null) {
- log.info("Connecting sink {}", remoteAddress);
- socket.connect(new InetSocketAddress(remoteAddress, neighbor.getPort()), 30000);
- if (!socket.isClosed() && socket.isConnected()) {
- OutputStream out = socket.getOutputStream();
- log.info("----- NETWORK INFO ----- Sink {} is connected", remoteAddress);
-
- // Let neighbor know our tcp listener port
- String fmt = "%0"+String.valueOf(ReplicatorSinkPool.PORT_BYTES)+"d";
- byte [] portAsByteArray = new byte [10];
- System.arraycopy(String.format(fmt, port).getBytes(), 0,
- portAsByteArray, 0, ReplicatorSinkPool.PORT_BYTES);
- out.write(portAsByteArray);
-
- while (!replicatorSinkPool.shutdown && !neighbor.isStopped()) {
- try {
- ByteBuffer message = neighbor.getNextMessage();
- if (neighbor.getSink() != null) {
- if (neighbor.getSink().isClosed() || !neighbor.getSink().isConnected()) {
- log.info("----- NETWORK INFO ----- Sink {} got disconnected", remoteAddress);
- return;
- } else {
- if ((message != null) && (neighbor.getSink() != null && neighbor.getSink().isConnected())
- && (neighbor.getSource() != null && neighbor.getSource().isConnected())) {
-
- byte[] bytes = message.array();
-
- if (bytes.length == transactionPacketSize) {
- try {
- CRC32 crc32 = new CRC32();
- crc32.update(message.array());
- String crc32String = Long.toHexString(crc32.getValue());
- while (crc32String.length() < CRC32_BYTES) {
- crc32String = "0"+crc32String;
- }
- out.write(message.array());
- out.write(crc32String.getBytes());
- out.flush();
- neighbor.incSentTransactions();
- } catch (IOException e2) {
- if (!neighbor.getSink().isClosed() && neighbor.getSink().isConnected()) {
- out.close();
- out = neighbor.getSink().getOutputStream();
- } else {
- log.info("----- NETWORK INFO ----- Sink {} thread terminating",
- remoteAddress);
- return;
- }
- }
- }
- }
- }
- }
- } catch (InterruptedException e) {
- log.error("Interrupted while waiting for send buffer");
- }
- }
- }
- }
- } catch (Exception e) {
- String reason = e.getMessage();
- if (reason == null || reason.equals("null")) {
- reason = "closed";
- }
- log.error("***** NETWORK ALERT ***** No sink to apiHost {}:{}, reason: {}", remoteAddress, neighbor.getPort(),
- reason);
- synchronized (neighbor) {
- Socket sourceSocket = neighbor.getSource();
- if (sourceSocket != null && (sourceSocket.isClosed() || !sourceSocket.isConnected())) {
- neighbor.setSource(null);
- }
- neighbor.setSink(null);
- }
- }
-
- }
-
-}
diff --git a/src/main/java/com/iota/iri/network/replicator/ReplicatorSourcePool.java b/src/main/java/com/iota/iri/network/replicator/ReplicatorSourcePool.java
deleted file mode 100644
index f067402916..0000000000
--- a/src/main/java/com/iota/iri/network/replicator/ReplicatorSourcePool.java
+++ /dev/null
@@ -1,82 +0,0 @@
-package com.iota.iri.network.replicator;
-
-import java.io.IOException;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import com.iota.iri.network.Node;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ReplicatorSourcePool implements Runnable {
-
- private final ReplicatorSinkPool replicatorSinkPool;
- private final Node node;
- private final int maxPeers;
- private final boolean testnet;
- private volatile boolean shutdown = false;
-
- private static final Logger log = LoggerFactory.getLogger(ReplicatorSourcePool.class);
- private ExecutorService pool;
- private int port;
-
- public ReplicatorSourcePool(final ReplicatorSinkPool replicatorSinkPool,
- final Node node,
- final int maxPeers,
- final boolean testnet) {
- this.replicatorSinkPool = replicatorSinkPool;
- this.node = node;
- this.maxPeers = maxPeers;
- this.testnet = testnet;
- }
-
- @Override
- public void run() {
- ExecutorService pool;
- ServerSocket server = null;
- pool = Executors.newFixedThreadPool(Replicator.NUM_THREADS);
- this.pool = pool;
- try {
- server = new ServerSocket(port);
- log.info("TCP replicator is accepting connections on tcp port " + server.getLocalPort());
- while (!shutdown) {
- try {
- Socket request = server.accept();
- request.setSoLinger(true, 0);
- Runnable proc = new ReplicatorSourceProcessor( replicatorSinkPool, request, node, maxPeers, testnet);
- pool.submit(proc);
- } catch (IOException ex) {
- log.error("Error accepting connection", ex);
- }
- }
- log.info("ReplicatorSinkPool shutting down");
- } catch (IOException e) {
- log.error("***** NETWORK ALERT ***** Cannot create server socket on port {}, {}", port, e.getMessage());
- } finally {
- if (server != null) {
- try {
- server.close();
- }
- catch (Exception e) {
- // don't care.
- }
- }
- }
- }
-
- public void shutdown() throws InterruptedException {
- shutdown = true;
- //notify();
- pool.shutdown();
- pool.awaitTermination(6, TimeUnit.SECONDS);
- }
-
- public ReplicatorSourcePool init(int port) {
- this.port = port;
- return this;
- }
-
-}
diff --git a/src/main/java/com/iota/iri/network/replicator/ReplicatorSourceProcessor.java b/src/main/java/com/iota/iri/network/replicator/ReplicatorSourceProcessor.java
deleted file mode 100644
index 64053da736..0000000000
--- a/src/main/java/com/iota/iri/network/replicator/ReplicatorSourceProcessor.java
+++ /dev/null
@@ -1,197 +0,0 @@
-package com.iota.iri.network.replicator;
-
-import com.iota.iri.conf.MainnetConfig;
-import com.iota.iri.conf.TestnetConfig;
-import com.iota.iri.network.Neighbor;
-import com.iota.iri.network.Node;
-import com.iota.iri.network.TCPNeighbor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketAddress;
-import java.util.List;
-import java.util.zip.CRC32;
-
-class ReplicatorSourceProcessor implements Runnable {
-
- private static final Logger log = LoggerFactory.getLogger(ReplicatorSourceProcessor.class);
-
- private final Socket connection;
-
- private final boolean shutdown = false;
- private final Node node;
- private final int maxPeers;
- private final boolean testnet;
- private final ReplicatorSinkPool replicatorSinkPool;
- private final int packetSize;
-
- private boolean existingNeighbor;
-
- private TCPNeighbor neighbor;
-
- public ReplicatorSourceProcessor(final ReplicatorSinkPool replicatorSinkPool,
- final Socket connection,
- final Node node,
- final int maxPeers,
- final boolean testnet) {
- this.connection = connection;
- this.node = node;
- this.maxPeers = maxPeers;
- this.testnet = testnet;
- this.replicatorSinkPool = replicatorSinkPool;
- this.packetSize = testnet
- ? TestnetConfig.Defaults.PACKET_SIZE
- : MainnetConfig.Defaults.PACKET_SIZE;
- }
-
- @Override
- public void run() {
- int count;
- byte[] data = new byte[2000];
- int offset = 0;
- //boolean isNew;
- boolean finallyClose = true;
-
- try {
- SocketAddress address = connection.getRemoteSocketAddress();
- InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
-
- existingNeighbor = false;
- List neighbors = node.getNeighbors();
- neighbors.stream().filter(n -> n instanceof TCPNeighbor)
- .map(n -> ((TCPNeighbor) n))
- .forEach(n -> {
- String hisAddress = inetSocketAddress.getAddress().getHostAddress();
- if (n.getHostAddress().equals(hisAddress)) {
- existingNeighbor = true;
- neighbor = n;
- }
- });
-
- if (!existingNeighbor) {
- int maxPeersAllowed = maxPeers;
- if (!testnet || Neighbor.getNumPeers() >= maxPeersAllowed) {
- String hostAndPort = inetSocketAddress.getHostName() + ":" + String.valueOf(inetSocketAddress.getPort());
- if (Node.rejectedAddresses.add(inetSocketAddress.getHostName())) {
- String sb = "***** NETWORK ALERT ***** Got connected from unknown neighbor tcp://"
- + hostAndPort
- + " (" + inetSocketAddress.getAddress().getHostAddress() + ") - closing connection";
- if (testnet && Neighbor.getNumPeers() >= maxPeersAllowed) {
- sb = sb + (" (max-peers allowed is "+String.valueOf(maxPeersAllowed)+")");
- }
- log.info(sb);
- }
- connection.getInputStream().close();
- connection.shutdownInput();
- connection.shutdownOutput();
- connection.close();
- return;
- } else {
- final TCPNeighbor freshNeighbor = new TCPNeighbor(inetSocketAddress, false);
- node.getNeighbors().add(freshNeighbor);
- neighbor = freshNeighbor;
- Neighbor.incNumPeers();
- }
- }
-
- if ( neighbor.getSource() != null ) {
- log.info("Source {} already connected", inetSocketAddress.getAddress().getHostAddress());
- finallyClose = false;
- return;
- }
- neighbor.setSource(connection);
-
- // Read neighbors tcp listener port number.
- InputStream stream = connection.getInputStream();
- offset = 0;
- while (((count = stream.read(data, offset, ReplicatorSinkPool.PORT_BYTES - offset)) != -1) && (offset < ReplicatorSinkPool.PORT_BYTES)) {
- offset += count;
- }
-
- if ( count == -1 || connection.isClosed() ) {
- log.error("Did not receive neighbors listener port");
- return;
- }
-
- byte [] pbytes = new byte [10];
- System.arraycopy(data, 0, pbytes, 0, ReplicatorSinkPool.PORT_BYTES);
- neighbor.setTcpPort((int)Long.parseLong(new String(pbytes)));
-
- if (neighbor.getSink() == null) {
- log.info("Creating sink for {}", neighbor.getHostAddress());
- replicatorSinkPool.createSink(neighbor);
- }
-
- if (connection.isConnected()) {
- log.info("----- NETWORK INFO ----- Source {} is connected", inetSocketAddress.getAddress().getHostAddress());
- }
-
- connection.setSoTimeout(0); // infinite timeout - blocking read
-
- offset = 0;
- while (!shutdown && !neighbor.isStopped()) {
-
- while ( ((count = stream.read(data, offset, (packetSize- offset + ReplicatorSinkProcessor.CRC32_BYTES))) != -1)
- && (offset < (packetSize + ReplicatorSinkProcessor.CRC32_BYTES))) {
- offset += count;
- }
-
- if ( count == -1 || connection.isClosed() ) {
- break;
- }
-
- offset = 0;
-
- try {
- CRC32 crc32 = new CRC32();
- for (int i=0; i
- * The API makes it possible to interact with the node by requesting information or actions to be taken.
- * You can interact with it by passing a JSON object which at least contains a command.
- * Upon successful execution of the command, the API returns your requested information in an {@link AbstractResponse}.
+ * The API makes it possible to interact with the node by requesting information or actions to be taken.
+ * You can interact with it by passing a JSON object which at least contains a command.
+ * Upon successful execution of the command, the API returns your requested information in an {@link AbstractResponse}.
*
*
- * If the request is invalid, an {@link ErrorResponse} is returned.
- * This, for example, happens when the command does not exist or there is no command section at all.
- * If there is an error in the given data during the execution of a command, an {@link ErrorResponse} is also sent.
+ * If the request is invalid, an {@link ErrorResponse} is returned.
+ * This, for example, happens when the command does not exist or there is no command section at all.
+ * If there is an error in the given data during the execution of a command, an {@link ErrorResponse} is also sent.
*
*
- * If an Exception is thrown during the execution of a command, an {@link ExceptionResponse} is returned.
+ * If an Exception is thrown during the execution of a command, an {@link ExceptionResponse} is returned.
*
*/
@SuppressWarnings("unchecked")
@@ -106,7 +103,8 @@ public class API {
private final BundleValidator bundleValidator;
private final SnapshotProvider snapshotProvider;
private final LedgerService ledgerService;
- private final Node node;
+ private final NeighborRouter neighborRouter;
+ private final TransactionProcessingPipeline txPipeline;
private final TipSelector tipsSelector;
private final TipsViewModel tipsViewModel;
private final TransactionValidator transactionValidator;
@@ -129,8 +127,7 @@ public class API {
//Package Private For Testing
final Map, AbstractResponse>> commandRoute;
-
-
+
private RestConnector connector;
private final ExecutorService tipSelExecService = Executors.newSingleThreadExecutor(r -> new Thread(r, "tip-selection"));
@@ -138,7 +135,7 @@ public class API {
/**
* Starts loading the IOTA API, parameters do not have to be initialized.
*
- * @param configuration
+ * @param configuration Holds IRI configuration parameters.
* @param ixi If a command is not in the standard API,
* we try to process it as a Nashorn JavaScript module through {@link IXI}
* @param transactionRequester Service where transactions get requested
@@ -147,7 +144,7 @@ public class API {
* @param bundleValidator Validates bundles
* @param snapshotProvider Manager of our currently taken snapshots
* @param ledgerService contains all the relevant business logic for modifying and calculating the ledger state.
- * @param node Handles and manages neighbors
+ * @param neighborRouter Handles and manages neighbors
* @param tipsSelector Handles logic for selecting tips based on other transactions
* @param tipsViewModel Contains the current tips of this node
* @param transactionValidator Validates transactions
@@ -155,9 +152,9 @@ public class API {
*/
public API(IotaConfig configuration, IXI ixi, TransactionRequester transactionRequester,
SpentAddressesService spentAddressesService, Tangle tangle, BundleValidator bundleValidator,
- SnapshotProvider snapshotProvider, LedgerService ledgerService, Node node, TipSelector tipsSelector,
+ SnapshotProvider snapshotProvider, LedgerService ledgerService, NeighborRouter neighborRouter, TipSelector tipsSelector,
TipsViewModel tipsViewModel, TransactionValidator transactionValidator,
- LatestMilestoneTracker latestMilestoneTracker) {
+ LatestMilestoneTracker latestMilestoneTracker, TransactionProcessingPipeline txPipeline) {
this.configuration = configuration;
this.ixi = ixi;
@@ -167,7 +164,8 @@ public API(IotaConfig configuration, IXI ixi, TransactionRequester transactionRe
this.bundleValidator = bundleValidator;
this.snapshotProvider = snapshotProvider;
this.ledgerService = ledgerService;
- this.node = node;
+ this.neighborRouter = neighborRouter;
+ this.txPipeline = txPipeline;
this.tipsSelector = tipsSelector;
this.tipsViewModel = tipsViewModel;
this.transactionValidator = transactionValidator;
@@ -216,25 +214,25 @@ public void init(RestConnector connector){
* Handles an API request body.
* Its returned {@link AbstractResponse} is created using the following logic
*
- * -
- * {@link ExceptionResponse} if the body cannot be parsed.
- *
- * -
- * {@link ErrorResponse} if the body does not contain a 'command' section.
- *
- * -
- * {@link AccessLimitedResponse} if the command is not allowed on this node.
- *
- * -
- * {@link ErrorResponse} if the command contains invalid parameters.
- *
- * -
- * {@link ExceptionResponse} if we encountered an unexpected exception during command processing.
- *
- * -
- * {@link AbstractResponse} when the command is successfully processed.
- * The response class depends on the command executed.
- *
+ * -
+ * {@link ExceptionResponse} if the body cannot be parsed.
+ *
+ * -
+ * {@link ErrorResponse} if the body does not contain a 'command' section.
+ *
+ * -
+ * {@link AccessLimitedResponse} if the command is not allowed on this node.
+ *
+ * -
+ * {@link ErrorResponse} if the command contains invalid parameters.
+ *
+ * -
+ * {@link ExceptionResponse} if we encountered an unexpected exception during command processing.
+ *
+ * -
+ * {@link AbstractResponse} when the command is successfully processed.
+ * The response class depends on the command executed.
+ *
*
*
* @param requestString The JSON encoded data of the request.
@@ -242,7 +240,7 @@ public void init(RestConnector connector){
* @param sourceAddress The address from the sender of this API request.
* @return The result of this request.
* @throws UnsupportedEncodingException If the requestString cannot be parsed into a Map.
- Currently caught and turned into a {@link ExceptionResponse}.
+ * Currently caught and turned into a {@link ExceptionResponse}.
*/
private AbstractResponse process(final String requestString, InetAddress netAddress){
try {
@@ -250,8 +248,7 @@ private AbstractResponse process(final String requestString, InetAddress netAddr
Map request;
try {
request = gson.fromJson(requestString, Map.class);
- }
- catch(JsonSyntaxException jsonSyntaxException) {
+ } catch (JsonSyntaxException jsonSyntaxException) {
return ErrorResponse.create("Invalid JSON syntax: " + jsonSyntaxException.getMessage());
}
if (request == null) {
@@ -365,7 +362,7 @@ private Hash findTail(Hash hash) throws Exception {
* The transaction is not missing a reference transaction
* Tails of tails are valid
*
- *
+ *
* If a transaction does not exist, or it is not a tail, an {@link ErrorResponse} is returned.
*
* @param transactionsList Transactions you want to check the consistency for
@@ -437,7 +434,7 @@ public boolean invalidSubtangleStatus() {
**/
@Document(name="getNeighbors")
private AbstractResponse getNeighborsStatement() {
- return GetNeighborsResponse.create(node.getNeighbors());
+ return GetNeighborsResponse.create(neighborRouter.getNeighbors());
}
/**
@@ -445,9 +442,9 @@ private AbstractResponse getNeighborsStatement() {
* The added neighbors will not be available after restart.
* Add the neighbors to your config file
* or supply them in the -n command line option if you want to add them permanently.
- *
+ *
* The URI (Unique Resource Identification) for adding neighbors is:
- * udp://IPADDRESS:PORT
+ * tcp://IPADDRESS:PORT
*
* @param uris list of neighbors to add
* @return {@link com.iota.iri.service.dto.AddedNeighborsResponse}
@@ -455,17 +452,21 @@ private AbstractResponse getNeighborsStatement() {
@Document(name="addNeighbors")
private AbstractResponse addNeighborsStatement(List uris) {
int numberOfAddedNeighbors = 0;
- try {
- for (final String uriString : uris) {
- log.info("Adding neighbor: " + uriString);
- final Neighbor neighbor = node.newNeighbor(new URI(uriString), true);
- if (!node.getNeighbors().contains(neighbor)) {
- node.getNeighbors().add(neighbor);
- numberOfAddedNeighbors++;
- }
- }
- } catch (URISyntaxException|RuntimeException e) {
- return ErrorResponse.create("Invalid uri scheme: " + e.getLocalizedMessage());
+ for (final String uriStr : uris) {
+ switch (neighborRouter.addNeighbor(uriStr)) {
+ case OK:
+ log.info("Added neighbor: {}", uriStr);
+ numberOfAddedNeighbors++;
+ break;
+ case URI_INVALID:
+ log.info("Can't add neighbor {}: URI is invalid", uriStr);
+ break;
+ case SLOTS_FILLED:
+ log.info("Can't add neighbor {}: no more slots available for new neighbors", uriStr);
+ break;
+ default:
+ // do nothing
+ }
}
return AddedNeighborsResponse.create(numberOfAddedNeighbors);
}
@@ -486,21 +487,31 @@ private AbstractResponse addNeighborsStatement(List uris) {
@Document(name="removeNeighbors")
private AbstractResponse removeNeighborsStatement(List uris) {
int numberOfRemovedNeighbors = 0;
- try {
- for (final String uriString : uris) {
- log.info("Removing neighbor: " + uriString);
- if (node.removeNeighbor(new URI(uriString),true)) {
+ for (final String uriString : uris) {
+ log.info("Removing neighbor: " + uriString);
+ switch(neighborRouter.removeNeighbor(uriString)){
+ case OK:
+ log.info("Removed neighbor: {}", uriString);
numberOfRemovedNeighbors++;
- }
+ case URI_INVALID:
+ log.info("Can't remove neighbor {}: URI is invalid", uriString);
+ break;
+ case UNRESOLVED_DOMAIN:
+ log.info("Can't remove neighbor {}: domain couldn't be resolved to its IP address", uriString);
+ break;
+ case UNKNOWN_NEIGHBOR:
+ log.info("Can't remove neighbor {}: neighbor is unknown", uriString);
+ break;
+ default:
+ // do nothing
}
- } catch (URISyntaxException|RuntimeException e) {
- return ErrorResponse.create("Invalid uri scheme: " + e.getLocalizedMessage());
+
}
return RemoveNeighborsResponse.create(numberOfRemovedNeighbors);
}
/**
- * raw transaction data (trytes) of a specific transaction.
+ * Returns raw transaction data (trytes) of a specific transaction.
* These trytes can then be converted into the actual transaction object.
* See utility and {@link Transaction} functions in an IOTA library for more details.
*
@@ -589,7 +600,7 @@ private synchronized AbstractResponse getTransactionsToApproveStatement(int dept
* Gets tips which can be used by new transactions to approve.
* If debug is enabled, statistics on tip selection will be gathered.
*
- * @param depth The milestone depth for finding the transactions to approve.
+ * @param depth The milestone depth for finding the transactions to approve.
* @param reference An optional transaction hash to be referenced by tips.
* @return The tips which can be approved.
* @throws Exception if the subtangle is out of date or if we fail to retrieve transaction tips.
@@ -622,7 +633,7 @@ List getTransactionToApproveTips(int depth, Optional reference) thro
*
* Handles statistics on tip selection.
* Increases the tip selection by one use.
- *
+ *
*
* If the {@link #getCounterGetTxToApprove()} is a power of 100, a log is send and counters are reset.
*
@@ -664,26 +675,16 @@ private synchronized AbstractResponse getTipsStatement() throws Exception {
**/
@Document(name="storeTransactions")
public AbstractResponse storeTransactionsStatement(List trytes) throws Exception {
- final List elements = new LinkedList<>();
- byte[] txTrits = Converter.allocateTritsForTrytes(TRYTES_SIZE);
- for (final String trytesPart : trytes) {
- //validate all trytes
- Converter.trits(trytesPart, txTrits, 0);
- final TransactionViewModel transactionViewModel = transactionValidator.validateTrits(txTrits,
- transactionValidator.getMinWeightMagnitude());
- elements.add(transactionViewModel);
- }
-
+ final List elements = convertTrytes(trytes);
for (final TransactionViewModel transactionViewModel : elements) {
//store transactions
if(transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot())) {
- transactionViewModel.setArrivalTime(System.currentTimeMillis() / 1000L);
+ transactionViewModel.setArrivalTime(System.currentTimeMillis());
transactionValidator.updateStatus(transactionViewModel);
transactionViewModel.updateSender("local");
transactionViewModel.update(tangle, snapshotProvider.getInitialSnapshot(), "sender");
}
}
-
return AbstractResponse.createEmptyResponse();
}
@@ -715,7 +716,7 @@ private AbstractResponse getNodeInfoStatement() throws Exception{
Runtime.getRuntime().availableProcessors(),
Runtime.getRuntime().freeMemory(),
System.getProperty("java.version"),
-
+
Runtime.getRuntime().maxMemory(),
Runtime.getRuntime().totalMemory(),
latestMilestoneTracker.getLatestMilestoneHash(),
@@ -727,8 +728,8 @@ private AbstractResponse getNodeInfoStatement() throws Exception{
milestone != null ? milestone.index() : -1,
snapshotProvider.getLatestSnapshot().getInitialIndex(),
- node.howManyNeighbors(),
- node.queuedTransactionsSize(),
+ neighborRouter.getConnectedNeighbors().size(),
+ txPipeline.getBroadcastStageQueue().size(),
System.currentTimeMillis(),
tipsViewModel.size(),
transactionRequester.numberOfTransactionsToRequest(),
@@ -737,7 +738,7 @@ private AbstractResponse getNodeInfoStatement() throws Exception{
}
/**
- * Returns information about this node configuration.
+ * Returns information about this node configuration.
*
* @return {@link GetNodeAPIConfigurationResponse}
*/
@@ -789,14 +790,14 @@ private AbstractResponse getInclusionStatesStatement(
}
// Finds the lowest tips index, or 0
- int minTipsIndex = tipsIndex.stream().reduce((a,b) -> a < b ? a : b).orElse(0);
+ int minTipsIndex = tipsIndex.stream().reduce((a, b) -> a < b ? a : b).orElse(0);
// If the lowest tips index (minTipsIndex) is 0 (or lower),
// we can't check transactions against snapshots because there were no tips,
// or tips have not been confirmed by a snapshot yet
- if(minTipsIndex > 0) {
+ if (minTipsIndex > 0) {
// Finds the highest tips index, or 0
- int maxTipsIndex = tipsIndex.stream().reduce((a,b) -> a > b ? a : b).orElse(0);
+ int maxTipsIndex = tipsIndex.stream().reduce((a, b) -> a > b ? a : b).orElse(0);
int count = 0;
// Checks transactions with indexes of tips, and sets inclusionStates byte to 1 or -1 accordingly
@@ -808,9 +809,9 @@ private AbstractResponse getInclusionStatesStatement(
TransactionViewModel transaction = TransactionViewModel.fromHash(tangle, hash);
if(transaction.getType() == TransactionViewModel.PREFILLED_SLOT || transaction.snapshotIndex() == 0) {
inclusionStates[count] = -1;
- } else if(transaction.snapshotIndex() > maxTipsIndex) {
+ } else if (transaction.snapshotIndex() > maxTipsIndex) {
inclusionStates[count] = -1;
- } else if(transaction.snapshotIndex() < maxTipsIndex) {
+ } else if (transaction.snapshotIndex() < maxTipsIndex) {
inclusionStates[count] = 1;
}
count++;
@@ -844,20 +845,20 @@ private AbstractResponse getInclusionStatesStatement(
// Loop over all snapshot indexes of transactions that were not confirmed.
// If we encounter an invalid tangle, stop this function completely.
- for(Integer index : sameIndexTransactionCount.keySet()) {
+ for (Integer index : sameIndexTransactionCount.keySet()) {
// Get the tips from the snapshot indexes we are missing
Queue sameIndexTip = sameIndexTips.get(index);
// We have tips on the same level as transactions, do a manual search.
if (sameIndexTip != null && !exhaustiveSearchWithinIndex(
- sameIndexTip, analyzedTips, trans,
- inclusionStates, sameIndexTransactionCount.get(index), index)) {
+ sameIndexTip, analyzedTips, trans,
+ inclusionStates, sameIndexTransactionCount.get(index), index)) {
return ErrorResponse.create(INVALID_SUBTANGLE);
}
}
final boolean[] inclusionStatesBoolean = new boolean[inclusionStates.length];
- for(int i = 0; i < inclusionStates.length; i++) {
+ for (int i = 0; i < inclusionStates.length; i++) {
// If a state is 0 by now, we know nothing so assume not included
inclusionStatesBoolean[i] = inclusionStates[i] == 1;
}
@@ -871,24 +872,24 @@ private AbstractResponse getInclusionStatesStatement(
* Traverses down the tips until all transactions we wish to validate have been found or transaction data is missing.
*
* @param nonAnalyzedTransactions Tips we will analyze.
- * @param analyzedTips The hashes of tips we have analyzed.
- * Hashes specified here won't be analyzed again.
- * @param transactions All transactions we are validating.
- * @param inclusionStates The state of each transaction.
- * 1 means confirmed, -1 means unconfirmed, 0 is unknown confirmation.
- * Should be of equal length as transactions.
- * @param count The amount of transactions on the same index level as nonAnalyzedTransactions.
- * @param index The snapshot index of the tips in nonAnalyzedTransactions.
+ * @param analyzedTips The hashes of tips we have analyzed.
+ * Hashes specified here won't be analyzed again.
+ * @param transactions All transactions we are validating.
+ * @param inclusionStates The state of each transaction.
+ * 1 means confirmed, -1 means unconfirmed, 0 is unknown confirmation.
+ * Should be of equal length as transactions.
+ * @param count The amount of transactions on the same index level as nonAnalyzedTransactions.
+ * @param index The snapshot index of the tips in nonAnalyzedTransactions.
* @return true if all transactions are directly or indirectly references by
- * nonAnalyzedTransactions.
- * If at some point we are missing transaction data false is returned immediately.
+ * nonAnalyzedTransactions.
+ * If at some point we are missing transaction data false is returned immediately.
* @throws Exception If a {@link TransactionViewModel} cannot be loaded.
*/
private boolean exhaustiveSearchWithinIndex(
- Queue nonAnalyzedTransactions,
- Set analyzedTips,
- List transactions,
- byte[] inclusionStates, int count, int index) throws Exception {
+ Queue nonAnalyzedTransactions,
+ Set analyzedTips,
+ List transactions,
+ byte[] inclusionStates, int count, int index) throws Exception {
Hash pointer;
MAIN_LOOP:
@@ -948,12 +949,12 @@ private boolean exhaustiveSearchWithinIndex(
@Document(name="findTransactions")
private synchronized AbstractResponse findTransactionsStatement(final Map request) throws Exception {
- final Set foundTransactions = new HashSet<>();
+ final Set foundTransactions = new HashSet<>();
boolean containsKey = false;
final Set bundlesTransactions = new HashSet<>();
if (request.containsKey("bundles")) {
- final Set bundles = getParameterAsSet(request,"bundles",HASH_SIZE);
+ final Set bundles = getParameterAsSet(request, "bundles", HASH_SIZE);
for (final String bundle : bundles) {
bundlesTransactions.addAll(
BundleViewModel.load(tangle, HashFactory.BUNDLE.create(bundle))
@@ -965,7 +966,7 @@ private synchronized AbstractResponse findTransactionsStatement(final Map addressesTransactions = new HashSet<>();
if (request.containsKey("addresses")) {
- final Set addresses = getParameterAsSet(request,"addresses",HASH_SIZE);
+ final Set addresses = getParameterAsSet(request, "addresses", HASH_SIZE);
for (final String address : addresses) {
addressesTransactions.addAll(
AddressViewModel.load(tangle, HashFactory.ADDRESS.create(address))
@@ -977,7 +978,7 @@ private synchronized AbstractResponse findTransactionsStatement(final Map tagsTransactions = new HashSet<>();
if (request.containsKey("tags")) {
- final Set tags = getParameterAsSet(request,"tags",0);
+ final Set tags = getParameterAsSet(request, "tags", 0);
for (String tag : tags) {
tag = padTag(tag);
tagsTransactions.addAll(
@@ -999,7 +1000,7 @@ private synchronized AbstractResponse findTransactionsStatement(final Map approveeTransactions = new HashSet<>();
if (request.containsKey("approvees")) {
- final Set approvees = getParameterAsSet(request,"approvees",HASH_SIZE);
+ final Set approvees = getParameterAsSet(request, "approvees", HASH_SIZE);
for (final String approvee : approvees) {
approveeTransactions.addAll(
TransactionViewModel.fromHash(tangle, HashFactory.TRANSACTION.create(approvee))
@@ -1058,9 +1059,9 @@ private String padTag(String tag) throws ValidationException {
/**
* Runs {@link #getParameterAsList(Map, String, int)} and transforms it into a {@link Set}.
*
- * @param request All request parameters.
+ * @param request All request parameters.
* @param paramName The name of the parameter we want to turn into a list of Strings.
- * @param size the length each String must have.
+ * @param size the length each String must have.
* @return the list of valid Tryte Strings.
* @throws ValidationException If the requested parameter does not exist or
* the string is not exactly trytes of size length or
@@ -1070,7 +1071,7 @@ private Set getParameterAsSet(
Map request,
String paramName, int size) throws ValidationException {
- HashSet result = getParameterAsList(request,paramName,size)
+ HashSet result = getParameterAsList(request, paramName, size)
.stream()
.collect(Collectors.toCollection(HashSet::new));
@@ -1090,20 +1091,10 @@ private Set getParameterAsSet(
**/
@Document(name="broadcastTransactions")
public AbstractResponse broadcastTransactionsStatement(List trytes) {
- final List elements = new LinkedList<>();
- byte[] txTrits = Converter.allocateTritsForTrytes(TRYTES_SIZE);
for (final String tryte : trytes) {
- //validate all trytes
+ byte[] txTrits = Converter.allocateTritsForTrytes(TRYTES_SIZE);
Converter.trits(tryte, txTrits, 0);
- final TransactionViewModel transactionViewModel = transactionValidator.validateTrits(
- txTrits, transactionValidator.getMinWeightMagnitude());
-
- elements.add(transactionViewModel);
- }
- for (final TransactionViewModel transactionViewModel : elements) {
- //push first in line to broadcast
- transactionViewModel.weightMagnitude = Curl.HASH_LENGTH;
- node.broadcast(transactionViewModel);
+ txPipeline.process(txTrits);
}
return AbstractResponse.createEmptyResponse();
}
@@ -1127,7 +1118,7 @@ public AbstractResponse broadcastTransactionsStatement(List trytes) {
**/
@Document(name="getBalances")
private AbstractResponse getBalancesStatement(List addresses,
- List tips,
+ List tips,
int threshold) throws Exception {
if (threshold <= 0 || threshold > 100) {
@@ -1195,7 +1186,7 @@ private AbstractResponse getBalancesStatement(List addresses,
* Each increase indicates another 2 tips sent.
*
* @return The current amount of times this node has done proof of work.
- * Doesn't distinguish between remote and local proof of work.
+ * Doesn't distinguish between remote and local proof of work.
*/
public static int getCounterPoW() {
return counter_PoW;
@@ -1212,7 +1203,7 @@ public static void incCounterPoW() {
* Can be 0 or more, and is set to 0 every 100 requests.
*
* @return The current amount of time spent on doing proof of work in milliseconds.
- * Doesn't distinguish between remote and local proof of work.
+ * Doesn't distinguish between remote and local proof of work.
*/
public static long getEllapsedTimePoW() {
return ellapsedTime_PoW;
@@ -1285,13 +1276,13 @@ public synchronized List attachToTangleStatement(Hash trunkTransaction,
//attachment fields: tag and timestamps
//tag - copy the obsolete tag to the attachment tag field only if tag isn't set.
- if(IntStream.range(TransactionViewModel.TAG_TRINARY_OFFSET,
- TransactionViewModel.TAG_TRINARY_OFFSET + TransactionViewModel.TAG_TRINARY_SIZE)
- .allMatch(idx -> transactionTrits[idx] == ((byte) 0))) {
+ if (IntStream.range(TransactionViewModel.TAG_TRINARY_OFFSET,
+ TransactionViewModel.TAG_TRINARY_OFFSET + TransactionViewModel.TAG_TRINARY_SIZE)
+ .allMatch(idx -> transactionTrits[idx] == ((byte) 0))) {
System.arraycopy(transactionTrits, TransactionViewModel.OBSOLETE_TAG_TRINARY_OFFSET,
- transactionTrits, TransactionViewModel.TAG_TRINARY_OFFSET,
- TransactionViewModel.TAG_TRINARY_SIZE);
+ transactionTrits, TransactionViewModel.TAG_TRINARY_OFFSET,
+ TransactionViewModel.TAG_TRINARY_SIZE);
}
Converter.copyTrits(timestamp, transactionTrits,
@@ -1317,10 +1308,10 @@ public synchronized List attachToTangleStatement(Hash trunkTransaction,
} finally {
API.incEllapsedTimePoW(System.nanoTime() - startTime);
API.incCounterPoW();
- if ( ( API.getCounterPoW() % 100) == 0 ) {
+ if ((API.getCounterPoW() % 100) == 0) {
String sb = "Last 100 PoW consumed "
- + API.getEllapsedTimePoW() / 1000000000L
- + " seconds processing time.";
+ + API.getEllapsedTimePoW() / 1000000000L
+ + " seconds processing time.";
log.info(sb);
counter_PoW = 0;
ellapsedTime_PoW = 0L;
@@ -1338,7 +1329,7 @@ public synchronized List attachToTangleStatement(Hash trunkTransaction,
/**
* Transforms an object parameter into an int.
*
- * @param request A map of all request parameters
+ * @param request A map of all request parameters
* @param paramName The parameter we want to get as an int.
* @return The integer value of this parameter
* @throws ValidationException If the requested parameter does not exist or cannot be transformed into an int.
@@ -1357,9 +1348,9 @@ private int getParameterAsInt(Map request, String paramName) thr
/**
* Transforms an object parameter into a String.
*
- * @param request A map of all request parameters
+ * @param request A map of all request parameters
* @param paramName The parameter we want to get as a String.
- * @param size The expected length of this String
+ * @param size The expected length of this String
* @return The String value of this parameter
* @throws ValidationException If the requested parameter does not exist or
* the string is not exactly trytes of size length
@@ -1376,19 +1367,20 @@ private String getParameterAsStringAndValidate(Map request, Stri
* Trytes are Strings containing only A-Z and the number 9.
*
* @param paramName The name of the parameter this String came from.
- * @param size The amount of trytes it should contain.
- * @param result The String we validate.
+ * @param size The amount of trytes it should contain.
+ * @param result The String we validate.
* @throws ValidationException If the string is not exactly trytes of size length
*/
private void validateTrytes(String paramName, int size, String result) throws ValidationException {
- if (!validTrytes(result,size,ZERO_LENGTH_NOT_ALLOWED)) {
+ if (!validTrytes(result, size, ZERO_LENGTH_NOT_ALLOWED)) {
throw new ValidationException("Invalid " + paramName + " input");
}
}
/**
* Checks if a parameter exists in the map
- * @param request All request parameters
+ *
+ * @param request All request parameters
* @param paramName The name of the parameter we are looking for
* @throws ValidationException if request does not contain paramName
*/
@@ -1403,9 +1395,9 @@ private void validateParamExists(Map request, String paramName)
* We then validate if the amount of elements does not exceed the maximum allowed.
* Afterwards we verify if each element is valid according to {@link #validateTrytes(String, int, String)}.
*
- * @param request All request parameters
+ * @param request All request parameters
* @param paramName The name of the parameter we want to turn into a list of Strings
- * @param size the length each String must have
+ * @param size the length each String must have
* @return the list of valid Tryte Strings.
* @throws ValidationException If the requested parameter does not exist or
* the string is not exactly trytes of size length or
@@ -1433,8 +1425,8 @@ private List getParameterAsList(Map request, String para
* Checks if a string is of a certain length, and contains exactly size amount of trytes.
* Trytes are Strings containing only A-Z and the number 9.
*
- * @param trytes The String we validate.
- * @param length The amount of trytes it should contain.
+ * @param trytes The String we validate.
+ * @param length The amount of trytes it should contain.
* @param zeroAllowed If set to '{@value #ZERO_LENGTH_ALLOWED}', an empty string is also valid.
* @return true if the string is valid, otherwise false
*/
@@ -1460,8 +1452,7 @@ public void shutDown() {
}
}
- /**
- *
+ /**
* Only available on testnet.
* Creates, attaches, stores, and broadcasts a transaction with this message
*
@@ -1538,6 +1529,7 @@ private synchronized AbstractResponse storeMessageStatement(String address, Stri
broadcastTransactionsStatement(powResult);
return AbstractResponse.createEmptyResponse();
}
+
//
// FUNCTIONAL COMMAND ROUTES
@@ -1722,4 +1714,18 @@ private Function