diff --git a/.travis.yml b/.travis.yml index 057c21907e..f329aec4b7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,10 +29,10 @@ script: - git checkout -f $TRAVIS_BRANCH - git checkout $build_head - git merge $TRAVIS_BRANCH - - mvn integration-test -Dlogging-level=INFO - #run jar sanity tests - VERSION=$(mvn help:evaluate -Dexpression=project.version | grep -E '^[0-9.]+') - echo $VERSION + #run jar sanity tests + - mvn integration-test -Dlogging-level=INFO #jacoco prep (appends to mvn run) - JACOCO_V=$(mvn help:evaluate -Dexpression=jacoco.version | grep -E '^[0-9.]+') - export _JAVA_OPTIONS="$_JAVA_OPTIONS -javaagent:$HOME/.m2/repository/org/jacoco/org.jacoco.agent/$JACOCO_V/org.jacoco.agent-$JACOCO_V-runtime.jar=destfile=$PWD/target/jacoco.exec,output=file,append=true,dumponexit=true" @@ -40,8 +40,6 @@ script: - git clone https://github.com/iotaledger/iri-regression-tests.git - cd iri-regression-tests - git checkout -f master - - curl -LO https://s3.eu-central-1.amazonaws.com/iotaledger-dbfiles/dev/testnet_files.tgz - - tar -xzf testnet_files.tgz - mkdir iri - cp -rf ../target iri/target - bash run_all_stable_tests.sh $VERSION diff --git a/DOCKER.md b/DOCKER.md index 7766a06900..be0b0b8198 100644 --- a/DOCKER.md +++ b/DOCKER.md @@ -2,13 +2,13 @@ Run the official iotaledger/iri container, passing the mandatory -p option: -```docker run iotaledger/iri:v1.7.0-RELEASE -p 14265``` +```docker run iotaledger/iri:vX.X.X-RELEASE -p 14265``` This will get your a running IRI with its API listening on port 14265, no neighbours and an empty database. The IRI Docker container by default expects data at /iri/data. Use the `-v` option of the `docker run` command to mount volumes so to have persistent data. You can also pass more command line options to the docker run command and those will be passed to IRI. 
If you want to use a iri.ini file with the docker container, supposing it's stored under /path/to/conf/iri.ini on your docker host, then pass `-v /path/to/conf:/iri/conf` and add -c /iri/conf/iri.ini as docker run arguments. So for example the `docker run` command above would become: -```docker run -v /path/to/conf:/iri/conf -v /path/to/data:/iri/data iotaledger/iri:v1.7.0-RELEASE -p 14265 -c /iri/conf/iri.ini``` +```docker run -v /path/to/conf:/iri/conf -v /path/to/data:/iri/data iotaledger/iri:vX.X.X-RELEASE -p 14265 -c /iri/conf/iri.ini``` Please refer to the IRI documentation for further command line options and iri.ini options. @@ -61,7 +61,7 @@ ExecStart=/usr/bin/docker run \ -p 14265:14265 \ -p 15600:15600 \ -p 14600:14600/udp \ -iotaledger/iri:v1.7.0-RELEASE \ +iotaledger/iri:vX.X.X-RELEASE \ -p 14265 \ --zmq-enabled \ --testnet diff --git a/Dockerfile b/Dockerfile index dc2f3b468a..1c0bb696b6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM iotacafe/maven:3.5.4.oracle8u181.1.webupd8.1.1-1 as local_stage_build +FROM iotacafe/maven:3.5.4.oracle8u181.1.webupd8.1.1-1@sha256:5e30eb28d778a65af2498bf1b7ef594adf046d44a8e4f7b32b326d8d10626e93 as local_stage_build MAINTAINER giorgio@iota.org WORKDIR /iri @@ -7,7 +7,7 @@ COPY . /iri RUN mvn clean package # execution image -FROM iotacafe/java:oracle8u181.1.webupd8.1-1 +FROM iotacafe/java:oracle8u181.1.webupd8.1-1@sha256:21b0fb1e5b5be7cd239a742238f346e076a46dc0003670cd50f079780288773f RUN apt-get update && apt-get install -y --no-install-recommends \ jq curl socat \ diff --git a/README.md b/README.md index 66057653b9..1f1af79861 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The IRI repository is the main IOTA Reference Implementation and the embodiment This is a full-featured [[IOTA]](https://iota.org/) node with a convenient JSON-REST HTTP interface. 
It allows users to become part of the [[IOTA]](https://iota.org) network as both a transaction relay -and network information provider through the easy-to-use [[API]](https://iota.readme.io/reference). +and network information provider through the easy-to-use [[API]](https://docs.iota.org/docs/iri/0.1/references/api-reference). It is specially designed for users seeking a fast, efficient and fully-compatible network setup. @@ -27,8 +27,8 @@ The IOTA network is an independent peer-to-peer network with a first-user, frien - As a 'friend-to-friend' network, you have the privilege of joining new users into the network through your node by adding them to your approved neighbors list — ensuring that you both broadcast to them and also receive their broadcasts. -You can **find neighbors** quickly at both our [[Discord Community]](https://discord.gg/7Gu2mG5) and [[forum.iota.org]](https://forum.iota.org/). - +You can **find neighbors** on the #nodesharing channel of our [[Discord server]](https://discord.gg/7Gu2mG5). + Everyone will be welcoming and very happy to help you get connected. If you want to get tokens for your testcase, please just ask in one of the communication channels. @@ -39,7 +39,13 @@ please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. # Documentation -This page contains basic instructions for setting up an IRI node. You can find the full documentation on our [documentation website](https://docs.iota.org/iri). Also see the [IRI API refernece](https://iota.readme.io/reference). +This page contains basic instructions for setting up an IRI node. 
You can find the full documentation on: +- Our [documentation website](https://docs.iota.org/docs/iri/0.1/introduction/overview) +- [IRI API refernece](https://docs.iota.org/docs/iri/0.1/references/api-reference) + +You can also use one of these great community guides: +- [IOTA Partners guide](https://iota.partners/) +- [IRI Playbook](https://iri-playbook.readthedocs.io/en/master/index.html) # Installing @@ -61,7 +67,7 @@ $ mvn package This will create a `target` directory in which you will find the executable jar file that you can use. -### How to run IRI +### How to run IRI #### Locally diff --git a/changelog.txt b/changelog.txt index 85d96bc9fa..6f02150a1c 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,3 +1,33 @@ +1.7.1 + +- Feature: Add getAllAddresses to the spentAdddresses provider. This is to enable exporting with IXI (#1495) +- Fix: update solid flag from past-present (#1492) +- Fix: Remove git-commit plugin (#1483) +- Fix: Prevents IndexOutOfBoundsException within the MilestoneServiceImpl (#1480) +- Change: Disable Tip Solidifier +- Change: Pruning is now off by default (#1471) +- Feature: adds a tip-selection timeout mechanism (#1404) +- Fix: fetch correct arrival time while traversing dags for orphaned transactions. (#1407) +- Fix: recent seen transactions digest (#1453) +- Documentation: Changed
usage to

(#1457) +- Feature: Doclet annotations (#1155) +- Fix: fixes spent states of addresses not getting persisted on tx pruning (#1437) +- Change: Made API class easier to work with +- Fix: add boolean values to configurations for regression tests +- Fix: Boolean flag consistent usage (#1295) +- Feature: Do not request one transaction at a time (#1311) +- Fix: log inconsistent addresses in debug mode (#1347) +- Feature: Use xxHash instead of SHA-256 on incoming transaction hashes cache (#1326) +- Feature: Added iotacafe/maven and iotacafe/java container full digest with sha256 in Dockerfile's FROM directives. (#1259) +- Feature: Introducing a Cuckoo Filter for local snapshots (#1100) +- Change: Prepare IRI to have its version injected by CI (#1359) +- Change: print out line number in log info. (#1362) +- Feature: Add configuration parameter for remote trusted api hosts (#1203) +- Feature: upgrade RocksDB to version 5.17.2 (#1206) +- Fix: unknown IXI command response (#1327) +- Documentation: readme links fix (#1355) +- Fix: ZMQ publishes to TCP and IPC instead of just one of them + 1.7.0 - refactor milestone signature parameters to be configurable (#1322) - migration from legacy coordinator to compass diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 0fbf491c42..1f56059e0f 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -11,5 +11,5 @@ exec java \ -Xmx$JAVA_MAX_MEMORY \ -Djava.net.preferIPv4Stack=true \ -jar $DOCKER_IRI_JAR_PATH \ - --remote --remote-limit-api "$DOCKER_IRI_REMOTE_LIMIT_API" \ + --remote true --remote-limit-api "$DOCKER_IRI_REMOTE_LIMIT_API" \ "$@" diff --git a/pom.xml b/pom.xml index 7edc71d4bc..41bd2ac3bd 100644 --- a/pom.xml +++ b/pom.xml @@ -5,7 +5,7 @@ com.iota iri - 1.7.0-RELEASE + 1.7.1-RELEASE IRI IOTA Reference Implementation @@ -18,7 +18,7 @@ 1.8 UTF-8 - 3.0.1 + 3.1.0 3.0.0 1.4.26.Final 0.7.9 @@ -26,9 +26,17 @@ This variable will be load by checkstyle plugin automatically. 
It is more global than setting the configLocation attribute on every maven goal. --> checkstyle.xml + + com.github.iotaledger + java-md-doclet + master-SNAPSHOT + + jitpack.io + https://jitpack.io + sonatype-oss-public https://oss.sonatype.org/content/groups/public/ @@ -89,7 +97,7 @@ org.rocksdb rocksdbjni - 5.7.3 + 5.17.2 @@ -103,7 +111,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.6 + 2.9.8 @@ -124,6 +132,19 @@ undertow-websockets-jsr ${undertow.version} + + + + org.jboss.resteasy + resteasy-undertow + 3.6.3.Final + + + + org.jboss.resteasy + resteasy-jackson-provider + 3.6.3.Final + @@ -195,12 +216,31 @@ 1.21 test + + + ${md-doclet-group} + ${md-doclet-artifact} + ${md-doclet-version} + pl.touk throwing-function 1.3 + + + + com.google.guava + guava + 23.0 + + + + net.openhft + zero-allocation-hashing + 0.8 + @@ -218,6 +258,19 @@ false + + + org.apache.maven.plugins + maven-jar-plugin + 3.1.1 + + + + true + + + + @@ -339,7 +392,7 @@ commons-io:commons-io:2.5:jar:null:compile:2852e6e05fbb95076fc091f6d1780f1f8fe35e0f - org.rocksdb:rocksdbjni:5.7.3:jar:null:compile:421b44ad957a2b6cce5adedc204db551831b553d + org.rocksdb:rocksdbjni:5.17.2:jar:null:compile:bca52276cabe91a3b97cc18e50fa2eabc2986f58 com.google.code.gson:gson:2.8.1:jar:null:compile:02a8e0aa38a2e21cb39e2f5a7d6704cbdc941da0 @@ -380,6 +433,14 @@ pl.touk:throwing-function:1.3:jar:null:compile:32947866b8754295efde73ee7d39ea29a247a2b5 + + + org.jboss.resteasy:resteasy-undertow:3.6.3.Final:jar:null:compile:b5d9d0e709de777b87d72927b405a2e88d39d1fa + + + + org.jboss.resteasy:resteasy-jackson-provider:3.6.3.Final:jar:null:compile:5e999c6a9d18b6f0492ea44765403de164fb4abf + @@ -497,7 +558,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 2.10.4 + ${javadoc-version} private true @@ -636,10 +697,7 @@ @@ -650,21 +708,25 @@ javadoc - - com.iota.mdxdoclet.MDXDoclet - src/main/java - false - mdx - - -version "${project.version}" - - true - - com.iota - mdxdoclet - 0.1 - - + + org.iota.mddoclet.MDDoclet + 
src/main/java + false + + -version "${project.version}" + -classeslist "API" + -template "iri" + + -repolink "${project.scm.url}blob/master/src/main/java/" + + + true + + ${md-doclet-group} + ${md-doclet-artifact} + ${md-doclet-version} + + javadoc diff --git a/python-regression/ciglue.sh b/python-regression/ciglue.sh index d6be1c3ea5..fd0aea118f 100755 --- a/python-regression/ciglue.sh +++ b/python-regression/ciglue.sh @@ -28,7 +28,7 @@ cd .. pip install -e . for machine_dir in tests/features/machine?; do - python tiab/create_cluster.py -i $IMAGE -t $UUID -n $K8S_NAMESPACE -c $machine_dir/config.yml -o $machine_dir/output.yml -d + python tiab/create_cluster.py -i $IMAGE -t $UUID -n $K8S_NAMESPACE -c $machine_dir/config.yml -o $machine_dir/output.yml -x .. -d if [ $? -ne 0 ]; then ERROR=1 python < 0, "Transaction was not created correctly" - world.responses['attachToTangle'] = {} world.responses['attachToTangle'][node] = transaction - logger.info('Transaction Sent') setattr(static, "TEST_STORE_TRANSACTION", transaction.get('trytes')) + return transaction @step(r'an inconsistent transaction is generated on "([^"]+)"') @@ -55,9 +46,9 @@ def create_inconsistent_transaction(step, node): """ world.config['nodeId'] = node api = api_utils.prepare_api_call(node) - trunk = getattr(static, "NULL_HASH") + trunk = static.NULL_HASH branch = trunk - trytes = getattr(static, "EMPTY_TRANSACTION_TRYTES") + trytes = static.EMPTY_TRANSACTION_TRYTES argument_list = {'trunk_transaction': trunk, 'branch_transaction': branch, 'trytes': [trytes], 'min_weight_magnitude': 14} @@ -66,8 +57,6 @@ def create_inconsistent_transaction(step, node): transaction_trytes = transaction.get('trytes') transaction_hash = Transaction.from_tryte_string(transaction_trytes[0]) - logger.info(transaction_hash.hash) - if 'inconsistentTransactions' not in world.responses: world.responses['inconsistentTransactions'] = {} world.responses['inconsistentTransactions'][node] = transaction_hash.hash @@ -85,7 +74,7 @@ 
def issue_stitching_transaction(step, node, tag): trunk = side_tangle_transaction branch = gtta_transactions['branchTransaction'] - stitching_address = getattr(static, "STITCHING_ADDRESS") + stitching_address = static.STITCHING_ADDRESS logger.debug('Trunk: ' + str(trunk)) logger.debug('Branch: ' + str(branch)) @@ -106,7 +95,7 @@ def issue_stitching_transaction(step, node, tag): def reference_stitch_transaction(step): node = world.config['nodeId'] stitch = world.responses['previousTransaction'][node] - referencing_address = getattr(static, "REFERENCING_ADDRESS") + referencing_address = static.REFERENCING_ADDRESS api = api_utils.prepare_api_call(node) @@ -129,3 +118,58 @@ def make_transaction(node, arg_list): transaction_results = pool.start_pool(make_transaction, 1, {node: {'api': api, 'responses': world.responses}}) pool.fetch_results(transaction_results[0], 30) + + +@step(r'"(\d+)" transactions are issued on "([^"]+)" with:') +def issue_multiple_transactions(step, num_transactions, node): + transactions_to_store = [] + world.responses['evaluate_and_send'] = {} + world.config['nodeId'] = node + # Placeholder values for seed if present + seed_value = "" + seed_type = "" + + for arg_index, arg in enumerate(step.hashes): + if arg['keys'] == "seed" and arg['type'] == "staticList": + seed_value = arg['values'] + seed_type = arg['type'] + + for iteration in range(int(num_transactions)): + seed = "" + if seed_value != "" and seed_type == "staticList": + seed = getattr(static, seed_value)[iteration] + + api = api_utils.prepare_api_call(node, seed=seed) + + logger.info('Sending Transaction {}'.format(iteration + 1)) + transaction = transactions.evaluate_and_send(api, seed, step.hashes) + transaction_hash = Transaction.from_tryte_string(transaction.get('trytes')[0]).hash + transactions_to_store.append(transaction_hash) + + world.responses['evaluate_and_send'][node] = transactions_to_store + logger.info("Transactions generated and stored") + + +@step(r'a milestone is issued 
with index (\d+) and references') +def issue_a_milestone_with_reference(step, index): + """ + This method issues a milestone with a given index and reference transaction. The input transaction pointer should + always have the key "transactions", but may be a pointer to either a staticValue list stored in staticValues.py, or + a responseList for "findTransactions". + + :param index: The index of the milestone you are issuing + """ + node = world.config['nodeId'] + address = static.TEST_BLOWBALL_COO + api = api_utils.prepare_api_call(node) + + reference_transaction = transactions.fetch_transaction_from_list(step.hashes, node) + logger.info('Issuing milestone {}'.format(index)) + milestone = milestones.issue_milestone(address, api, index, reference_transaction) + + if 'latestMilestone' not in world.config: + world.config['latestMilestone'] = {} + + milestone_hash = Transaction.from_tryte_string(milestone['trytes'][0]).hash + milestone_hash2 = Transaction.from_tryte_string(milestone['trytes'][1]).hash + world.config['latestMilestone'][node] = [milestone_hash, milestone_hash2] diff --git a/python-regression/util/conversion.py b/python-regression/util/conversion.py new file mode 100644 index 0000000000..d8a8fd2731 --- /dev/null +++ b/python-regression/util/conversion.py @@ -0,0 +1,9 @@ +from iota import TryteString, trits_from_int + + +def int_to_trytestring(int_input, length): + trits = trits_from_int(int(int_input)) + trytes = TryteString.from_trits(trits) + if len(trytes) < length: + trytes += '9' * (length - len(trytes)) + return trytes diff --git a/python-regression/util/milestone_logic/__init__.py b/python-regression/util/milestone_logic/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python-regression/util/milestone_logic/milestones.py b/python-regression/util/milestone_logic/milestones.py new file mode 100644 index 0000000000..1c6d0340bd --- /dev/null +++ b/python-regression/util/milestone_logic/milestones.py @@ -0,0 +1,40 @@ +from iota 
import ProposedTransaction, ProposedBundle, Tag, Address +from util import conversion as converter +from util.transaction_bundle_logic import bundle_logic + +import logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def issue_milestone(address, api, index, *reference_transaction): + txn1 = ProposedTransaction( + address=Address(address), + value=0 + ) + + txn2 = ProposedTransaction( + address=Address(address), + value=0 + ) + + bundle = ProposedBundle() + bundle.add_transaction(txn1) + bundle.add_transaction(txn2) + + bundle[0]._legacy_tag = Tag(converter.int_to_trytestring(index, 9)) + + bundle_logic.finalize(bundle) + + tips = api.get_transactions_to_approve(depth=3) + trunk = tips['trunkTransaction'] + if reference_transaction: + branch = reference_transaction[0] + else: + branch = tips['branchTransaction'] + + bundle_trytes = bundle.as_tryte_strings() + milestone = api.attach_to_tangle(trunk, branch, bundle_trytes, 9) + api.broadcast_and_store(milestone['trytes']) + + return milestone diff --git a/python-regression/util/neighbor_logic/neighbor_logic.py b/python-regression/util/neighbor_logic/neighbor_logic.py index 157de548cc..09f86e2864 100644 --- a/python-regression/util/neighbor_logic/neighbor_logic.py +++ b/python-regression/util/neighbor_logic/neighbor_logic.py @@ -17,7 +17,8 @@ def check_if_neighbors(api, neighbors, expected_neighbor): """ is_neighbor = False for neighbor in enumerate(neighbors): - if expected_neighbor == neighbors[neighbor]['address']: + logger.info(neighbor) + if expected_neighbor == neighbors[neighbor[0]]['address']: logger.info("Already a neighbor") is_neighbor = True else: diff --git a/python-regression/util/static_vals.py b/python-regression/util/static_vals.py index dbe2c16a3d..f5b20e4ced 100644 --- a/python-regression/util/static_vals.py +++ b/python-regression/util/static_vals.py @@ -4,11 +4,13 @@ TEST_TRANSACTION_HASHES = 
["BNKODGPOSCN9ENBCFYXPIJZMSACAFTZIAGSWOCZFG9BYECELVD9JLBDSFIDKNXOQIRPTGNWZDMSYZ9999", "ZPZKTOXRHKRPGNJKOCMHBQWGSMTMSDTVSYHVNZN9MMMPAZHOJYHOESCXGIDLTMXPFWDFKRNHAILRZ9999"] -TEST_NEIGHBORS = [u'udp://178.128.236.6:14600',u'udp://167.99.178.3:14600'] +TEST_NEIGHBORS = [u'udp://178.128.236.6:14600', u'udp://167.99.178.3:14600'] TEST_HASH = "NMPXODIWSCTMWRTQ9AI9VROYNFACWCQDXDSJLNC9HKCECBCGQTBUBKVROXZLQWAZRQUGIJTLTMAMSH999" +CONFIRMATION_REFERENCE_HASH = "ZSPRRUJRXHBSRFGCCPVHNXWJJKXRYZSAU9ZEGWFD9LPTWOJZARRLOEQYYWIKOPSXIBFD9ADNIVAHKG999" NULL_HASH = "999999999999999999999999999999999999999999999999999999999999999999999999999999999" + TEST_TIP_LIST = ["SBKWTQWCFTF9DBZHJKQJKU9LXMZD9BMWJIJLZCCZYJFWIBGYYQBJOWWFWIHDEDTIHUB9PMOWZVCPKV999"] TEST_ADDRESS = "TEST9TRANSACTION9TEST9TRANSACTION9TEST9TRANSACTION9TEST9TRANSACTION9TEST999999999" @@ -16,14 +18,27 @@ TEST_STORE_ADDRESS = "STORE9AND9FIND9999999999999999999999999999999999999999999999999999999999999999999" TEST_CONSISTENCY_ADDRESS = "THIS9TRANSACTION9IS9NOT9CONSISTENT99999999999999999999999999999999999999999999999" TEST_BLOWBALL_COO = "EFPNKGPCBXXXLIBYFGIGYBYTFFPIOQVNNVVWTTIYZO9NFREQGVGDQQHUUQ9CLWAEMXVDFSSMOTGAHVIBH" +TEST_TRANSACTIONS_COO = "BTCAAFIH9CJIVIMWFMIHKFNWRTLJRKSTMRCVRE9CIP9AEDTOULVFRHQZT9QAQBZXXAZGBNMVOOKTKAXTB" +THE_BANK = ["THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9999999999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9TWO999999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9THREE9999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9FOUR99999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9FIVE99999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9SIX999999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9SEVEN9999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9EIGHT9999999999999999999999999999999999999", + 
"THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9NINE99999999999999999999999999999999999999", + "THIS9TEST9ADDRESS9HAS9ONE9HUNDRED9IOTA9TEN999999999999999999999999999999999999999"] SIDE_TANGLE_ADDRESS = "SIDE9TANGLE9999999999999999999999999999999999999999999999999999999999999999999999" STITCHING_ADDRESS = "STITCHING9TRANSACTIONS99999999999999999999999999999999999999999999999999999999999" REFERENCING_ADDRESS = "REFERENCES9STITCHING9TRANSACTION9999999999999999999999999999999999999999999999999" +ATTACHED_TRANSACTIONS = [] +FOUND_TRANSACTIONS = [] -SIDE_TANGLE_TRANSACTIONS =[ +SIDE_TANGLE_TRANSACTIONS = [ 'MKSVLU9BBBKOIAMPHKCHWNKXESH9RWOOAPSJUZXPDIQIMBLIEXLNARBNPKNCHPEZSYBXWUVEKVJMXX999', 'HADZDR9WDCRFTNRPXZTUUSZYXPRHIGVANQQMHVPGIBDDKEFCWVPAXNZUNEKDAOIOYTVPKOLRNNGVSE999', 'DBXIMZZZNCFQAZDZDCXAGSMIJPO9WPUKUROVOSISVMQMROAYYHASJLAPGPJ9SYMK9RHULLHRFBLUUB999', @@ -82,3 +97,4 @@ LDLFAPDQZCKROIQRDKHZZIX9QQ9RQICWYLH9EUCFZUBKWAAREIXSIPLNQBGXAACBZAKCWLC999999999999999999999999999999999999999999999999\ 99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999\ 9999999999999999999999999999999999999999999999999999999999999999999999999999" + diff --git a/python-regression/util/test_logic/api_test_logic.py b/python-regression/util/test_logic/api_test_logic.py index 258a288f19..f4ab69ebe6 100644 --- a/python-regression/util/test_logic/api_test_logic.py +++ b/python-regression/util/test_logic/api_test_logic.py @@ -1,13 +1,15 @@ from aloe import world from iota import Iota,Address,Tag,TryteString -from util import static_vals +from copy import deepcopy +from . import value_fetch_logic as value_fetch + import logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -def prepare_api_call(node_name): +def prepare_api_call(node_name, **seed): """ Prepares an api target as an entry point for API calls on a specified node. 
@@ -19,7 +21,7 @@ def prepare_api_call(node_name): host = world.machine['nodes'][node_name]['host'] port = world.machine['nodes'][node_name]['ports']['api'] address = "http://" + host + ":" + str(port) - api = Iota(address) + api = Iota(address, **seed) logger.info('API call prepared for %s', address) return api @@ -32,18 +34,10 @@ def check_responses_for_call(api_call): return False -def fetch_response(api_call): - return world.responses[api_call] - - def place_response(api_call, node, response): world.responses[api_call][node] = response -def fetch_config(key): - return world.config[key] - - def check_neighbors(step, node): api = prepare_api_call(node) response = api.getNeighbors() @@ -53,12 +47,12 @@ def check_neighbors(step, node): for i in response: expected_neighbors = step.hashes if type(response[i]) != int: - for x in range(len(response[i])): + for x in range(len(response[i])): if expected_neighbors[0]['neighbors'] == response[i][x]['address']: contains_neighbor[0] = True if expected_neighbors[1]['neighbors'] == response[i][x]['address']: contains_neighbor[1] = True - + return contains_neighbor @@ -76,41 +70,38 @@ def prepare_options(args, option_list): :param args: The gherkin table arguments from the feature file :param option_list: The list dictionary that the arguments will be placed into """ + for x in range(len(args)): if len(args) != 0: key = args[x]['keys'] value = args[x]['values'] arg_type = args[x]['type'] - if arg_type == "int": - value = int(value) - elif arg_type == "list": - value = [value] - elif arg_type == "nodeAddress": - host = world.machine['nodes'][value]['host'] - port = world.machine['nodes'][value]['ports']['gossip-udp'] - address = "udp://" + host + ":" + str(port) - value = [address.decode()] - elif arg_type == "staticValue": - value = getattr(static_vals, value) - elif arg_type == "staticList": - address = getattr(static_vals, value) - value = [address] - elif arg_type == "bool": - if value == "False": - value = False - else: - 
value = True - elif arg_type == "responseValue": - config = fetch_config('nodeId') - response = fetch_response(value) - value = response[config] - elif arg_type == "responseList": - config = fetch_config('nodeId') - response = fetch_response(value) - value = [response[config]] - - option_list[key] = value + fetch_list = { + 'int': value_fetch.fetch_int, + 'string': value_fetch.fetch_string, + 'list': value_fetch.fetch_list, + 'nodeAddress': value_fetch.fetch_node_address, + 'staticValue': value_fetch.fetch_static_value, + 'staticList': value_fetch.fetch_static_list, + 'bool': value_fetch.fetch_bool, + 'responseValue': value_fetch.fetch_response_value, + 'responseList': value_fetch.fetch_response_list, + 'responseHashes': value_fetch.fetch_response_value_hashes, + 'configValue': value_fetch.fetch_config_value, + 'configList': value_fetch.fetch_config_list, + # TODO: remove the need for this logic + 'ignore': value_fetch.fetch_string + } + + option = fetch_list[arg_type](value) + + """ + Fills option_list with the fetched value. Excludes seed as an option, as it's only there for value + transactions and is not required as an argument for any api calls. + """ + if key != 'seed': + option_list[key] = option def fetch_call(api_call, api, options): @@ -160,12 +151,12 @@ def assign_nodes(node, node_list): if node == 'all nodes': for current_node in world.machine['nodes']: api = prepare_api_call(current_node) - node_list[current_node] = api + node_list[current_node] = {'api': api} node = next(iter(world.machine['nodes'])) world.config['nodeId'] = node else: api = prepare_api_call(node) - node_list[node] = api + node_list[node] = {'api': api} world.config['nodeId'] = node @@ -198,3 +189,21 @@ def prepare_transaction_arguments(arg_list): arg_list[key] = Tag(arg_list[key]) elif key == 'message': arg_list[key] = TryteString.from_unicode(arg_list[key]) + + + +def duplicate_arguments(arg_list): + """ + Duplicates the step arguments, providing a copy for storage and comparison. 
+ + :param arg_list: The original step arguments you would like to copy. + :return: Copy of the original argument list. + """ + + stored_values = deepcopy(arg_list) + stored_value_list = {} + for index, value in enumerate(stored_values): + stored_value_list[index] = value + + return stored_value_list + diff --git a/python-regression/util/test_logic/value_fetch_logic.py b/python-regression/util/test_logic/value_fetch_logic.py new file mode 100644 index 0000000000..95aa4ee784 --- /dev/null +++ b/python-regression/util/test_logic/value_fetch_logic.py @@ -0,0 +1,145 @@ +from aloe import world +from util import static_vals as static + + +def fetch_config(key): + """ + Retrieves a stored configuration object from the aloe.world global variable. + :param key: The key of the object that will be retrieved + :return: The stored object + """ + return world.config[key] + + +def fetch_response(api_call): + """ + Retrieves a stored response from the aloe.world global variable. + :param api_call: The api call key for the response + :return: The stored response + """ + return world.responses[api_call] + + +def fetch_int(value): + """ + Returns an int representation of the input value. + :param value: The input value + :return: The int representation + """ + return int(value) + + +def fetch_string(value): + """ + Returns a string representation of the input value. + :param value: The input value + :return: The string representation + """ + return str(value) + + +def fetch_list(value): + """ + Returns the input value as a list. + :param value: The input value + :return: The list representation + """ + return [value] + + +def fetch_config_value(value): + """ + Fetches the configuration object referenced by the input value from the stored configuration values. 
+ :param value: The configuration key to be fetched + :return: The referenced configuration object + """ + node = fetch_config('nodeId') + return world.config[value][node] + + +def fetch_config_list(value): + """ + Fetches the configuration object referenced by the input value and returns the object as a list representation. + :param value: The configuration key to be fetched + :return: The referenced configuration object in list format + """ + node = fetch_config('nodeId') + return [world.config[value][node]] + + +def fetch_node_address(value): + """ + Fetches the node address of the given node reference value from the stored machine configuration. + :param value: The name of the node you wish to pull the address for + :return: The address of the referenced node in list format + """ + host = world.machine['nodes'][value]['host'] + port = world.machine['nodes'][value]['ports']['gossip-udp'] + address = "udp://" + host + ":" + str(port) + return [address.decode()] + + +def fetch_static_value(value): + """ + Retrieves the referenced object from the util/static_vals.py file. + :param value: The reference for the object to be retrieved + :return: The stored object + """ + return getattr(static, value) + + +def fetch_static_list(value): + """ + Retrieves the referenced object from the util/static_vals.py file and returns it as a list. + :param value: The reference for the object to be retrieved + :return: The stored object in list format + """ + static_value = getattr(static, value) + return [static_value] + + +def fetch_bool(value): + """ + Returns the bool conversion of the input string. The input value should only ever be "True" or "False". + :param value: The input value + :return: The proper bool conversion of th input string + """ + if value == "False": + return False + else: + return True + + +def fetch_response_value(value): + """ + Retrieves the response object referenced by the input value from the aloe.world variable. 
+ :param value: The api_call reference for the response object + :return: The stored response object + """ + config = fetch_config('nodeId') + response = fetch_response(value) + return response[config] + + +def fetch_response_list(value): + """ + Retrieves the response object referenced by the input value from the aloe.world variable and returns it as a list. + :param value: The api_call reference for the response object + :return: The stored response object in list format + """ + config = fetch_config('nodeId') + response = fetch_response(value) + return [response[config]] + + +def fetch_response_value_hashes(value): + """ + Retrieves the response object referenced by the input value from the aloe.world variable, and returns the 'hashes' + object from within it. + :param value: The api_call reference for the response object ['findTransactions' for this particular call] + :return: The 'hashes' list stored within the response object + """ + config = fetch_config('nodeId') + response = fetch_response(value) + return response[config]['hashes'] + diff --git a/python-regression/util/transaction_bundle_logic/bundle_logic.py b/python-regression/util/transaction_bundle_logic/bundle_logic.py new file mode 100644 index 0000000000..8e3ce43b1a --- /dev/null +++ b/python-regression/util/transaction_bundle_logic/bundle_logic.py @@ -0,0 +1,22 @@ +from iota import BundleHash, Fragment +from iota.crypto import HASH_LENGTH +from iota.crypto.kerl import Kerl + + +def finalize(bundle): + sponge = Kerl() + last_index = len(bundle) - 1 + + for (i, txn) in enumerate(bundle): + txn.current_index = i + txn.last_index = last_index + sponge.absorb(txn.get_signature_validation_trytes().as_trits()) + + bundle_hash_trits = [0] * HASH_LENGTH + sponge.squeeze(bundle_hash_trits) + + bundle_hash = BundleHash.from_trits(bundle_hash_trits) + + for txn in bundle: + txn.bundle_hash = bundle_hash + txn.signature_message_fragment = Fragment(txn.message or b'') diff --git 
a/python-regression/util/transaction_bundle_logic/transaction_logic.py b/python-regression/util/transaction_bundle_logic/transaction_logic.py index 02ec64d67b..a204bf6846 100644 --- a/python-regression/util/transaction_bundle_logic/transaction_logic.py +++ b/python-regression/util/transaction_bundle_logic/transaction_logic.py @@ -1,4 +1,7 @@ -from iota import ProposedBundle,ProposedTransaction,Address,Tag +from iota import ProposedBundle, ProposedTransaction, Address, Tag +from util import static_vals as static +from util.test_logic import api_test_logic as api_utils +from util.test_logic import value_fetch_logic as value_fetch import logging logging.basicConfig(level=logging.INFO) @@ -25,26 +28,37 @@ def create_transaction_bundle(address, tag, value): return bundle -def create_and_attach_transaction(api, arg_list): +def create_and_attach_transaction(api, value_transaction, arg_list, *reference): """ Create a transaction and attach it to the tangle. :param api: The api target you would like to make the call to + :param value_transaction: A bool to determine if this transaction is a value or zero value transaction :param arg_list: The argument list (dictionary) for the transaction :return sent: The return value for the attachToTangle call (contains the attached transaction trytes) """ transaction = ProposedTransaction(**arg_list) - bundle = ProposedBundle() - bundle.add_transaction(transaction) - bundle.finalize() - trytes = str(bundle[0].as_tryte_string()) + if value_transaction: + inputs = api.get_inputs(start=0, stop=10, threshold=0) + prepared_transaction = api.prepare_transfer( + transfers=[transaction], + inputs=[inputs['inputs'][0]], + change_address=Address(static.TEST_EMPTY_ADDRESS) + ) + else: + prepared_transaction = api.prepare_transfer( + transfers=[transaction] + ) gtta = api.get_transactions_to_approve(depth=3) - branch = str(gtta['branchTransaction']) trunk = str(gtta['trunkTransaction']) + if reference: + branch = reference[0] + else: + branch = 
str(gtta['branchTransaction']) - sent = api.attach_to_tangle(trunk, branch, [trytes], 9) + sent = api.attach_to_tangle(trunk, branch, prepared_transaction['trytes'], 9) return sent @@ -61,3 +75,68 @@ def attach_store_and_broadcast(api, args_list): api.broadcast_transactions(transaction.get('trytes')) logger.info('Done attaching, storing and broadcasting') return transaction + + +def check_for_seed(arg_list): + """ + Checks the given argument list for a seed, if none is provided, returns an empty string + + :param arg_list: The argument list for the step that will be searched + :return: The seed if provided, empty string if not + """ + seed = "" + for arg in arg_list: + if arg['keys'] == 'seed' and arg['type'] == 'staticList': + seed = arg['values'] + + return seed + + +def fetch_transaction_from_list(args, node): + """ + Fetches a reference transaction from either a static value list in the ../staticValues.py file, or from the response + for a previous "findTransactions" call. + + :param args: The step argument list + :param node: The current working node + :return: The transaction to be used as a reference + """ + + options = {} + api_utils.prepare_options(args, options) + + if args[0]['type'] == 'responseValue': + transaction_list = value_fetch.fetch_response(args[0]['values']) + reference_transaction = transaction_list[node][len(transaction_list) - 1] + elif args[0]['type'] == 'staticValue': + transaction_list = options['transactions'] + reference_transaction = transaction_list[len(transaction_list) - 1] + + assert reference_transaction, "No reference transaction found (Possibly incorrect argument type, check gherkin file" + + return reference_transaction + + +def evaluate_and_send(api, seed, arg_list): + """ + Prepares a transaction for sending. If the provided seed isn't empty, it changes the bool value to be passed on to + to the create_and_attach_transaction() function to instruct it to look for an available balance. 
+ + :param api: The api target you would like to make the call to + :param seed: The seed associated with the given api (This can be an empty string if none is used) + :param arg_list: The argument list (dictionary) for the transaction + :return: The transaction object created in the create_and_attach_transaction() function + """ + is_value_transaction = False + + if seed != "": + is_value_transaction = True + + options = {} + api_utils.prepare_options(arg_list, options) + api_utils.prepare_transaction_arguments(options) + + transaction = create_and_attach_transaction(api, is_value_transaction, options) + api.broadcast_and_store(transaction.get('trytes')) + + return transaction diff --git a/src/main/java/com/iota/iri/IRI.java b/src/main/java/com/iota/iri/IRI.java index 327d822044..674898a132 100644 --- a/src/main/java/com/iota/iri/IRI.java +++ b/src/main/java/com/iota/iri/IRI.java @@ -5,7 +5,8 @@ import com.iota.iri.conf.ConfigFactory; import com.iota.iri.conf.IotaConfig; import com.iota.iri.service.API; - +import com.iota.iri.utils.IotaUtils; +import com.iota.iri.service.restserver.resteasy.RestEasy; import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -14,6 +15,7 @@ import com.beust.jcommander.ParameterException; import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.BooleanUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,7 +44,6 @@ public class IRI { public static final String MAINNET_NAME = "IRI"; public static final String TESTNET_NAME = "IRI Testnet"; - public static final String VERSION = "1.7.0-RELEASE"; /** * The entry point of IRI. @@ -114,18 +115,23 @@ private static class IRILauncher { */ public static void main(String [] args) throws Exception { IotaConfig config = createConfiguration(args); - log.info("Welcome to {} {}", config.isTestnet() ? TESTNET_NAME : MAINNET_NAME, VERSION); + String version = IotaUtils.getIriVersion(); + log.info("Welcome to {} {}", config.isTestnet() ? 
TESTNET_NAME : MAINNET_NAME, version); iota = new Iota(config); ixi = new IXI(iota); - api = new API(iota, ixi); + api = new API(iota.configuration, ixi, iota.transactionRequester, + iota.spentAddressesService, iota.tangle, iota.bundleValidator, + iota.snapshotProvider, iota.ledgerService, iota.node, iota.tipsSelector, + iota.tipsViewModel, iota.transactionValidator, + iota.latestMilestoneTracker); shutdownHook(); try { iota.init(); - api.init(); //TODO redundant parameter but we will touch this when we refactor IXI ixi.init(config.getIxiDir()); + api.init(new RestEasy(iota.configuration)); log.info("IOTA Node initialised correctly."); } catch (Exception e) { log.error("Exception during IOTA node initialisation: ", e); @@ -154,7 +160,7 @@ private static IotaConfig createConfiguration(String[] args) { IotaConfig iotaConfig = null; String message = "Configuration is created using "; try { - boolean testnet = ArrayUtils.contains(args, Config.TESTNET_FLAG); + boolean testnet = isTestnet(args); File configFile = chooseConfigFile(args); if (configFile != null) { iotaConfig = ConfigFactory.createFromFile(configFile, testnet); @@ -184,6 +190,23 @@ private static IotaConfig createConfiguration(String[] args) { return iotaConfig; } + /** + * We are connected to testnet when {@link Config#TESTNET_FLAG} is passed in program startup, + * following with true + * + * @param args the list of program startup arguments + * @return true if this is testnet, otherwise false + */ + private static boolean isTestnet(String[] args) { + int index = ArrayUtils.indexOf(args, Config.TESTNET_FLAG); + if (index != -1 && args.length > index+1) { + Boolean bool = BooleanUtils.toBooleanObject(args[index+1]); + return bool == null ? false : bool; + } + + return false; + } + /** * Parses the command line arguments for a config file that can be provided by parameter -c * or parameter --config. If no filename was provided we fall back to iota.ini file. 
diff --git a/src/main/java/com/iota/iri/IXI.java b/src/main/java/com/iota/iri/IXI.java index 94b7e9ff79..05d4f3c79a 100644 --- a/src/main/java/com/iota/iri/IXI.java +++ b/src/main/java/com/iota/iri/IXI.java @@ -196,7 +196,7 @@ public AbstractResponse processCommand(final String command, Map if (matcher.find()) { Map> ixiMap = ixiAPI.get(matcher.group(1)); - if (ixiMap != null) { + if (ixiMap != null && ixiMap.containsKey(matcher.group(2))) { return ixiMap.get(matcher.group(2)).call(request); } } diff --git a/src/main/java/com/iota/iri/Iota.java b/src/main/java/com/iota/iri/Iota.java index dd24a03c0a..d9a14be2c8 100644 --- a/src/main/java/com/iota/iri/Iota.java +++ b/src/main/java/com/iota/iri/Iota.java @@ -26,11 +26,11 @@ import com.iota.iri.storage.*; import com.iota.iri.storage.rocksDB.RocksDBPersistenceProvider; import com.iota.iri.utils.Pair; -import com.iota.iri.zmq.MessageQ; import java.security.SecureRandom; import java.util.List; +import com.iota.iri.zmq.ZmqMessageQueueProvider; import org.apache.commons.lang3.NotImplementedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,8 +56,9 @@ *

* Validation of a transaction is the process by which other devices choose the transaction. * This is done via a {@link TipSelector} algorithm, after which the transaction performs - * the necessary proof-of-work in order to cast their vote of confirmation/approval upon those tips.
- * + * the necessary proof-of-work in order to cast their vote of confirmation/approval upon those tips. + *

+ *

* As many other transactions repeat this process on top of each other, * validation of the transaction in question slowly builds up enough verifications. * Eventually this will reach a minimum acceptable verification threshold. @@ -106,7 +107,6 @@ public class Iota { public final Replicator replicator; public final IotaConfig configuration; public final TipsViewModel tipsViewModel; - public final MessageQ messageQ; public final TipSelector tipsSelector; /** @@ -141,12 +141,11 @@ public Iota(IotaConfig configuration) throws TransactionPruningException, Snapsh // legacy code bundleValidator = new BundleValidator(); tangle = new Tangle(); - messageQ = MessageQ.createWith(configuration); tipsViewModel = new TipsViewModel(); - transactionRequester = new TransactionRequester(tangle, snapshotProvider, messageQ); + transactionRequester = new TransactionRequester(tangle, snapshotProvider); transactionValidator = new TransactionValidator(tangle, snapshotProvider, tipsViewModel, transactionRequester); node = new Node(tangle, snapshotProvider, transactionValidator, transactionRequester, tipsViewModel, - latestMilestoneTracker, messageQ, configuration); + latestMilestoneTracker, configuration); replicator = new Replicator(node, configuration); udpReceiver = new UDPReceiver(node, configuration); tipsSolidifier = new TipsSolidifier(tangle, transactionValidator, tipsViewModel, configuration); @@ -156,8 +155,10 @@ public Iota(IotaConfig configuration) throws TransactionPruningException, Snapsh } /** + *

* Adds all database providers, and starts initialization of our services. - * According to the {@link IotaConfig}, data is optionally cleared, reprocessed and reverified.
+ * According to the {@link IotaConfig}, data is optionally cleared, reprocessed and reverified. + *

* After this function, incoming and outbound transaction processing has started. * * @throws Exception If along the way a service fails to initialize. @@ -208,17 +209,17 @@ private void injectDependencies() throws SnapshotException, TransactionPruningEx if (localSnapshotManager != null) { localSnapshotManager.init(snapshotProvider, snapshotService, transactionPruner, configuration); } - milestoneService.init(tangle, snapshotProvider, snapshotService, bundleValidator, messageQ, configuration); + milestoneService.init(tangle, snapshotProvider, snapshotService, bundleValidator, configuration); latestMilestoneTracker.init(tangle, snapshotProvider, milestoneService, milestoneSolidifier, - messageQ, configuration); + configuration); latestSolidMilestoneTracker.init(tangle, snapshotProvider, milestoneService, ledgerService, - latestMilestoneTracker, messageQ); + latestMilestoneTracker); seenMilestonesRetriever.init(tangle, snapshotProvider, transactionRequester); milestoneSolidifier.init(snapshotProvider, transactionValidator); ledgerService.init(tangle, snapshotProvider, snapshotService, milestoneService, spentAddressesService, bundleValidator); if (transactionPruner != null) { - transactionPruner.init(tangle, snapshotProvider, spentAddressesService, tipsViewModel, configuration); + transactionPruner.init(tangle, snapshotProvider, spentAddressesService, spentAddressesProvider, tipsViewModel, configuration); } transactionRequesterWorker.init(tangle, transactionRequester, tipsViewModel, node); } @@ -273,7 +274,6 @@ public void shutdown() throws Exception { replicator.shutdown(); transactionValidator.shutdown(); tangle.shutdown(); - messageQ.shutdown(); // free the resources of the snapshot provider last because all other instances need it snapshotProvider.shutdown(); @@ -296,7 +296,7 @@ private void initializeTangle() { } } if (configuration.isZmqEnabled()) { - tangle.addPersistenceProvider(new ZmqPublishProvider(messageQ)); + tangle.addMessageQueueProvider(new 
ZmqMessageQueueProvider(configuration)); } } @@ -305,7 +305,7 @@ private TipSelector createTipSelector(TipSelConfig config) { latestMilestoneTracker); RatingCalculator ratingCalculator = new CumulativeWeightCalculator(tangle, snapshotProvider); TailFinder tailFinder = new TailFinderImpl(tangle); - Walker walker = new WalkerAlpha(tailFinder, tangle, messageQ, new SecureRandom(), config); + Walker walker = new WalkerAlpha(tailFinder, tangle, new SecureRandom(), config); return new TipSelectorImpl(tangle, snapshotProvider, ledgerService, entryPointSelector, ratingCalculator, walker, config); } diff --git a/src/main/java/com/iota/iri/TransactionValidator.java b/src/main/java/com/iota/iri/TransactionValidator.java index dfe8409f4c..6f413e17ea 100644 --- a/src/main/java/com/iota/iri/TransactionValidator.java +++ b/src/main/java/com/iota/iri/TransactionValidator.java @@ -1,5 +1,6 @@ package com.iota.iri; +import com.google.common.annotations.VisibleForTesting; import com.iota.iri.controllers.TipsViewModel; import com.iota.iri.controllers.TransactionViewModel; import com.iota.iri.crypto.Curl; @@ -88,7 +89,7 @@ public void init(boolean testnet, int mwm) { newSolidThread.start(); } - //Package Private For Testing + @VisibleForTesting void setMwm(boolean testnet, int mwm) { minWeightMagnitude = mwm; @@ -246,7 +247,7 @@ public boolean checkSolidity(Hash hash, boolean milestone, int maxProcessedTrans if(fromHash(tangle, hash).isSolid()) { return true; } - Set analyzedHashes = new HashSet<>(snapshotProvider.getInitialSnapshot().getSolidEntryPoints().keySet()); + LinkedHashSet analyzedHashes = new LinkedHashSet<>(snapshotProvider.getInitialSnapshot().getSolidEntryPoints().keySet()); if(maxProcessedTransactions != Integer.MAX_VALUE) { maxProcessedTransactions += analyzedHashes.size(); } @@ -254,24 +255,26 @@ public boolean checkSolidity(Hash hash, boolean milestone, int maxProcessedTrans final Queue nonAnalyzedTransactions = new LinkedList<>(Collections.singleton(hash)); Hash 
hashPointer; while ((hashPointer = nonAnalyzedTransactions.poll()) != null) { - if (analyzedHashes.add(hashPointer)) { - if(analyzedHashes.size() >= maxProcessedTransactions) { - return false; - } + if (!analyzedHashes.add(hashPointer)) { + continue; + } + + if (analyzedHashes.size() >= maxProcessedTransactions) { + return false; + } + + TransactionViewModel transaction = fromHash(tangle, hashPointer); + if (!transaction.isSolid() && !snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(hashPointer)) { + if (transaction.getType() == PREFILLED_SLOT) { + solid = false; - final TransactionViewModel transaction = fromHash(tangle, hashPointer); - if(!transaction.isSolid() && !snapshotProvider.getInitialSnapshot().hasSolidEntryPoint(hashPointer)) { - if (transaction.getType() == PREFILLED_SLOT) { - solid = false; - - if (!transactionRequester.isTransactionRequested(hashPointer, milestone)) { - transactionRequester.requestTransaction(hashPointer, milestone); - break; - } - } else { - nonAnalyzedTransactions.offer(transaction.getTrunkTransactionHash()); - nonAnalyzedTransactions.offer(transaction.getBranchTransactionHash()); + if (!transactionRequester.isTransactionRequested(hashPointer, milestone)) { + transactionRequester.requestTransaction(hashPointer, milestone); + continue; } + } else { + nonAnalyzedTransactions.offer(transaction.getTrunkTransactionHash()); + nonAnalyzedTransactions.offer(transaction.getBranchTransactionHash()); } } } @@ -315,7 +318,7 @@ private Runnable spawnSolidTransactionsPropagation() { * its children (approvers) and try to quickly solidify them with {@link #quietQuickSetSolid}. * If we manage to solidify the transactions, we add them to the solidification queue for a traversal by a later run. 
*/ - //Package private for testing + @VisibleForTesting void propagateSolidTransactions() { Set newSolidHashes = new HashSet<>(); useFirst.set(!useFirst.get()); @@ -448,7 +451,7 @@ private boolean checkApproovee(TransactionViewModel approovee) throws Exception return approovee.isSolid(); } - //Package Private For Testing + @VisibleForTesting boolean isNewSolidTxSetsEmpty () { return newSolidTransactionsOne.isEmpty() && newSolidTransactionsTwo.isEmpty(); } diff --git a/src/main/java/com/iota/iri/conf/APIConfig.java b/src/main/java/com/iota/iri/conf/APIConfig.java index 58fc960b6e..7a7844bca2 100644 --- a/src/main/java/com/iota/iri/conf/APIConfig.java +++ b/src/main/java/com/iota/iri/conf/APIConfig.java @@ -1,5 +1,6 @@ package com.iota.iri.conf; +import java.net.InetAddress; import java.util.List; /** @@ -24,6 +25,11 @@ public interface APIConfig extends Config { */ List getRemoteLimitApi(); + /** + * @return {@value Descriptions#REMOTE_TRUSTED_API_HOSTS} + */ + List getRemoteTrustedApiHosts(); + /** * @return {@value Descriptions#MAX_FIND_TRANSACTIONS} */ @@ -49,11 +55,15 @@ public interface APIConfig extends Config { */ String getRemoteAuth(); + /** + * These descriptions are used by JCommander when you enter java iri.jar --help at the command line. + */ interface Descriptions { String PORT = "The port that will be used by the API."; String API_HOST = "The host on which the API will listen to. Set to 0.0.0.0 to accept any host."; String REMOTE_LIMIT_API = "Commands that should be ignored by API."; - String REMOTE_AUTH = "A string in the form of :. Used to access the API"; + String REMOTE_TRUSTED_API_HOSTS = "Open the API interface to defined hosts. You can specify multiple hosts in a comma separated list \"--remote-trusted-api-hosts 192.168.0.55,10.0.0.10\". You must also provide the \"--remote\" parameter. Warning: \"--remote-limit-api\" will have no effect for these hosts."; + String REMOTE_AUTH = "A string in the form of :. Used to access the API. 
You can provide a clear text or an hashed password."; String MAX_FIND_TRANSACTIONS = "The maximal number of transactions that may be returned by the \"findTransactions\" API call. If the number of transactions found exceeds this number an error will be returned."; String MAX_REQUESTS_LIST = "The maximal number of parameters one can place in an API call. If the number parameters exceeds this number an error will be returned"; String MAX_GET_TRYTES = "The maximal number of trytes that may be returned by the \"getTrytes\" API call. If the number of transactions found exceeds this number an error will be returned."; diff --git a/src/main/java/com/iota/iri/conf/BaseIotaConfig.java b/src/main/java/com/iota/iri/conf/BaseIotaConfig.java index 6e859ab590..104f4dcd2b 100644 --- a/src/main/java/com/iota/iri/conf/BaseIotaConfig.java +++ b/src/main/java/com/iota/iri/conf/BaseIotaConfig.java @@ -12,29 +12,36 @@ import com.iota.iri.utils.IotaUtils; import org.apache.commons.lang3.ArrayUtils; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; -/* +/** Note: the fields in this class are being deserialized from Jackson so they must follow Java Bean convention. Meaning that every field must have a getter that is prefixed with `get` unless it is a boolean and then it should be prefixed with `is`. 
*/ public abstract class BaseIotaConfig implements IotaConfig { - protected static final String SPLIT_STRING_TO_LIST_REGEX = ",| "; + public static final String SPLIT_STRING_TO_LIST_REGEX = ",| "; private boolean help; - + private boolean testnet = false; + //API protected int port = Defaults.API_PORT; protected String apiHost = Defaults.API_HOST; protected List remoteLimitApi = Defaults.REMOTE_LIMIT_API; + protected List remoteTrustedApiHosts = Defaults.REMOTE_LIMIT_API_HOSTS; protected int maxFindTransactions = Defaults.MAX_FIND_TRANSACTIONS; protected int maxRequestsList = Defaults.MAX_REQUESTS_LIST; protected int maxGetTrytes = Defaults.MAX_GET_TRYTES; protected int maxBodyLength = Defaults.MAX_BODY_LENGTH; protected String remoteAuth = Defaults.REMOTE_AUTH; + //We don't have a REMOTE config but we have a remote flag. We must add a field for JCommander private boolean remote; @@ -69,22 +76,29 @@ public abstract class BaseIotaConfig implements IotaConfig { protected double pPropagateRequest = Defaults.P_PROPAGATE_REQUEST; //ZMQ - protected boolean zmqEnabled = Defaults.ZMQ_ENABLED; + protected boolean zmqEnableTcp = Defaults.ZMQ_ENABLE_TCP; + protected boolean zmqEnableIpc = Defaults.ZMQ_ENABLE_IPC; protected int zmqPort = Defaults.ZMQ_PORT; protected int zmqThreads = Defaults.ZMQ_THREADS; protected String zmqIpc = Defaults.ZMQ_IPC; protected int qSizeNode = Defaults.QUEUE_SIZE; protected int cacheSizeBytes = Defaults.CACHE_SIZE_BYTES; - + /** + * @deprecated This field was replaced by {@link #zmqEnableTcp} and {@link #zmqEnableIpc}. It is only needed + * for backward compatibility to --zmq-enabled parameter with JCommander. 
+ */ + @Deprecated + private boolean zmqEnabled; //Tip Selection protected int maxDepth = Defaults.MAX_DEPTH; protected double alpha = Defaults.ALPHA; + protected int tipSelectionTimeoutSec = Defaults.TIP_SELECTION_TIMEOUT_SEC; private int maxAnalyzedTransactions = Defaults.MAX_ANALYZED_TXS; - + //Tip Solidification protected boolean tipSolidifierEnabled = Defaults.TIP_SOLIDIFIER_ENABLED; - + //PearlDiver protected int powThreads = Defaults.POW_THREADS; @@ -108,11 +122,11 @@ public JCommander parseConfigFromArgs(String[] args) throws ParameterException { //One can invoke help via INI file (feature/bug) so we always create JCommander even if args is empty JCommander jCommander = JCommander.newBuilder() .addObject(this) - //This is in order to enable the `--conf` and `--testnet` option + //This is in order to enable the `--conf` option .acceptUnknownOptions(true) .allowParameterOverwriting(true) //This is the first line of JCommander Usage - .programName("java -jar iri-" + IRI.VERSION + ".jar") + .programName("java -jar iri-" + IotaUtils.getIriVersion() + ".jar") .build(); if (ArrayUtils.isNotEmpty(args)) { jCommander.parse(args); @@ -124,6 +138,17 @@ public JCommander parseConfigFromArgs(String[] args) throws ParameterException { public boolean isHelp() { return help; } + + @Override + public boolean isTestnet() { + return testnet; + } + + @JsonIgnore + @Parameter(names = {Config.TESTNET_FLAG}, description = Config.Descriptions.TESTNET, arity = 1) + protected void setTestnet(boolean testnet) { + this.testnet = testnet; + } @JsonProperty @Parameter(names = {"--help", "-h"} , help = true, hidden = true) @@ -144,6 +169,10 @@ public void setPort(int port) { @Override public String getApiHost() { + if (remote) { + return "0.0.0.0"; + } + return apiHost; } @@ -154,9 +183,9 @@ protected void setApiHost(String apiHost) { } @JsonIgnore - @Parameter(names = {"--remote"}, description = APIConfig.Descriptions.REMOTE) + @Parameter(names = {"--remote"}, description = 
APIConfig.Descriptions.REMOTE, arity = 1) protected void setRemote(boolean remote) { - this.apiHost = "0.0.0.0"; + this.remote = remote; } @Override @@ -170,6 +199,30 @@ protected void setRemoteLimitApi(String remoteLimitApi) { this.remoteLimitApi = IotaUtils.splitStringToImmutableList(remoteLimitApi, SPLIT_STRING_TO_LIST_REGEX); } + @Override + public List getRemoteTrustedApiHosts() { + return remoteTrustedApiHosts; + } + + @JsonProperty + @Parameter(names = {"--remote-trusted-api-hosts"}, description = APIConfig.Descriptions.REMOTE_TRUSTED_API_HOSTS) + public void setRemoteTrustedApiHosts(String remoteTrustedApiHosts) { + List addresses = IotaUtils.splitStringToImmutableList(remoteTrustedApiHosts, SPLIT_STRING_TO_LIST_REGEX); + List inetAddresses = addresses.stream().map(host -> { + try { + return InetAddress.getByName(host.trim()); + } catch (UnknownHostException e) { + throw new ParameterException("Invalid value for --remote-trusted-api-hosts address: ", e); + } + }).collect(Collectors.toList()); + + // always make sure that localhost exists as trusted host + if(!inetAddresses.contains(Defaults.REMOTE_LIMIT_API_DEFAULT_HOST)) { + inetAddresses.add(Defaults.REMOTE_LIMIT_API_DEFAULT_HOST); + } + this.remoteTrustedApiHosts = Collections.unmodifiableList(inetAddresses); + } + @Override public int getMaxFindTransactions() { return maxFindTransactions; @@ -374,7 +427,7 @@ public boolean isRevalidate() { } @JsonProperty - @Parameter(names = {"--revalidate"}, description = DbConfig.Descriptions.REVALIDATE) + @Parameter(names = {"--revalidate"}, description = DbConfig.Descriptions.REVALIDATE, arity = 1) protected void setRevalidate(boolean revalidate) { this.revalidate = revalidate; } @@ -385,7 +438,7 @@ public boolean isRescanDb() { } @JsonProperty - @Parameter(names = {"--rescan"}, description = DbConfig.Descriptions.RESCAN_DB) + @Parameter(names = {"--rescan"}, description = DbConfig.Descriptions.RESCAN_DB, arity = 1) protected void setRescanDb(boolean rescanDb) { 
this.rescanDb = rescanDb; } @@ -494,8 +547,8 @@ public int getLocalSnapshotsPruningDelay() { SnapshotConfig.Descriptions.LOCAL_SNAPSHOTS_PRUNING_DELAY) protected void setLocalSnapshotsPruningDelay(int localSnapshotsPruningDelay) { if (localSnapshotsPruningDelay < Defaults.LOCAL_SNAPSHOTS_PRUNING_DELAY_MIN) { - throw new ParameterException("LOCAL_SNAPSHOTS_PRUNING_DELAY should be at least " - + Defaults.LOCAL_SNAPSHOTS_PRUNING_DELAY_MIN + throw new ParameterException("LOCAL_SNAPSHOTS_PRUNING_DELAY should be at least " + + Defaults.LOCAL_SNAPSHOTS_PRUNING_DELAY_MIN + "(found " + localSnapshotsPruningDelay +")"); } @@ -622,15 +675,48 @@ protected void setSpentAddressesDbLogPath(String spentAddressesDbLogPath) { this.spentAddressesDbLogPath = spentAddressesDbLogPath; } + /** + * Checks if ZMQ is enabled. + * @return true if zmqEnableTcp or zmqEnableIpc is set. + */ @Override public boolean isZmqEnabled() { - return zmqEnabled; + return zmqEnableTcp || zmqEnableIpc; } + /** + * Activates ZMQ to listen on TCP and IPC. + * @deprecated Use {@link #setZmqEnableTcp(boolean) and/or {@link #setZmqEnableIpc(boolean)}} instead. + * @param zmqEnabled true if ZMQ should listen in TCP and IPC. 
+ */ + @Deprecated @JsonProperty - @Parameter(names = "--zmq-enabled", description = ZMQConfig.Descriptions.ZMQ_ENABLED) + @Parameter(names = "--zmq-enabled", description = ZMQConfig.Descriptions.ZMQ_ENABLED, arity = 1) protected void setZmqEnabled(boolean zmqEnabled) { - this.zmqEnabled = zmqEnabled; + this.zmqEnableTcp = zmqEnabled; + this.zmqEnableIpc = zmqEnabled; + } + + @Override + public boolean isZmqEnableTcp() { + return zmqEnableTcp; + } + + @JsonProperty + @Parameter(names = "--zmq-enable-tcp", description = ZMQConfig.Descriptions.ZMQ_ENABLE_TCP, arity = 1) + public void setZmqEnableTcp(boolean zmqEnableTcp) { + this.zmqEnableTcp = zmqEnableTcp; + } + + @Override + public boolean isZmqEnableIpc() { + return zmqEnableIpc; + } + + @JsonProperty + @Parameter(names = "--zmq-enable-ipc", description = ZMQConfig.Descriptions.ZMQ_ENABLE_IPC, arity = 1) + public void setZmqEnableIpc(boolean zmqEnableIpc) { + this.zmqEnableIpc = zmqEnableIpc; } @Override @@ -642,6 +728,7 @@ public int getZmqPort() { @Parameter(names = "--zmq-port", description = ZMQConfig.Descriptions.ZMQ_PORT) protected void setZmqPort(int zmqPort) { this.zmqPort = zmqPort; + this.zmqEnableTcp = true; } @Override @@ -650,7 +737,7 @@ public int getZmqThreads() { } @JsonProperty - @Parameter(names = "--zmq-threads", description = ZMQConfig.Descriptions.ZMQ_PORT) + @Parameter(names = "--zmq-threads", description = ZMQConfig.Descriptions.ZMQ_THREADS) protected void setZmqThreads(int zmqThreads) { this.zmqThreads = zmqThreads; } @@ -664,6 +751,7 @@ public String getZmqIpc() { @Parameter(names = "--zmq-ipc", description = ZMQConfig.Descriptions.ZMQ_IPC) protected void setZmqIpc(String zmqIpc) { this.zmqIpc = zmqIpc; + this.zmqEnableIpc = true; } @Override @@ -740,26 +828,38 @@ public double getAlpha() { protected void setAlpha(double alpha) { this.alpha = alpha; } - + + @Override + public int getTipSelectionTimeoutSec() { + return tipSelectionTimeoutSec; + } + + @JsonProperty + @Parameter(names = 
"--tip-selection-timeout-sec", description = TipSelConfig.Descriptions.TIP_SELECTION_TIMEOUT_SEC) + protected void setTipSelectionTimeoutSec(int tipSelectionTimeoutSec) { + this.tipSelectionTimeoutSec = tipSelectionTimeoutSec; + } + @Override public boolean isTipSolidifierEnabled() { return tipSolidifierEnabled; } @JsonProperty - @Parameter(names = "--tip-solidifier", description = SolidificationConfig.Descriptions.TIP_SOLIDIFIER, + @Parameter(names = "--tip-solidifier", description = SolidificationConfig.Descriptions.TIP_SOLIDIFIER, arity = 1) protected void setTipSolidifierEnabled(boolean tipSolidifierEnabled) { this.tipSolidifierEnabled = tipSolidifierEnabled; } - + @Override public int getBelowMaxDepthTransactionLimit() { return maxAnalyzedTransactions; } @JsonProperty - @Parameter(names = "--max-analyzed-transactions", description = TipSelConfig.Descriptions.BELOW_MAX_DEPTH_TRANSACTION_LIMIT) + @Parameter(names = "--max-analyzed-transactions", + description = TipSelConfig.Descriptions.BELOW_MAX_DEPTH_TRANSACTION_LIMIT) protected void setBelowMaxDepthTransactionLimit(int maxAnalyzedTransactions) { this.maxAnalyzedTransactions = maxAnalyzedTransactions; } @@ -775,11 +875,16 @@ protected void setPowThreads(int powThreads) { this.powThreads = powThreads; } + /** + * Represents the default values primarily used by the {@link BaseIotaConfig} field initialisation. 
+ */ public interface Defaults { //API int API_PORT = 14265; String API_HOST = "localhost"; List REMOTE_LIMIT_API = IotaUtils.createImmutableList("addNeighbors", "getNeighbors", "removeNeighbors", "attachToTangle", "interruptAttachingToTangle"); + InetAddress REMOTE_LIMIT_API_DEFAULT_HOST = InetAddress.getLoopbackAddress(); + List REMOTE_LIMIT_API_HOSTS = IotaUtils.createImmutableList(REMOTE_LIMIT_API_DEFAULT_HOST); int MAX_FIND_TRANSACTIONS = 100_000; int MAX_REQUESTS_LIST = 1_000; int MAX_GET_TRYTES = 10_000; @@ -823,16 +928,18 @@ public interface Defaults { //Zmq int ZMQ_THREADS = 1; + boolean ZMQ_ENABLE_IPC = false; String ZMQ_IPC = "ipc://iri"; - boolean ZMQ_ENABLED = false; + boolean ZMQ_ENABLE_TCP = false; int ZMQ_PORT = 5556; //TipSel int MAX_DEPTH = 15; double ALPHA = 0.001d; - + int TIP_SELECTION_TIMEOUT_SEC = 60; + //Tip solidification - boolean TIP_SOLIDIFIER_ENABLED = true; + boolean TIP_SOLIDIFIER_ENABLED = false; //PearlDiver int POW_THREADS = 0; @@ -847,8 +954,8 @@ public interface Defaults { //Snapshot boolean LOCAL_SNAPSHOTS_ENABLED = true; - boolean LOCAL_SNAPSHOTS_PRUNING_ENABLED = true; - + boolean LOCAL_SNAPSHOTS_PRUNING_ENABLED = false; + int LOCAL_SNAPSHOTS_PRUNING_DELAY = 40000; int LOCAL_SNAPSHOTS_PRUNING_DELAY_MIN = 10000; int LOCAL_SNAPSHOTS_INTERVAL_SYNCED = 10; @@ -857,7 +964,7 @@ public interface Defaults { int LOCAL_SNAPSHOTS_DEPTH_MIN = 100; String SPENT_ADDRESSES_DB_PATH = "spent-addresses-db"; String SPENT_ADDRESSES_DB_LOG_PATH = "spent-addresses-log"; - + String LOCAL_SNAPSHOTS_BASE_PATH = "mainnet"; String SNAPSHOT_FILE = "/snapshotMainnet.txt"; String SNAPSHOT_SIG_FILE = "/snapshotMainnet.sig"; diff --git a/src/main/java/com/iota/iri/conf/Config.java b/src/main/java/com/iota/iri/conf/Config.java index 0a1dbd90d9..1f248c2f01 100644 --- a/src/main/java/com/iota/iri/conf/Config.java +++ b/src/main/java/com/iota/iri/conf/Config.java @@ -6,7 +6,6 @@ public interface Config { String TESTNET_FLAG = "--testnet"; - /** * @return {@value 
Descriptions#TESTNET} */ diff --git a/src/main/java/com/iota/iri/conf/TestnetConfig.java b/src/main/java/com/iota/iri/conf/TestnetConfig.java index a35731f7f8..a194248bdc 100644 --- a/src/main/java/com/iota/iri/conf/TestnetConfig.java +++ b/src/main/java/com/iota/iri/conf/TestnetConfig.java @@ -55,7 +55,8 @@ public boolean isDontValidateTestnetMilestoneSig() { } @JsonProperty - @Parameter(names = "--testnet-no-coo-validation", description = MilestoneConfig.Descriptions.DONT_VALIDATE_TESTNET_MILESTONE_SIG) + @Parameter(names = "--testnet-no-coo-validation", + description = MilestoneConfig.Descriptions.DONT_VALIDATE_TESTNET_MILESTONE_SIG, arity = 1) protected void setDontValidateTestnetMilestoneSig(boolean dontValidateTestnetMilestoneSig) { this.dontValidateTestnetMilestoneSig = dontValidateTestnetMilestoneSig; } diff --git a/src/main/java/com/iota/iri/conf/TipSelConfig.java b/src/main/java/com/iota/iri/conf/TipSelConfig.java index 542ad47eed..935a8d7a28 100644 --- a/src/main/java/com/iota/iri/conf/TipSelConfig.java +++ b/src/main/java/com/iota/iri/conf/TipSelConfig.java @@ -17,6 +17,11 @@ public interface TipSelConfig extends Config { */ double getAlpha(); + /** + * @return Descriptions#TIP_SELECTION_TIMEOUT_SEC + */ + int getTipSelectionTimeoutSec(); + /** * @return Descriptions#BELOW_MAX_DEPTH_TRANSACTION_LIMIT */ @@ -27,6 +32,8 @@ interface Descriptions { String MAX_DEPTH = "The maximal number of previous milestones from where you can perform the random walk"; String ALPHA = "Parameter that defines the randomness of the tip selection. " + "Should be a number between 0 to infinity, where 0 is most random and infinity is most deterministic."; + String TIP_SELECTION_TIMEOUT_SEC = "Defines the maximum number of seconds the tip-selection is allowed to be ongoing. 
" + + "If the threshold is exceeded, tip-selection is aborted."; String BELOW_MAX_DEPTH_TRANSACTION_LIMIT = "The maximal number of unconfirmed transactions that may be analyzed in " + "order to find the latest milestone the transaction that we are stepping on during the walk approves"; } diff --git a/src/main/java/com/iota/iri/conf/ZMQConfig.java b/src/main/java/com/iota/iri/conf/ZMQConfig.java index b3a9b304b7..d5593ee7e5 100644 --- a/src/main/java/com/iota/iri/conf/ZMQConfig.java +++ b/src/main/java/com/iota/iri/conf/ZMQConfig.java @@ -2,17 +2,42 @@ public interface ZMQConfig extends Config { + /** + * @return Descriptions#ZMQ_ENABLED + */ boolean isZmqEnabled(); + /** + * @return Descriptions#ZMQ_ENABLE_TCP + */ + boolean isZmqEnableTcp(); + + /** + * @return Descriptions#ZMQ_ENABLE_IPC + */ + boolean isZmqEnableIpc(); + + /** + * @return Descriptions#ZMQ_PORT + */ int getZmqPort(); + /** + * @return Descriptions#ZMQ_THREADS + */ int getZmqThreads(); + /** + * @return Descriptions#ZMQ_IPC + */ String getZmqIpc(); interface Descriptions { - String ZMQ_ENABLED = "Enabling zmq channels."; String ZMQ_PORT = "The port used to connect to the ZMQ feed"; String ZMQ_IPC = "The path that is used to communicate with ZMQ in IPC"; + String ZMQ_ENABLED = "Enable zmq channels (deprecated). Use --zmq-enable-tcp or --zmq-enable-ipc instead"; + String ZMQ_ENABLE_TCP = "Enable zmq channels on tcp port 5556. Use --zmq-port=[PORT] to override."; + String ZMQ_ENABLE_IPC = "Enable zmq channels on ipc://iri. 
Use --zmq-ipc=[SOCKET] to override."; + String ZMQ_THREADS = "The threads used by ZMQ publisher"; } } diff --git a/src/main/java/com/iota/iri/controllers/TransactionViewModel.java b/src/main/java/com/iota/iri/controllers/TransactionViewModel.java index ed53da8dba..fb620f1da9 100644 --- a/src/main/java/com/iota/iri/controllers/TransactionViewModel.java +++ b/src/main/java/com/iota/iri/controllers/TransactionViewModel.java @@ -258,10 +258,9 @@ public static TransactionViewModel first(Tangle tangle) throws Exception { * @param tangle The tangle reference for the database * @param initialSnapshot snapshot that acts as genesis * @param item The string identifying the purpose of the update - * @return True if the update was successful, False if it failed * @throws Exception Thrown if any of the metadata fails to fetch, or if the database update fails */ - public boolean update(Tangle tangle, Snapshot initialSnapshot, String item) throws Exception { + public void update(Tangle tangle, Snapshot initialSnapshot, String item) throws Exception { getAddressHash(); getTrunkTransactionHash(); getBranchTransactionHash(); @@ -271,9 +270,9 @@ public boolean update(Tangle tangle, Snapshot initialSnapshot, String item) thro setAttachmentData(); setMetadata(); if (initialSnapshot.hasSolidEntryPoint(hash)) { - return false; + return; } - return tangle.update(transaction, hash, item); + tangle.update(transaction, hash, item); } /** @@ -400,6 +399,20 @@ public boolean store(Tangle tangle, Snapshot initialSnapshot) throws Exception { } return tangle.saveBatch(batch); } + + /** + * Creates a copy of the underlying {@link Transaction} object. + * + * @return the transaction object + */ + public Transaction getTransaction() { + Transaction t = new Transaction(); + + //if the supplied array to the call != null the transaction bytes are copied over from the buffer. 
+ t.read(getBytes()); + t.readMetadata(transaction.metadata()); + return t; + } /** * Gets the {@link ApproveeViewModel} of a {@link Transaction}. If the current {@link ApproveeViewModel} is null, a @@ -699,12 +712,12 @@ public void setMetadata() { : TransactionViewModel.FILLED_SLOT; } - public static void updateSolidTransactions(Tangle tangle, Snapshot initialSnapshot, final Set analyzedHashes) + public static void updateSolidTransactions(Tangle tangle, Snapshot initialSnapshot, final LinkedHashSet analyzedHashes) throws Exception { - Iterator hashIterator = analyzedHashes.iterator(); + Object[] hashes = analyzedHashes.toArray(); TransactionViewModel transactionViewModel; - while (hashIterator.hasNext()) { - transactionViewModel = TransactionViewModel.fromHash(tangle, hashIterator.next()); + for(int i = hashes.length -1; i >= 0; i--){ + transactionViewModel = TransactionViewModel.fromHash(tangle, (Hash) hashes[i]); transactionViewModel.updateHeights(tangle, initialSnapshot); @@ -765,12 +778,10 @@ public void setSnapshot(Tangle tangle, Snapshot initialSnapshot, final int index * milestone accordingly. It first checks if the {@link Transaction#milestone} flag has changed and if so, it issues * a database update. 
* - * @param tangle Tangle instance which acts as a database interface <<<<<<< HEAD - * @param isMilestone True if the {@link Transaction} is a milestone and False if not - * @throws Exception Thrown if there is an error while saving the changes to the database ======= + * @param tangle Tangle instance which acts as a database interface * @param initialSnapshot the snapshot representing the starting point of our ledger * @param isMilestone true if the transaction is a milestone and false otherwise - * @throws Exception if something goes wrong while saving the changes to the database >>>>>>> release-v1.5.6 + * @throws Exception if something goes wrong while saving the changes to the database */ public void isMilestone(Tangle tangle, Snapshot initialSnapshot, final boolean isMilestone) throws Exception { if (isMilestone != transaction.milestone) { diff --git a/src/main/java/com/iota/iri/crypto/Kerl.java b/src/main/java/com/iota/iri/crypto/Kerl.java index d30dfde821..21e5c35eda 100644 --- a/src/main/java/com/iota/iri/crypto/Kerl.java +++ b/src/main/java/com/iota/iri/crypto/Kerl.java @@ -1,5 +1,6 @@ package com.iota.iri.crypto; +import com.google.common.annotations.VisibleForTesting; import com.iota.iri.utils.Converter; import org.bouncycastle.jcajce.provider.digest.Keccak; @@ -17,10 +18,14 @@ public final class Kerl implements Sponge { private static final int BIT_HASH_LENGTH = 384; - static final int BYTE_HASH_LENGTH = BIT_HASH_LENGTH / 8; //Package Private For Testing + + @VisibleForTesting + static final int BYTE_HASH_LENGTH = BIT_HASH_LENGTH / 8; private static final BigInteger RADIX = BigInteger.valueOf(Converter.RADIX); - static final int MAX_POWERS_LONG = 40; //Package Private For Testing + + @VisibleForTesting + static final int MAX_POWERS_LONG = 40; private static final BigInteger[] RADIX_POWERS = IntStream.range(0, MAX_POWERS_LONG + 1).mapToObj(RADIX::pow).toArray(BigInteger[]::new); //delegate diff --git a/src/main/java/com/iota/iri/network/Node.java 
b/src/main/java/com/iota/iri/network/Node.java index cc34b84619..2cb7bbc0e0 100644 --- a/src/main/java/com/iota/iri/network/Node.java +++ b/src/main/java/com/iota/iri/network/Node.java @@ -11,7 +11,7 @@ import com.iota.iri.service.milestone.LatestMilestoneTracker; import com.iota.iri.service.snapshot.SnapshotProvider; import com.iota.iri.storage.Tangle; -import com.iota.iri.zmq.MessageQ; +import net.openhft.hashing.LongHashFunction; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; @@ -19,9 +19,6 @@ import org.slf4j.LoggerFactory; import java.net.*; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; import java.util.*; import java.util.concurrent.*; @@ -72,12 +69,12 @@ public class Node { private final TransactionValidator transactionValidator; private final LatestMilestoneTracker latestMilestoneTracker; private final TransactionRequester transactionRequester; - private final MessageQ messageQ; private static final SecureRandom rnd = new SecureRandom(); - private FIFOCache recentSeenBytes; + private FIFOCache recentSeenBytes; + private LongHashFunction recentSeenBytesHashFunction; private static AtomicLong recentSeenBytesMissCount = new AtomicLong(0L); private static AtomicLong recentSeenBytesHitCount = new AtomicLong(0L); @@ -99,11 +96,10 @@ public class Node { * @param transactionRequester Contains a set of transaction hashes to be requested from peers. * @param tipsViewModel Contains a hash of solid and non solid tips * @param latestMilestoneTracker Tracks milestones issued from the coordinator - * @param messageQ Responsible for publishing events on zeroMQ * @param configuration Contains all the config. 
* */ - public Node(final Tangle tangle, SnapshotProvider snapshotProvider, final TransactionValidator transactionValidator, final TransactionRequester transactionRequester, final TipsViewModel tipsViewModel, final LatestMilestoneTracker latestMilestoneTracker, final MessageQ messageQ, final NodeConfig configuration + public Node(final Tangle tangle, SnapshotProvider snapshotProvider, final TransactionValidator transactionValidator, final TransactionRequester transactionRequester, final TipsViewModel tipsViewModel, final LatestMilestoneTracker latestMilestoneTracker, final NodeConfig configuration ) { this.configuration = configuration; this.tangle = tangle; @@ -112,7 +108,6 @@ public Node(final Tangle tangle, SnapshotProvider snapshotProvider, final Transa this.transactionRequester = transactionRequester; this.tipsViewModel = tipsViewModel; this.latestMilestoneTracker = latestMilestoneTracker ; - this.messageQ = messageQ; this.reqHashSize = configuration.getRequestHashSize(); int packetSize = configuration.getTransactionPacketSize(); this.sendingPacket = new DatagramPacket(new byte[packetSize], packetSize); @@ -131,6 +126,7 @@ public void init() throws Exception { BROADCAST_QUEUE_SIZE = RECV_QUEUE_SIZE = REPLY_QUEUE_SIZE = configuration.getqSizeNode(); recentSeenBytes = new FIFOCache<>(configuration.getCacheSizeBytes(), configuration.getpDropCacheEntry()); + recentSeenBytesHashFunction = LongHashFunction.xx(); parseNeighborsConfig(); @@ -189,7 +185,7 @@ private Runnable spawnNeighborDNSRefresherThread() { final String hostname = n.getAddress().getHostString(); checkIp(hostname).ifPresent(ip -> { log.info("DNS Checker: Validating DNS Address '{}' with '{}'", hostname, ip); - messageQ.publish("dnscv %s %s", hostname, ip); + tangle.publish("dnscv %s %s", hostname, ip); final String neighborAddress = neighborIpCache.get(hostname); if (neighborAddress == null) { @@ -197,11 +193,11 @@ private Runnable spawnNeighborDNSRefresherThread() { } else { if 
(neighborAddress.equals(ip)) { log.info("{} seems fine.", hostname); - messageQ.publish("dnscc %s", hostname); + tangle.publish("dnscc %s", hostname); } else { if (configuration.isDnsRefresherEnabled()) { log.info("IP CHANGED for {}! Updating...", hostname); - messageQ.publish("dnscu %s", hostname); + tangle.publish("dnscu %s", hostname); String protocol = (n instanceof TCPNeighbor) ? "tcp://" : "udp://"; String port = ":" + n.getAddress().getPort(); @@ -297,7 +293,7 @@ public void preProcessReceivedData(byte[] receivedData, SocketAddress senderAddr try { //Transaction bytes - ByteBuffer digest = getBytesDigest(receivedData); + long digest = getBytesDigest(receivedData); //check if cached synchronized (recentSeenBytes) { @@ -318,9 +314,6 @@ public void preProcessReceivedData(byte[] receivedData, SocketAddress senderAddr addReceivedDataToReceiveQueue(receivedTransactionViewModel, neighbor); } - - } catch (NoSuchAlgorithmException e) { - log.error("MessageDigest: " + e); } catch (final TransactionValidator.StaleTimestampException e) { log.debug(e.getMessage()); try { @@ -360,7 +353,7 @@ public void preProcessReceivedData(byte[] receivedData, SocketAddress senderAddr } if (((hitCount + missCount) % 50000L == 0)) { log.info("RecentSeenBytes cache hit/miss ratio: " + hitCount + "/" + missCount); - messageQ.publish("hmr %d/%d", hitCount, missCount); + tangle.publish("hmr %d/%d", hitCount, missCount); recentSeenBytesMissCount.set(0L); recentSeenBytesHitCount.set(0L); } @@ -375,7 +368,7 @@ public void preProcessReceivedData(byte[] receivedData, SocketAddress senderAddr String uriString = uriScheme + ":/" + senderAddress.toString(); if (Neighbor.getNumPeers() < maxPeersAllowed) { log.info("Adding non-tethered neighbor: " + uriString); - messageQ.publish("antn %s", uriString); + tangle.publish("antn %s", uriString); try { final URI uri = new URI(uriString); // 3rd parameter false (not tcp), 4th parameter true (configured tethering) @@ -392,7 +385,7 @@ public void 
preProcessReceivedData(byte[] receivedData, SocketAddress senderAddr // Avoid ever growing list in case of an attack. rejectedAddresses.clear(); } else if (rejectedAddresses.add(uriString)) { - messageQ.publish("rntn %s %s", uriString, String.valueOf(maxPeersAllowed)); + tangle.publish("rntn %s %s", uriString, String.valueOf(maxPeersAllowed)); log.info("Refused non-tethered neighbor: " + uriString + " (max-peers = " + String.valueOf(maxPeersAllowed) + ")"); } @@ -520,7 +513,7 @@ public void replyToRequest(Hash requestedHash, Neighbor neighbor) { try { sendPacket(sendingPacket, transactionViewModel, neighbor); - ByteBuffer digest = getBytesDigest(transactionViewModel.getBytes()); + long digest = getBytesDigest(transactionViewModel.getBytes()); synchronized (recentSeenBytes) { recentSeenBytes.put(digest, transactionViewModel.getHash()); } @@ -588,7 +581,7 @@ public void sendPacket(DatagramPacket sendingPacket, TransactionViewModel transa * Does the same as {@link #sendPacket(DatagramPacket, TransactionViewModel, Neighbor)} but defaults to using the * same internal {@link #sendingPacket} as all the other methods in this class, which allows external callers to * send packets that are in "sync" (sending is synchronized over the packet object) with the rest of the methods - * used in this class.
+ * used in this class. * * @param transactionViewModel the transaction that shall be sent * @param neighbor the neighbor that should receive the packet @@ -655,7 +648,7 @@ private Runnable spawnTipRequesterThread() { long now = System.currentTimeMillis(); if ((now - lastTime) > 10000L) { lastTime = now; - messageQ.publish("rstat %d %d %d %d %d", + tangle.publish("rstat %d %d %d %d %d", getReceiveQueueSize(), getBroadcastQueueSize(), transactionRequester.numberOfTransactionsToRequest(), getReplyQueueSize(), TransactionViewModel.getNumberOfStoredTransactions(tangle)); @@ -771,10 +764,8 @@ public void shutdown() throws InterruptedException { executor.awaitTermination(6, TimeUnit.SECONDS); } - private ByteBuffer getBytesDigest(byte[] receivedData) throws NoSuchAlgorithmException { - MessageDigest digest = MessageDigest.getInstance("SHA-256"); - digest.update(receivedData, 0, TransactionViewModel.SIZE); - return ByteBuffer.wrap(digest.digest()); + private long getBytesDigest(byte[] receivedData) { + return recentSeenBytesHashFunction.hashBytes(receivedData, 0, TransactionViewModel.SIZE); } // helpers methods @@ -837,7 +828,7 @@ private void parseNeighborsConfig() { .map(u -> newNeighbor(u, true)) .peek(u -> { log.info("-> Adding neighbor : {} ", u.getAddress()); - messageQ.publish("-> Adding Neighbor : %s", u.getAddress()); + tangle.publish("-> Adding Neighbor : %s", u.getAddress()); }).forEach(neighbors::add); } diff --git a/src/main/java/com/iota/iri/network/TransactionRequester.java b/src/main/java/com/iota/iri/network/TransactionRequester.java index 1b846d345f..5f0873fd68 100644 --- a/src/main/java/com/iota/iri/network/TransactionRequester.java +++ b/src/main/java/com/iota/iri/network/TransactionRequester.java @@ -1,9 +1,10 @@ package com.iota.iri.network; +import com.google.common.annotations.VisibleForTesting; import com.iota.iri.controllers.TransactionViewModel; import com.iota.iri.model.Hash; +import com.iota.iri.service.snapshot.Snapshot; import 
com.iota.iri.service.snapshot.SnapshotProvider; -import com.iota.iri.zmq.MessageQ; import com.iota.iri.storage.Tangle; import org.apache.commons.lang3.ArrayUtils; import org.slf4j.Logger; @@ -18,7 +19,6 @@ public class TransactionRequester { private static final Logger log = LoggerFactory.getLogger(TransactionRequester.class); - private final MessageQ messageQ; private final Set milestoneTransactionsToRequest = new LinkedHashSet<>(); private final Set transactionsToRequest = new LinkedHashSet<>(); @@ -32,10 +32,15 @@ public class TransactionRequester { private final Tangle tangle; private final SnapshotProvider snapshotProvider; - public TransactionRequester(Tangle tangle, SnapshotProvider snapshotProvider, MessageQ messageQ) { + /** + * Create {@link TransactionRequester} for receiving transactions from the tangle. + * + * @param tangle used to request transaction + * @param snapshotProvider that allows to retrieve the {@link Snapshot} instances that are relevant for the node + */ + public TransactionRequester(Tangle tangle, SnapshotProvider snapshotProvider) { this.tangle = tangle; this.snapshotProvider = snapshotProvider; - this.messageQ = messageQ; } public void init(double pRemoveRequest) { @@ -87,7 +92,7 @@ public void requestTransaction(Hash hash, boolean milestone) throws Exception { * * It used when the queue capacity is reached, and new transactions would be dropped as a result. */ - // @VisibleForTesting + @VisibleForTesting void popEldestTransactionToRequest() { Iterator iterator = transactionsToRequest.iterator(); if (iterator.hasNext()) { @@ -138,7 +143,7 @@ public Hash transactionToRequest(boolean milestone) throws Exception { iterator.remove(); // ... dump a log message ... log.info("Removed existing tx from request list: " + hash); - messageQ.publish("rtl %s", hash); + tangle.publish("rtl %s", hash); // ... 
and continue to the next element in the set continue; diff --git a/src/main/java/com/iota/iri/network/TransactionRequesterWorker.java b/src/main/java/com/iota/iri/network/TransactionRequesterWorker.java index 52d5e8aec2..9bb59041d8 100644 --- a/src/main/java/com/iota/iri/network/TransactionRequesterWorker.java +++ b/src/main/java/com/iota/iri/network/TransactionRequesterWorker.java @@ -1,28 +1,32 @@ package com.iota.iri.network; /** + *

* Creates a background worker that tries to work through the request queue by sending random tips along the requested - * transactions.
- *
+ * transactions. + *

+ *

* This massively increases the sync speed of new nodes that would otherwise be limited to requesting in the same rate - * as new transactions are received.
+ * as new transactions are received. + *

*/ public interface TransactionRequesterWorker { /** - * Works through the request queue by sending a request alongside a random tip to each of our neighbors.
+ * Works through the request queue by sending a request alongside a random tip to each of our neighbors. + * * @return true when we have send the request to our neighbors, otherwise false */ boolean processRequestQueue(); /** * Starts the background worker that automatically calls {@link #processRequestQueue()} periodically to process the - * requests in the queue.
+ * requests in the queue. */ void start(); /** - * Stops the background worker that automatically works through the request queue.
+ * Stops the background worker that automatically works through the request queue. */ void shutdown(); } diff --git a/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java b/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java index e3d6ea3bb6..18b403ad69 100644 --- a/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java +++ b/src/main/java/com/iota/iri/network/impl/TransactionRequesterWorkerImpl.java @@ -1,5 +1,6 @@ package com.iota.iri.network.impl; +import com.google.common.annotations.VisibleForTesting; import com.iota.iri.controllers.TipsViewModel; import com.iota.iri.controllers.TransactionViewModel; import com.iota.iri.model.Hash; @@ -16,39 +17,41 @@ import java.util.concurrent.TimeUnit; /** + *

* Creates a background worker that tries to work through the request queue by sending random tips along the requested - * transactions.
- *
+ * transactions. + *

+ *

* This massively increases the sync speed of new nodes that would otherwise be limited to requesting in the same rate - * as new transactions are received.
- *
+ * as new transactions are received. + *

* Note: To reduce the overhead for the node we only trigger this worker if the request queue gets bigger than the * {@link #REQUESTER_THREAD_ACTIVATION_THRESHOLD}. Otherwise we rely on the processing of the queue due to normal - * outgoing traffic like transactions that get relayed by our node.
+ * outgoing traffic like transactions that get relayed by our node. */ public class TransactionRequesterWorkerImpl implements TransactionRequesterWorker { /** - * The minimum amount of transactions in the request queue that are required for the worker to trigger.
+ * The minimum amount of transactions in the request queue that are required for the worker to trigger. */ public static final int REQUESTER_THREAD_ACTIVATION_THRESHOLD = 50; /** - * The time (in milliseconds) that the worker waits between its iterations.
+ * The time (in milliseconds) that the worker waits between its iterations. */ private static final int REQUESTER_THREAD_INTERVAL = 100; /** - * The logger of this class (a rate limited logger than doesn't spam the CLI output).
+ * The logger of this class (a rate limited logger than doesn't spam the CLI output). */ private static final Logger log = LoggerFactory.getLogger(TransactionRequesterWorkerImpl.class); /** - * The Tangle object which acts as a database interface.
+ * The Tangle object which acts as a database interface. */ private Tangle tangle; /** - * The manager for the requested transactions that allows us to access the request queue.
+ * The manager for the requested transactions that allows us to access the request queue. */ private TransactionRequester transactionRequester; @@ -58,26 +61,27 @@ public class TransactionRequesterWorkerImpl implements TransactionRequesterWorke private TipsViewModel tipsViewModel; /** - * The network manager of the node.
+ * The network manager of the node. */ private Node node; /** - * The manager of the background task.
+ * The manager of the background task. */ private final SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService( "Transaction Requester", log); /** - * Initializes the instance and registers its dependencies.
- *
- * It simply stores the passed in values in their corresponding private properties.
- *
+ *

+ * Initializes the instance and registers its dependencies. + * It simply stores the passed in values in their corresponding private properties. + *

+ *

* Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
- *
+ * allows us to still instantiate, initialize and assign in one line - see Example: + *

* {@code transactionRequesterWorker = new TransactionRequesterWorkerImpl().init(...);} * * @param tangle Tangle object which acts as a database interface @@ -99,10 +103,11 @@ public TransactionRequesterWorkerImpl init(Tangle tangle, TransactionRequester t /** * {@inheritDoc} - *
+ *

* To reduce the overhead for the node we only trigger this worker if the request queue gets bigger than the {@link * #REQUESTER_THREAD_ACTIVATION_THRESHOLD}. Otherwise we rely on the processing of the queue due to normal outgoing - * traffic like transactions that get relayed by our node.
+ * traffic like transactions that get relayed by our node. + *

*/ @Override public boolean processRequestQueue() { @@ -131,12 +136,12 @@ private void sendToNodes(TransactionViewModel transaction) { } } - //Package Private For Testing + @VisibleForTesting boolean isActive() { return transactionRequester.numberOfTransactionsToRequest() >= REQUESTER_THREAD_ACTIVATION_THRESHOLD; } - //Package Private For Testing + @VisibleForTesting boolean isValidTransaction(TransactionViewModel transaction) { return transaction != null && ( transaction.getType() != TransactionViewModel.PREFILLED_SLOT @@ -155,15 +160,18 @@ public void shutdown() { } /** - * Retrieves a random solid tip that can be sent together with our request.
- *
- * It simply retrieves the hash of the tip from the {@link #tipsViewModel} and tries to load it from the - * database.
+ *

+ * Retrieves a random solid tip that can be sent together with our request. + *

+ *

+ * It retrieves the hash of the tip from the {@link #tipsViewModel} and tries to load it from the + * database. + *

* * @return a random tip * @throws Exception if anything unexpected happens while trying to retrieve the random tip. */ - //Package Private For Testing + @VisibleForTesting TransactionViewModel getTransactionToSendWithRequest() throws Exception { Hash tip = tipsViewModel.getRandomSolidTipHash(); if (tip == null) { diff --git a/src/main/java/com/iota/iri/service/API.java b/src/main/java/com/iota/iri/service/API.java index 4a7d4b4350..c19a7283e7 100644 --- a/src/main/java/com/iota/iri/service/API.java +++ b/src/main/java/com/iota/iri/service/API.java @@ -6,8 +6,9 @@ import com.iota.iri.BundleValidator; import com.iota.iri.IRI; import com.iota.iri.IXI; -import com.iota.iri.Iota; +import com.iota.iri.TransactionValidator; import com.iota.iri.conf.APIConfig; +import com.iota.iri.conf.IotaConfig; import com.iota.iri.controllers.*; import com.iota.iri.crypto.Curl; import com.iota.iri.crypto.PearlDiver; @@ -17,47 +18,38 @@ import com.iota.iri.model.HashFactory; import com.iota.iri.model.persistables.Transaction; import com.iota.iri.network.Neighbor; +import com.iota.iri.network.Node; +import com.iota.iri.network.TransactionRequester; import com.iota.iri.service.dto.*; +import com.iota.iri.service.ledger.LedgerService; +import com.iota.iri.service.milestone.LatestMilestoneTracker; +import com.iota.iri.service.restserver.RestConnector; +import com.iota.iri.service.snapshot.SnapshotProvider; +import com.iota.iri.service.spentaddresses.SpentAddressesService; import com.iota.iri.service.tipselection.TipSelector; +import com.iota.iri.service.tipselection.impl.TipSelectionCancelledException; import com.iota.iri.service.tipselection.impl.WalkValidatorImpl; +import com.iota.iri.storage.Tangle; import com.iota.iri.utils.Converter; -import com.iota.iri.utils.IotaIOUtils; -import com.iota.iri.utils.MapIdentityManager; -import io.undertow.Undertow; -import io.undertow.security.api.AuthenticationMechanism; -import io.undertow.security.api.AuthenticationMode; -import 
io.undertow.security.handlers.AuthenticationCallHandler; -import io.undertow.security.handlers.AuthenticationConstraintHandler; -import io.undertow.security.handlers.AuthenticationMechanismsHandler; -import io.undertow.security.handlers.SecurityInitialHandler; -import io.undertow.security.idm.IdentityManager; -import io.undertow.security.impl.BasicAuthenticationMechanism; -import io.undertow.server.HttpHandler; -import io.undertow.server.HttpServerExchange; -import io.undertow.util.*; + +import com.iota.iri.utils.IotaUtils; import org.apache.commons.lang3.StringUtils; +import org.iota.mddoclet.Document; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.xnio.channels.StreamSinkChannel; -import org.xnio.streams.ChannelInputStream; - -import java.io.IOException; import java.io.UnsupportedEncodingException; -import java.net.InetSocketAddress; +import java.net.InetAddress; import java.net.URI; import java.net.URISyntaxException; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.security.InvalidAlgorithmParameterException; import java.util.*; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; -import static io.undertow.Handlers.path; - /** *

* The API makes it possible to interact with the node by requesting information or actions to be taken. @@ -76,221 +68,150 @@ @SuppressWarnings("unchecked") public class API { + private static final Logger log = LoggerFactory.getLogger(API.class); + + //region [CONSTANTS] /////////////////////////////////////////////////////////////////////////////// + public static final String REFERENCE_TRANSACTION_NOT_FOUND = "reference transaction not found"; public static final String REFERENCE_TRANSACTION_TOO_OLD = "reference transaction is too old"; public static final String INVALID_SUBTANGLE = "This operation cannot be executed: " + "The subtangle has not been updated yet."; + + private static final String OVER_MAX_ERROR_MESSAGE = "Could not complete request"; + private static final String INVALID_PARAMS = "Invalid parameters"; - private static final Logger log = LoggerFactory.getLogger(API.class); - private final IXI ixi; - - private Undertow server; - - private final Gson gson = new GsonBuilder().create(); - private volatile PearlDiver pearlDiver = new PearlDiver(); - - private final AtomicInteger counter = new AtomicInteger(0); - - private Pattern trytesPattern = Pattern.compile("[9A-Z]*"); + private static final char ZERO_LENGTH_ALLOWED = 'Y'; + private static final char ZERO_LENGTH_NOT_ALLOWED = 'N'; + + private static final int HASH_SIZE = 81; + private static final int TRYTES_SIZE = 2673; - private final static int HASH_SIZE = 81; - private final static int TRYTES_SIZE = 2673; - - private final static long MAX_TIMESTAMP_VALUE = (long) (Math.pow(3, 27) - 1) / 2; // max positive 27-trits value + private static final long MAX_TIMESTAMP_VALUE = (long) (Math.pow(3, 27) - 1) / 2; // max positive 27-trits value + //endregion //////////////////////////////////////////////////////////////////////////////////////////////////////// + private static int counterGetTxToApprove = 0; private static long ellapsedTime_getTxToApprove = 0L; private static int counter_PoW = 0; private 
static long ellapsedTime_PoW = 0L; + + //region [CONSTRUCTOR_FIELDS] /////////////////////////////////////////////////////////////////////////////// + private final IotaConfig configuration; + private final IXI ixi; + private final TransactionRequester transactionRequester; + private final SpentAddressesService spentAddressesService; + private final Tangle tangle; + private final BundleValidator bundleValidator; + private final SnapshotProvider snapshotProvider; + private final LedgerService ledgerService; + private final Node node; + private final TipSelector tipsSelector; + private final TipsViewModel tipsViewModel; + private final TransactionValidator transactionValidator; + private final LatestMilestoneTracker latestMilestoneTracker; + private final int maxFindTxs; private final int maxRequestList; private final int maxGetTrytes; - private final int maxBodyLength; - private final boolean testNet; - private final static String overMaxErrorMessage = "Could not complete request"; - private final static String invalidParams = "Invalid parameters"; + private final String[] features; + + //endregion //////////////////////////////////////////////////////////////////////////////////////////////////////// + + private final Gson gson = new GsonBuilder().create(); + private volatile PearlDiver pearlDiver = new PearlDiver(); - private final static char ZERO_LENGTH_ALLOWED = 'Y'; - private final static char ZERO_LENGTH_NOT_ALLOWED = 'N'; - private Iota instance; + private final AtomicInteger counter = new AtomicInteger(0); - private final String[] features; + private Pattern trytesPattern = Pattern.compile("[9A-Z]*"); + + //Package Private For Testing + final Map, AbstractResponse>> commandRoute; + + + private RestConnector connector; + + private final ExecutorService tipSelExecService = Executors.newSingleThreadExecutor(r -> new Thread(r, "tip-selection")); /** * Starts loading the IOTA API, parameters do not have to be initialized. 
- * - * @param instance The data source we interact with during any API call. - * @param ixi If a command is not in the standard API, - * we try to process it as a Nashorn JavaScript module through {@link IXI} + * + * @param configuration + * @param ixi If a command is not in the standard API, + * we try to process it as a Nashorn JavaScript module through {@link IXI} + * @param transactionRequester Service where transactions get requested + * @param spentAddressesService Service to check if addresses are spent + * @param tangle The transaction storage + * @param bundleValidator Validates bundles + * @param snapshotProvider Manager of our currently taken snapshots + * @param ledgerService contains all the relevant business logic for modifying and calculating the ledger state. + * @param node Handles and manages neighbors + * @param tipsSelector Handles logic for selecting tips based on other transactions + * @param tipsViewModel Contains the current tips of this node + * @param transactionValidator Validates transactions + * @param latestMilestoneTracker Service that tracks the latest milestone */ - public API(Iota instance, IXI ixi) { - this.instance = instance; + public API(IotaConfig configuration, IXI ixi, TransactionRequester transactionRequester, + SpentAddressesService spentAddressesService, Tangle tangle, BundleValidator bundleValidator, + SnapshotProvider snapshotProvider, LedgerService ledgerService, Node node, TipSelector tipsSelector, + TipsViewModel tipsViewModel, TransactionValidator transactionValidator, + LatestMilestoneTracker latestMilestoneTracker) { + this.configuration = configuration; this.ixi = ixi; - APIConfig configuration = instance.configuration; + + this.transactionRequester = transactionRequester; + this.spentAddressesService = spentAddressesService; + this.tangle = tangle; + this.bundleValidator = bundleValidator; + this.snapshotProvider = snapshotProvider; + this.ledgerService = ledgerService; + this.node = node; + this.tipsSelector = 
tipsSelector; + this.tipsViewModel = tipsViewModel; + this.transactionValidator = transactionValidator; + this.latestMilestoneTracker = latestMilestoneTracker; + maxFindTxs = configuration.getMaxFindTransactions(); maxRequestList = configuration.getMaxRequestsList(); maxGetTrytes = configuration.getMaxGetTrytes(); - maxBodyLength = configuration.getMaxBodyLength(); - testNet = configuration.isTestnet(); - - features = Feature.calculateFeatureNames(instance.configuration); - } - - /** - * Prepares the IOTA API for usage. Until this method is called, no HTTP requests can be made. - * The order of loading is as follows - *

    - *
  1. - * Read the spend addresses from the previous epoch. Used in {@link #wasAddressSpentFrom(Hash)}. - * This only happens if {@link APIConfig#isTestnet()} is false - * If reading from the previous epoch fails, a log is printed. The API will continue to initialize. - *
  2. - *
  3. - * Get the {@link APIConfig} from the {@link Iota} instance, - * and read {@link APIConfig#getPort()} and {@link APIConfig#getApiHost()} - *
  4. - *
  5. - * Builds a secure {@link Undertow} server with the port and host. - * If {@link APIConfig#getRemoteAuth()} is defined, remote authentication is blocked for anyone except - * those defined in {@link APIConfig#getRemoteAuth()} or localhost. - * This is done with {@link BasicAuthenticationMechanism} in a {@link AuthenticationMode#PRO_ACTIVE} mode. - * By default, this authentication is disabled. - *
  6. - *
  7. - * Starts the server, opening it for HTTP API requests - *
  8. - *
- */ - public void init() throws IOException { - APIConfig configuration = instance.configuration; - final int apiPort = configuration.getPort(); - final String apiHost = configuration.getApiHost(); - - log.debug("Binding JSON-REST API Undertow server on {}:{}", apiHost, apiPort); - - server = Undertow.builder().addHttpListener(apiPort, apiHost) - .setHandler(path().addPrefixPath("/", addSecurity(new HttpHandler() { - @Override - public void handleRequest(final HttpServerExchange exchange) throws Exception { - HttpString requestMethod = exchange.getRequestMethod(); - if (Methods.OPTIONS.equals(requestMethod)) { - String allowedMethods = "GET,HEAD,POST,PUT,DELETE,TRACE,OPTIONS,CONNECT,PATCH"; - //return list of allowed methods in response headers - exchange.setStatusCode(StatusCodes.OK); - exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, MimeMappings.DEFAULT_MIME_MAPPINGS.get("txt")); - exchange.getResponseHeaders().put(Headers.CONTENT_LENGTH, 0); - exchange.getResponseHeaders().put(Headers.ALLOW, allowedMethods); - exchange.getResponseHeaders().put(new HttpString("Access-Control-Allow-Origin"), "*"); - exchange.getResponseHeaders().put(new HttpString("Access-Control-Allow-Headers"), "User-Agent, Origin, X-Requested-With, Content-Type, Accept, X-IOTA-API-Version"); - exchange.getResponseSender().close(); - return; - } - if (exchange.isInIoThread()) { - exchange.dispatch(this); - return; - } - processRequest(exchange); - } - }))).build(); - server.start(); - } - - /** - * Sends the API response back as JSON to the requester. - * Status code of the HTTP request is also set according to the type of response. - *
    - *
  • {@link ErrorResponse}: 400
  • - *
  • {@link AccessLimitedResponse}: 401
  • - *
  • {@link ExceptionResponse}: 500
  • - *
  • Default: 200
  • - *
- * - * @param exchange Contains information about what the client sent to us - * @param res The response of the API. - * See {@link #processRequest(HttpServerExchange)} - * and {@link #process(String, InetSocketAddress)} for the different responses in each case. - * @param beginningTime The time when we received the request, in milliseconds. - * This will be used to set the response duration in {@link AbstractResponse#setDuration(Integer)} - * @throws IOException When connection to client has been lost - Currently being caught. - */ - private void sendResponse(HttpServerExchange exchange, AbstractResponse res, long beginningTime) throws IOException { - res.setDuration((int) (System.currentTimeMillis() - beginningTime)); - final String response = gson.toJson(res); - - if (res instanceof ErrorResponse) { - // bad request or invalid parameters - exchange.setStatusCode(400); - } else if (res instanceof AccessLimitedResponse) { - // API method not allowed - exchange.setStatusCode(401); - } else if (res instanceof ExceptionResponse) { - // internal error - exchange.setStatusCode(500); - } - - setupResponseHeaders(exchange); - - ByteBuffer responseBuf = ByteBuffer.wrap(response.getBytes(StandardCharsets.UTF_8)); - exchange.setResponseContentLength(responseBuf.array().length); - StreamSinkChannel sinkChannel = exchange.getResponseChannel(); - sinkChannel.getWriteSetter().set( channel -> { - if (responseBuf.remaining() > 0) { - try { - sinkChannel.write(responseBuf); - if (responseBuf.remaining() == 0) { - exchange.endExchange(); - } - } catch (IOException e) { - log.error("Lost connection to client - cannot send response"); - exchange.endExchange(); - sinkChannel.getWriteSetter().set(null); - } - } - else { - exchange.endExchange(); - } - }); - sinkChannel.resumeWrites(); + features = Feature.calculateFeatureNames(configuration); + + commandRoute = new HashMap<>(); + commandRoute.put(ApiCommand.ADD_NEIGHBORS, addNeighbors()); + 
commandRoute.put(ApiCommand.ATTACH_TO_TANGLE, attachToTangle()); + commandRoute.put(ApiCommand.BROADCAST_TRANSACTIONs, broadcastTransactions()); + commandRoute.put(ApiCommand.FIND_TRANSACTIONS, findTransactions()); + commandRoute.put(ApiCommand.GET_BALANCES, getBalances()); + commandRoute.put(ApiCommand.GET_INCLUSION_STATES, getInclusionStates()); + commandRoute.put(ApiCommand.GET_NEIGHBORS, getNeighbors()); + commandRoute.put(ApiCommand.GET_NODE_INFO, getNodeInfo()); + commandRoute.put(ApiCommand.GET_NODE_API_CONFIG, getNodeAPIConfiguration()); + commandRoute.put(ApiCommand.GET_TIPS, getTips()); + commandRoute.put(ApiCommand.GET_TRANSACTIONS_TO_APPROVE, getTransactionsToApprove()); + commandRoute.put(ApiCommand.GET_TRYTES, getTrytes()); + commandRoute.put(ApiCommand.INTERRUPT_ATTACHING_TO_TANGLE, interruptAttachingToTangle()); + commandRoute.put(ApiCommand.REMOVE_NEIGHBORS, removeNeighbors()); + commandRoute.put(ApiCommand.STORE_TRANSACTIONS, storeTransactions()); + commandRoute.put(ApiCommand.GET_MISSING_TRANSACTIONS, getMissingTransactions()); + commandRoute.put(ApiCommand.CHECK_CONSISTENCY, checkConsistency()); + commandRoute.put(ApiCommand.WERE_ADDRESSES_SPENT_FROM, wereAddressesSpentFrom()); } /** - *

- * Processes an API HTTP request. - * No checks have been done until now, except that it is not an OPTIONS request. - * We can be sure that we are in a thread that allows blocking. - *

- *

- * The request process duration is recorded. - * During this the request gets verified. If it is incorrect, an {@link ErrorResponse} is made. - * Otherwise it is processed in {@link #process(String, InetSocketAddress)}. - * The result is sent back to the requester. - *

- * - * @param exchange Contains the data the client sent to us - * @throws IOException If the body of this HTTP request cannot be read + * Initializes the API for usage. + * Will initialize and start the supplied {@link RestConnector} + * + * @param connector THe connector we use to handle API requests */ - private void processRequest(final HttpServerExchange exchange) throws IOException { - final ChannelInputStream cis = new ChannelInputStream(exchange.getRequestChannel()); - exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, "application/json"); - - final long beginningTime = System.currentTimeMillis(); - final String body = IotaIOUtils.toString(cis, StandardCharsets.UTF_8); - AbstractResponse response; - - if (!exchange.getRequestHeaders().contains("X-IOTA-API-Version")) { - response = ErrorResponse.create("Invalid API Version"); - } else if (body.length() > maxBodyLength) { - response = ErrorResponse.create("Request too long"); - } else { - response = process(body, exchange.getSourceAddress()); - } - sendResponse(exchange, response, beginningTime); + public void init(RestConnector connector){ + this.connector = connector; + connector.init(this::process); + connector.start(); } - + /** * Handles an API request body. * Its returned {@link AbstractResponse} is created using the following logic @@ -323,9 +244,7 @@ private void processRequest(final HttpServerExchange exchange) throws IOExceptio * @throws UnsupportedEncodingException If the requestString cannot be parsed into a Map. Currently caught and turned into a {@link ExceptionResponse}. 
*/ - private AbstractResponse process(final String requestString, InetSocketAddress sourceAddress) - throws UnsupportedEncodingException { - + private AbstractResponse process(final String requestString, InetAddress netAddress){ try { // Request JSON data into map Map request; @@ -347,148 +266,32 @@ private AbstractResponse process(final String requestString, InetSocketAddress s // Is this command allowed to be run from this request address? // We check the remote limit API configuration. - if (instance.configuration.getRemoteLimitApi().contains(command) && - !sourceAddress.getAddress().isLoopbackAddress()) { + if (configuration.getRemoteLimitApi().contains(command) && + !configuration.getRemoteTrustedApiHosts().contains(netAddress)) { return AccessLimitedResponse.create("COMMAND " + command + " is not available on this node"); } log.debug("# {} -> Requesting command '{}'", counter.incrementAndGet(), command); - switch (command) { - case "storeMessage": { - if (!testNet) { - return AccessLimitedResponse.create("COMMAND storeMessage is only available on testnet"); - } - - if (!request.containsKey("address") || !request.containsKey("message")) { - return ErrorResponse.create("Invalid params"); - } - - String address = (String) request.get("address"); - String message = (String) request.get("message"); - return storeMessageStatement(address, message); - } - - case "addNeighbors": { - List uris = getParameterAsList(request,"uris",0); - log.debug("Invoking 'addNeighbors' with {}", uris); - return addNeighborsStatement(uris); - } - case "attachToTangle": { - final Hash trunkTransaction = HashFactory.TRANSACTION.create(getParameterAsStringAndValidate(request,"trunkTransaction", HASH_SIZE)); - final Hash branchTransaction = HashFactory.TRANSACTION.create(getParameterAsStringAndValidate(request,"branchTransaction", HASH_SIZE)); - final int minWeightMagnitude = getParameterAsInt(request,"minWeightMagnitude"); - - final List trytes = getParameterAsList(request,"trytes", 
TRYTES_SIZE); - - List elements = attachToTangleStatement(trunkTransaction, branchTransaction, minWeightMagnitude, trytes); - return AttachToTangleResponse.create(elements); - } - case "broadcastTransactions": { - final List trytes = getParameterAsList(request,"trytes", TRYTES_SIZE); - broadcastTransactionsStatement(trytes); - return AbstractResponse.createEmptyResponse(); - } - case "findTransactions": { - return findTransactionsStatement(request); - } - case "getBalances": { - final List addresses = getParameterAsList(request,"addresses", HASH_SIZE); - final List tips = request.containsKey("tips") ? - getParameterAsList(request,"tips", HASH_SIZE): - null; - final int threshold = getParameterAsInt(request, "threshold"); - return getBalancesStatement(addresses, tips, threshold); - } - case "getInclusionStates": { - if (invalidSubtangleStatus()) { - return ErrorResponse.create(INVALID_SUBTANGLE); - } - final List transactions = getParameterAsList(request,"transactions", HASH_SIZE); - final List tips = getParameterAsList(request,"tips", HASH_SIZE); - - return getInclusionStatesStatement(transactions, tips); - } - case "getNeighbors": { - return getNeighborsStatement(); - } - case "getNodeInfo": { - return getNodeInfoStatement(); - } - case "getNodeAPIConfiguration": { - return getNodeAPIConfigurationStatement(); - } - case "getTips": { - return getTipsStatement(); - } - case "getTransactionsToApprove": { - Optional reference = request.containsKey("reference") ? 
- Optional.of(HashFactory.TRANSACTION.create(getParameterAsStringAndValidate(request,"reference", HASH_SIZE))) - : Optional.empty(); - int depth = getParameterAsInt(request, "depth"); - - return getTransactionsToApproveStatement(depth, reference); - } - case "getTrytes": { - final List hashes = getParameterAsList(request,"hashes", HASH_SIZE); - return getTrytesStatement(hashes); - } - - case "interruptAttachingToTangle": { - return interruptAttachingToTangleStatement(); - } - case "removeNeighbors": { - List uris = getParameterAsList(request,"uris",0); - log.debug("Invoking 'removeNeighbors' with {}", uris); - return removeNeighborsStatement(uris); - } - - case "storeTransactions": { - try { - final List trytes = getParameterAsList(request,"trytes", TRYTES_SIZE); - storeTransactionsStatement(trytes); - return AbstractResponse.createEmptyResponse(); - } catch (RuntimeException e) { - //transaction not valid - return ErrorResponse.create("Invalid trytes input"); - } - } - case "getMissingTransactions": { - //TransactionRequester.instance().rescanTransactionsToRequest(); - synchronized (instance.transactionRequester) { - List missingTx = Arrays.stream(instance.transactionRequester.getRequestedTransactions()) - .map(Hash::toString) - .collect(Collectors.toList()); - return GetTipsResponse.create(missingTx); - } - } - case "checkConsistency": { - if (invalidSubtangleStatus()) { - return ErrorResponse.create(INVALID_SUBTANGLE); - } - final List transactions = getParameterAsList(request,"tails", HASH_SIZE); - return checkConsistencyStatement(transactions); - } - case "wereAddressesSpentFrom": { - final List addresses = getParameterAsList(request,"addresses", HASH_SIZE); - return wereAddressesSpentFromStatement(addresses); - } - default: { - AbstractResponse response = ixi.processCommand(command, request); - return response == null ? 
- ErrorResponse.create("Command [" + command + "] is unknown") : - response; + ApiCommand apiCommand = ApiCommand.findByName(command); + if (apiCommand != null) { + return commandRoute.get(apiCommand).apply(request); + } else { + AbstractResponse response = ixi.processCommand(command, request); + if (response == null) { + return ErrorResponse.create("Command [" + command + "] is unknown"); + } else { + return response; } } - - } catch (final ValidationException e) { - log.info("API Validation failed: " + e.getLocalizedMessage()); - return ErrorResponse.create(e.getLocalizedMessage()); - } catch (final InvalidAlgorithmParameterException e) { - log.info("API InvalidAlgorithmParameter passed: " + e.getLocalizedMessage()); - return ErrorResponse.create(e.getLocalizedMessage()); - } catch (final Exception e) { - log.error("API Exception: {}", e.getLocalizedMessage(), e); + } catch (ValidationException e) { + log.error("API Validation failed: " + e.getLocalizedMessage()); + return ExceptionResponse.create(e.getLocalizedMessage()); + } catch (IllegalStateException e) { + log.error("API Exception: " + e.getLocalizedMessage()); + return ExceptionResponse.create(e.getLocalizedMessage()); + } catch (RuntimeException e) { + log.error("Unexpected API Exception: " + e.getLocalizedMessage()); return ExceptionResponse.create(e.getLocalizedMessage()); } } @@ -498,7 +301,9 @@ private AbstractResponse process(final String requestString, InetSocketAddress s * If an address has a pending transaction, it is also marked as spend. * * @param addresses List of addresses to check if they were ever spent from. 
+ * @return {@link com.iota.iri.service.dto.WereAddressesSpentFrom} **/ + @Document(name="wereAddressesSpentFrom") private AbstractResponse wereAddressesSpentFromStatement(List addresses) throws Exception { final List addressesHash = addresses.stream() .map(HashFactory.ADDRESS::create) @@ -508,7 +313,7 @@ private AbstractResponse wereAddressesSpentFromStatement(List addresses) int index = 0; for (Hash address : addressesHash) { - states[index++] = instance.spentAddressesService.wasAddressSpentFrom(address); + states[index++] = spentAddressesService.wasAddressSpentFrom(address); } return WereAddressesSpentFrom.create(states); } @@ -522,7 +327,7 @@ private AbstractResponse wereAddressesSpentFromStatement(List addresses) * @throws Exception When a model could not be loaded. */ private Hash findTail(Hash hash) throws Exception { - TransactionViewModel tx = TransactionViewModel.fromHash(instance.tangle, hash); + TransactionViewModel tx = TransactionViewModel.fromHash(tangle, hash); final Hash bundleHash = tx.getBundleHash(); long index = tx.getCurrentIndex(); boolean foundApprovee = false; @@ -530,9 +335,9 @@ private Hash findTail(Hash hash) throws Exception { // As long as the index is bigger than 0 and we are still traversing the same bundle // If the hash we asked about is already a tail, this loop never starts while (index-- > 0 && tx.getBundleHash().equals(bundleHash)) { - Set approvees = tx.getApprovers(instance.tangle).getHashes(); + Set approvees = tx.getApprovers(tangle).getHashes(); for (Hash approvee : approvees) { - TransactionViewModel nextTx = TransactionViewModel.fromHash(instance.tangle, approvee); + TransactionViewModel nextTx = TransactionViewModel.fromHash(tangle, approvee); if (nextTx.getBundleHash().equals(bundleHash)) { tx = nextTx; foundApprovee = true; @@ -553,12 +358,12 @@ private Hash findTail(Hash hash) throws Exception { /** * - * Checks the consistency of the transactions. 
- * Marks state as false on the following checks: + * Check the consistency of the transactions. + * A consistent transaction is one where the following statements are true: *
    - *
  • Missing a reference transaction
  • - *
  • Invalid bundle
  • - *
  • Tails of tails are invalid
  • + *
  • Valid bundle
  • + *
  • The transaction is not missing a reference transaction
  • + *
  • Tails of tails are valid
  • *
* * If a transaction does not exist, or it is not a tail, an {@link ErrorResponse} is returned. @@ -566,6 +371,7 @@ private Hash findTail(Hash hash) throws Exception { * @param transactionsList Transactions you want to check the consistency for * @return {@link CheckConsistency} **/ + @Document(name="checkConsistency") private AbstractResponse checkConsistencyStatement(List transactionsList) throws Exception { final List transactions = transactionsList.stream().map(HashFactory.TRANSACTION::create).collect(Collectors.toList()); boolean state = true; @@ -573,7 +379,7 @@ private AbstractResponse checkConsistencyStatement(List transactionsList // Check if the transactions themselves are valid for (Hash transaction : transactions) { - TransactionViewModel txVM = TransactionViewModel.fromHash(instance.tangle, transaction); + TransactionViewModel txVM = TransactionViewModel.fromHash(tangle, transaction); if (txVM.getType() == TransactionViewModel.PREFILLED_SLOT) { return ErrorResponse.create("Invalid transaction, missing: " + transaction); } @@ -586,7 +392,7 @@ private AbstractResponse checkConsistencyStatement(List transactionsList state = false; info = "tails are not solid (missing a referenced tx): " + transaction; break; - } else if (instance.bundleValidator.validate(instance.tangle, instance.snapshotProvider.getInitialSnapshot(), txVM.getHash()).size() == 0) { + } else if (bundleValidator.validate(tangle, snapshotProvider.getInitialSnapshot(), txVM.getHash()).size() == 0) { state = false; info = "tails are not consistent (bundle is invalid): " + transaction; break; @@ -595,10 +401,9 @@ private AbstractResponse checkConsistencyStatement(List transactionsList // Transactions are valid, lets check ledger consistency if (state) { - instance.snapshotProvider.getLatestSnapshot().lockRead(); + snapshotProvider.getLatestSnapshot().lockRead(); try { - WalkValidatorImpl walkValidator = new WalkValidatorImpl(instance.tangle, instance.snapshotProvider, instance.ledgerService, - 
instance.configuration); + WalkValidatorImpl walkValidator = new WalkValidatorImpl(tangle, snapshotProvider, ledgerService, configuration); for (Hash transaction : transactions) { if (!walkValidator.isValid(transaction)) { state = false; @@ -607,7 +412,7 @@ private AbstractResponse checkConsistencyStatement(List transactionsList } } } finally { - instance.snapshotProvider.getLatestSnapshot().unlockRead(); + snapshotProvider.getLatestSnapshot().unlockRead(); } } @@ -621,18 +426,19 @@ private AbstractResponse checkConsistencyStatement(List transactionsList * @return false if we received at least a solid milestone, otherwise true */ public boolean invalidSubtangleStatus() { - return (instance.snapshotProvider.getLatestSnapshot().getIndex() == instance.snapshotProvider.getInitialSnapshot().getIndex()); + return (snapshotProvider.getLatestSnapshot().getIndex() == snapshotProvider.getInitialSnapshot().getIndex()); } /** - * Returns the set of neighbors you are connected with, as well as their activity statistics (or counters). - * The activity counters are reset after restarting IRI. + * Returns an IRI node's neighbors, as well as their activity. + * Note: The activity counters are reset after restarting IRI. * * @return {@link com.iota.iri.service.dto.GetNeighborsResponse} **/ - private AbstractResponse getNeighborsStatement() { - return GetNeighborsResponse.create(instance.node.getNeighbors()); - } + @Document(name="getNeighbors") + private AbstractResponse getNeighborsStatement() { + return GetNeighborsResponse.create(node.getNeighbors()); + } /** * Temporarily add a list of neighbors to your node. 
@@ -646,23 +452,24 @@ private AbstractResponse getNeighborsStatement() { * @param uris list of neighbors to add * @return {@link com.iota.iri.service.dto.AddedNeighborsResponse} **/ - private AbstractResponse addNeighborsStatement(List uris) { - int numberOfAddedNeighbors = 0; - try { - for (final String uriString : uris) { + @Document(name="addNeighbors") + private AbstractResponse addNeighborsStatement(List uris) { + int numberOfAddedNeighbors = 0; + try { + for (final String uriString : uris) { log.info("Adding neighbor: " + uriString); - final Neighbor neighbor = instance.node.newNeighbor(new URI(uriString), true); - if (!instance.node.getNeighbors().contains(neighbor)) { - instance.node.getNeighbors().add(neighbor); + final Neighbor neighbor = node.newNeighbor(new URI(uriString), true); + if (!node.getNeighbors().contains(neighbor)) { + node.getNeighbors().add(neighbor); numberOfAddedNeighbors++; } } - } catch (URISyntaxException|RuntimeException e) { + } catch (URISyntaxException|RuntimeException e) { return ErrorResponse.create("Invalid uri scheme: " + e.getLocalizedMessage()); - } - return AddedNeighborsResponse.create(numberOfAddedNeighbors); - } - + } + return AddedNeighborsResponse.create(numberOfAddedNeighbors); + } + /** * Temporarily removes a list of neighbors from your node. * The added neighbors will be added again after relaunching IRI. @@ -676,12 +483,13 @@ private AbstractResponse addNeighborsStatement(List uris) { * @param uris The URIs of the neighbors we want to remove. 
* @return {@link com.iota.iri.service.dto.RemoveNeighborsResponse} **/ + @Document(name="removeNeighbors") private AbstractResponse removeNeighborsStatement(List uris) { int numberOfRemovedNeighbors = 0; try { for (final String uriString : uris) { log.info("Removing neighbor: " + uriString); - if (instance.node.removeNeighbor(new URI(uriString),true)) { + if (node.removeNeighbor(new URI(uriString),true)) { numberOfRemovedNeighbors++; } } @@ -692,23 +500,24 @@ private AbstractResponse removeNeighborsStatement(List uris) { } /** - * Returns the raw transaction data (trytes) of a specific transaction. - * These trytes can then be easily converted into the actual transaction object. + * raw transaction data (trytes) of a specific transaction. + * These trytes can then be converted into the actual transaction object. * See utility and {@link Transaction} functions in an IOTA library for more details. * * @param hashes The transaction hashes you want to get trytes from. * @return {@link com.iota.iri.service.dto.GetTrytesResponse} **/ + @Document(name="getTrytes") private synchronized AbstractResponse getTrytesStatement(List hashes) throws Exception { final List elements = new LinkedList<>(); for (final String hash : hashes) { - final TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(instance.tangle, HashFactory.TRANSACTION.create(hash)); + final TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(tangle, HashFactory.TRANSACTION.create(hash)); if (transactionViewModel != null) { elements.add(Converter.trytes(transactionViewModel.trits())); } } if (elements.size() > maxGetTrytes){ - return ErrorResponse.create(overMaxErrorMessage); + return ErrorResponse.create(OVER_MAX_ERROR_MESSAGE); } return GetTrytesResponse.create(elements); } @@ -719,7 +528,7 @@ private synchronized AbstractResponse getTrytesStatement(List hashes) th * * @return The current amount of times this node has returned transactions to approve */ - private static 
int getCounterGetTxToApprove() { + public static int getCounterGetTxToApprove() { return counterGetTxToApprove; } @@ -759,10 +568,10 @@ private static void incEllapsedTimeGetTxToApprove(long ellapsedTime) { * @param depth Number of bundles to go back to determine the transactions for approval. * @param reference Hash of transaction to start random-walk from, used to make sure the tips returned reference a given transaction in their past. * @return {@link com.iota.iri.service.dto.GetTransactionsToApproveResponse} - * @throws Exception When tip selection has failed. Currently caught and returned as an {@link ErrorResponse}. **/ - private synchronized AbstractResponse getTransactionsToApproveStatement(int depth, Optional reference) throws Exception { - if (depth < 0 || depth > instance.configuration.getMaxDepth()) { + @Document(name="getTransactionsToApprove") + private synchronized AbstractResponse getTransactionsToApproveStatement(int depth, Optional reference) { + if (depth < 0 || depth > configuration.getMaxDepth()) { return ErrorResponse.create("Invalid depth input"); } @@ -791,7 +600,17 @@ List getTransactionToApproveTips(int depth, Optional reference) thro throw new IllegalStateException(INVALID_SUBTANGLE); } - List tips = instance.tipsSelector.getTransactionsToApprove(depth, reference); + Future> tipSelection = null; + List tips; + try { + tipSelection = tipSelExecService.submit(() -> tipsSelector.getTransactionsToApprove(depth, reference)); + tips = tipSelection.get(configuration.getTipSelectionTimeoutSec(), TimeUnit.SECONDS); + } catch (TimeoutException ex) { + // interrupt the tip-selection thread so that it aborts + tipSelection.cancel(true); + throw new TipSelectionCancelledException(String.format("tip-selection exceeded timeout of %d seconds", + configuration.getTipSelectionTimeoutSec())); + } if (log.isDebugEnabled()) { gatherStatisticsOnTipSelection(); @@ -801,11 +620,11 @@ List getTransactionToApproveTips(int depth, Optional reference) thro /** *

- * Handles statistics on tip selection. - * Increases the tip selection by one use. - *

+ * Handles statistics on tip selection. + * Increases the tip selection by one use. + *

*

- * If the {@link #getCounterGetTxToApprove()} is a power of 100, a log is send and counters are reset. + * If the {@link #getCounterGetTxToApprove()} is a power of 100, a log is send and counters are reset. *

*/ private void gatherStatisticsOnTipSelection() { @@ -826,8 +645,9 @@ private void gatherStatisticsOnTipSelection() { * * @return {@link com.iota.iri.service.dto.GetTipsResponse} **/ + @Document(name="getTips") private synchronized AbstractResponse getTipsStatement() throws Exception { - return GetTipsResponse.create(instance.tipsViewModel.getTips() + return GetTipsResponse.create(tipsViewModel.getTips() .stream() .map(Hash::toString) .collect(Collectors.toList())); @@ -839,28 +659,32 @@ private synchronized AbstractResponse getTipsStatement() throws Exception { * These trytes are returned by attachToTangle, or by doing proof of work somewhere else. * * @param trytes Transaction data to be stored. + * @return {@link com.iota.iri.service.dto.AbstractResponse.Emptyness} * @throws Exception When storing or updating a transaction fails. **/ - public void storeTransactionsStatement(List trytes) throws Exception { + @Document(name="storeTransactions") + public AbstractResponse storeTransactionsStatement(List trytes) throws Exception { final List elements = new LinkedList<>(); byte[] txTrits = Converter.allocateTritsForTrytes(TRYTES_SIZE); for (final String trytesPart : trytes) { //validate all trytes Converter.trits(trytesPart, txTrits, 0); - final TransactionViewModel transactionViewModel = instance.transactionValidator.validateTrits(txTrits, - instance.transactionValidator.getMinWeightMagnitude()); + final TransactionViewModel transactionViewModel = transactionValidator.validateTrits(txTrits, + transactionValidator.getMinWeightMagnitude()); elements.add(transactionViewModel); } for (final TransactionViewModel transactionViewModel : elements) { //store transactions - if(transactionViewModel.store(instance.tangle, instance.snapshotProvider.getInitialSnapshot())) { + if(transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot())) { transactionViewModel.setArrivalTime(System.currentTimeMillis() / 1000L); - 
instance.transactionValidator.updateStatus(transactionViewModel); + transactionValidator.updateStatus(transactionViewModel); transactionViewModel.updateSender("local"); - transactionViewModel.update(instance.tangle, instance.snapshotProvider.getInitialSnapshot(), "sender"); + transactionViewModel.update(tangle, snapshotProvider.getInitialSnapshot(), "sender"); } } + + return AbstractResponse.createEmptyResponse(); } /** @@ -868,6 +692,7 @@ public void storeTransactionsStatement(List trytes) throws Exception { * * @return {@link com.iota.iri.service.dto.AbstractResponse.Emptyness} **/ + @Document(name="interruptAttachingToTangle") private AbstractResponse interruptAttachingToTangleStatement(){ pearlDiver.cancel(); return AbstractResponse.createEmptyResponse(); @@ -879,35 +704,36 @@ private AbstractResponse interruptAttachingToTangleStatement(){ * @return {@link com.iota.iri.service.dto.GetNodeInfoResponse} * @throws Exception When we cant find the first milestone in the database **/ + @Document(name="getNodeInfo") private AbstractResponse getNodeInfoStatement() throws Exception{ - String name = instance.configuration.isTestnet() ? IRI.TESTNET_NAME : IRI.MAINNET_NAME; - MilestoneViewModel milestone = MilestoneViewModel.first(instance.tangle); + String name = configuration.isTestnet() ? 
IRI.TESTNET_NAME : IRI.MAINNET_NAME; + MilestoneViewModel milestone = MilestoneViewModel.first(tangle); return GetNodeInfoResponse.create( - name, - IRI.VERSION, + name, + IotaUtils.getIriVersion(), Runtime.getRuntime().availableProcessors(), Runtime.getRuntime().freeMemory(), System.getProperty("java.version"), Runtime.getRuntime().maxMemory(), Runtime.getRuntime().totalMemory(), - instance.latestMilestoneTracker.getLatestMilestoneHash(), - instance.latestMilestoneTracker.getLatestMilestoneIndex(), + latestMilestoneTracker.getLatestMilestoneHash(), + latestMilestoneTracker.getLatestMilestoneIndex(), - instance.snapshotProvider.getLatestSnapshot().getHash(), - instance.snapshotProvider.getLatestSnapshot().getIndex(), + snapshotProvider.getLatestSnapshot().getHash(), + snapshotProvider.getLatestSnapshot().getIndex(), milestone != null ? milestone.index() : -1, - instance.snapshotProvider.getLatestSnapshot().getInitialIndex(), + snapshotProvider.getLatestSnapshot().getInitialIndex(), - instance.node.howManyNeighbors(), - instance.node.queuedTransactionsSize(), + node.howManyNeighbors(), + node.queuedTransactionsSize(), System.currentTimeMillis(), - instance.tipsViewModel.size(), - instance.transactionRequester.numberOfTransactionsToRequest(), + tipsViewModel.size(), + transactionRequester.numberOfTransactionsToRequest(), features, - instance.configuration.getCoordinator().toString()); + configuration.getCoordinator().toString()); } /** @@ -916,26 +742,27 @@ private AbstractResponse getNodeInfoStatement() throws Exception{ * @return {@link GetNodeAPIConfigurationResponse} */ private AbstractResponse getNodeAPIConfigurationStatement() { - return GetNodeAPIConfigurationResponse.create(instance.configuration); + return GetNodeAPIConfigurationResponse.create(configuration); } /** *

- * Get the inclusion states of a set of transactions. - * This is for determining if a transaction was accepted and confirmed by the network or not. - * You can search for multiple tips (and thus, milestones) to get past inclusion states of transactions. + * Get the inclusion states of a set of transactions. + * This endpoint determines if a transaction is confirmed by the network (referenced by a valid milestone). + * You can search for multiple tips (and thus, milestones) to get past inclusion states of transactions. *

*

- * This API call returns a list of boolean values in the same order as the submitted transactions.
- * Boolean values will be true for confirmed transactions, otherwise false. + * This API call returns a list of boolean values in the same order as the submitted transactions. + * Boolean values will be true for confirmed transactions, otherwise false. *

* Returns an {@link com.iota.iri.service.dto.ErrorResponse} if a tip is missing or the subtangle is not solid * * @param transactions List of transactions you want to get the inclusion state for. - * @param tips List of tips (including milestones) you want to search for the inclusion state. + * @param tips List of tip transaction hashes (including milestones) you want to search for * @return {@link com.iota.iri.service.dto.GetInclusionStatesResponse} * @throws Exception When a transaction cannot be loaded from hash **/ + @Document(name="getInclusionStates") private AbstractResponse getInclusionStatesStatement( final List transactions, final List tips) throws Exception { @@ -954,7 +781,7 @@ private AbstractResponse getInclusionStatesStatement( List tipsIndex = new LinkedList<>(); { for(Hash tip: tps) { - TransactionViewModel tx = TransactionViewModel.fromHash(instance.tangle, tip); + TransactionViewModel tx = TransactionViewModel.fromHash(tangle, tip); if (tx.getType() != TransactionViewModel.PREFILLED_SLOT) { tipsIndex.add(tx.snapshotIndex()); } @@ -978,7 +805,7 @@ private AbstractResponse getInclusionStatesStatement( // Sets to 1 if the transaction index is below the max index of tips (included). for(Hash hash: trans) { - TransactionViewModel transaction = TransactionViewModel.fromHash(instance.tangle, hash); + TransactionViewModel transaction = TransactionViewModel.fromHash(tangle, hash); if(transaction.getType() == TransactionViewModel.PREFILLED_SLOT || transaction.snapshotIndex() == 0) { inclusionStates[count] = -1; } else if(transaction.snapshotIndex() > maxTipsIndex) { @@ -996,7 +823,7 @@ private AbstractResponse getInclusionStatesStatement( // Sorts all tips per snapshot index. Stops if a tip is not in our database, or just as a hash. 
for (final Hash tip : tps) { - TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(instance.tangle, tip); + TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(tangle, tip); if (transactionViewModel.getType() == TransactionViewModel.PREFILLED_SLOT){ return ErrorResponse.create("One of the tips is absent"); } @@ -1008,7 +835,7 @@ private AbstractResponse getInclusionStatesStatement( // Loop over all transactions without a state, and counts the amount per snapshot index for(int i = 0; i < inclusionStates.length; i++) { if(inclusionStates[i] == 0) { - TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(instance.tangle, trans.get(i)); + TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(tangle, trans.get(i)); int snapshotIndex = transactionViewModel.snapshotIndex(); sameIndexTransactionCount.putIfAbsent(snapshotIndex, 0); sameIndexTransactionCount.put(snapshotIndex, sameIndexTransactionCount.get(snapshotIndex) + 1); @@ -1072,7 +899,7 @@ private boolean exhaustiveSearchWithinIndex( // Check if the transactions have indeed this index. Otherwise ignore. // Starts off with the tips in nonAnalyzedTransactions, but transaction trunk & branch gets added. - final TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(instance.tangle, pointer); + final TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(tangle, pointer); if (transactionViewModel.snapshotIndex() == index) { // Do we have the complete transaction? if (transactionViewModel.getType() == TransactionViewModel.PREFILLED_SLOT) { @@ -1105,13 +932,12 @@ private boolean exhaustiveSearchWithinIndex( /** *

- * Find the transactions which match the specified input and return. - * All input values are lists, for which a list of return values (transaction hashes), in the same order, is returned for all individual elements. - * The input fields can either be bundles, addresses, tags or approvees. + * Find transactions that contain the given values in their transaction fields. + * All input values are lists, for which a list of return values (transaction hashes), in the same order, is returned for all individual elements. + * The input fields can either be bundles, addresses, tags or approvees. *

* - * Using multiple of these input fields returns the intersection of the values. - * Returns an {@link com.iota.iri.service.dto.ErrorResponse} if more than maxFindTxs was found. + * Using multiple transaction fields returns transactions hashes at the intersection of those values. * * @param request The map with input fields * Must contain at least one of 'bundles', 'addresses', 'tags' or 'approvees'. @@ -1119,6 +945,7 @@ private boolean exhaustiveSearchWithinIndex( * @throws Exception If a model cannot be loaded, no valid input fields were supplied * or the total transactions to find exceeds {@link APIConfig#getMaxFindTransactions()}. **/ + @Document(name="findTransactions") private synchronized AbstractResponse findTransactionsStatement(final Map request) throws Exception { final Set foundTransactions = new HashSet<>(); @@ -1129,7 +956,7 @@ private synchronized AbstractResponse findTransactionsStatement(final Map bundles = getParameterAsSet(request,"bundles",HASH_SIZE); for (final String bundle : bundles) { bundlesTransactions.addAll( - BundleViewModel.load(instance.tangle, HashFactory.BUNDLE.create(bundle)) + BundleViewModel.load(tangle, HashFactory.BUNDLE.create(bundle)) .getHashes()); } foundTransactions.addAll(bundlesTransactions); @@ -1141,7 +968,7 @@ private synchronized AbstractResponse findTransactionsStatement(final Map addresses = getParameterAsSet(request,"addresses",HASH_SIZE); for (final String address : addresses) { addressesTransactions.addAll( - AddressViewModel.load(instance.tangle, HashFactory.ADDRESS.create(address)) + AddressViewModel.load(tangle, HashFactory.ADDRESS.create(address)) .getHashes()); } foundTransactions.addAll(addressesTransactions); @@ -1154,14 +981,14 @@ private synchronized AbstractResponse findTransactionsStatement(final Map approvees = getParameterAsSet(request,"approvees",HASH_SIZE); for (final String approvee : approvees) { approveeTransactions.addAll( - TransactionViewModel.fromHash(instance.tangle, 
HashFactory.TRANSACTION.create(approvee)) - .getApprovers(instance.tangle) + TransactionViewModel.fromHash(tangle, HashFactory.TRANSACTION.create(approvee)) + .getApprovers(tangle) .getHashes()); } foundTransactions.addAll(approveeTransactions); @@ -1184,7 +1011,7 @@ private synchronized AbstractResponse findTransactionsStatement(final Map maxFindTxs){ - return ErrorResponse.create(overMaxErrorMessage); + return ErrorResponse.create(OVER_MAX_ERROR_MESSAGE); } final List elements = foundTransactions.stream() @@ -1215,7 +1042,7 @@ private synchronized AbstractResponse findTransactionsStatement(final Maptag is a {@link Hash#NULL_HASH}. */ private String padTag(String tag) throws ValidationException { @@ -1257,48 +1084,50 @@ private Set getParameterAsSet( * Broadcast a list of transactions to all neighbors. * The trytes to be used for this call should be valid, attached transaction trytes. * These trytes are returned by attachToTangle, or by doing proof of work somewhere else. - * + * * @param trytes the list of transaction trytes to broadcast + * @return {@link com.iota.iri.service.dto.AbstractResponse.Emptyness} **/ - public void broadcastTransactionsStatement(List trytes) { + @Document(name="broadcastTransactions") + public AbstractResponse broadcastTransactionsStatement(List trytes) { final List elements = new LinkedList<>(); byte[] txTrits = Converter.allocateTritsForTrytes(TRYTES_SIZE); for (final String tryte : trytes) { //validate all trytes Converter.trits(tryte, txTrits, 0); - final TransactionViewModel transactionViewModel = instance.transactionValidator.validateTrits( - txTrits, instance.transactionValidator.getMinWeightMagnitude()); + final TransactionViewModel transactionViewModel = transactionValidator.validateTrits( + txTrits, transactionValidator.getMinWeightMagnitude()); elements.add(transactionViewModel); } for (final TransactionViewModel transactionViewModel : elements) { //push first in line to broadcast transactionViewModel.weightMagnitude = 
Curl.HASH_LENGTH; - instance.node.broadcast(transactionViewModel); + node.broadcast(transactionViewModel); } + return AbstractResponse.createEmptyResponse(); } /** *

- * Calculates the confirmed balance, as viewed by the specified tips. - * If you do not specify the referencing tips, - * the returned balance is based on the latest confirmed milestone. - * In addition to the balances, it also returns the referencing tips (or milestone), - * as well as the index with which the confirmed balance was determined. - * The balances are returned as a list in the same order as the addresses were provided as input. + * Calculates the confirmed balance, as viewed by the specified tips. + * If the tips parameter is missing, the returned balance is correct as of the latest confirmed milestone. + * In addition to the balances, it also returns the referencing tips (or milestone), + * as well as the index with which the confirmed balance was determined. + * The balances are returned as a list in the same order as the addresses were provided as input. *

- * Returns an {@link ErrorResponse} if tips are not found, inconsistent or the threshold is invalid. * - * @param addresses The addresses where we will find the balance for. + * @param addresses Address for which to get the balance (do not include the checksum) * @param tips The optional tips to find the balance through. * @param threshold The confirmation threshold between 0 and 100(inclusive). * Should be set to 100 for getting balance by counting only confirmed transactions. * @return {@link com.iota.iri.service.dto.GetBalancesResponse} * @throws Exception When the database has encountered an error **/ - private AbstractResponse getBalancesStatement(List addresses, - List tips, + @Document(name="getBalances") + private AbstractResponse getBalancesStatement(List addresses, + List tips, int threshold) throws Exception { if (threshold <= 0 || threshold > 100) { @@ -1311,11 +1140,11 @@ private AbstractResponse getBalancesStatement(List addresses, List hashes; final Map balances = new HashMap<>(); - instance.snapshotProvider.getLatestSnapshot().lockRead(); - final int index = instance.snapshotProvider.getLatestSnapshot().getIndex(); + snapshotProvider.getLatestSnapshot().lockRead(); + final int index = snapshotProvider.getLatestSnapshot().getIndex(); if (tips == null || tips.isEmpty()) { - hashes = Collections.singletonList(instance.snapshotProvider.getLatestSnapshot().getHash()); + hashes = Collections.singletonList(snapshotProvider.getLatestSnapshot().getHash()); } else { hashes = tips.stream() .map(tip -> (HashFactory.TRANSACTION.create(tip))) @@ -1325,7 +1154,7 @@ private AbstractResponse getBalancesStatement(List addresses, try { // Get the balance for each address at the last snapshot for (final Hash address : addressList) { - Long value = instance.snapshotProvider.getLatestSnapshot().getBalance(address); + Long value = snapshotProvider.getLatestSnapshot().getBalance(address); if (value == null) { value = 0L; } @@ -1338,10 +1167,10 @@ private AbstractResponse 
getBalancesStatement(List addresses, // Calculate the difference created by the non-verified transactions which tips approve. // This difference is put in a map with address -> value changed for (Hash tip : hashes) { - if (!TransactionViewModel.exists(instance.tangle, tip)) { + if (!TransactionViewModel.exists(tangle, tip)) { return ErrorResponse.create("Tip not found: " + tip.toString()); } - if (!instance.ledgerService.isBalanceDiffConsistent(visitedHashes, diff, tip)) { + if (!ledgerService.isBalanceDiffConsistent(visitedHashes, diff, tip)) { return ErrorResponse.create("Tips are not consistent"); } } @@ -1349,7 +1178,7 @@ private AbstractResponse getBalancesStatement(List addresses, // Update the found balance according to 'diffs' balance changes diff.forEach((key, value) -> balances.computeIfPresent(key, (hash, aLong) -> value + aLong)); } finally { - instance.snapshotProvider.getLatestSnapshot().unlockRead(); + snapshotProvider.getLatestSnapshot().unlockRead(); } final List elements = addressList.stream() @@ -1400,22 +1229,22 @@ public static void incEllapsedTimePoW(long ellapsedTime) { /** *

- * Prepares the specified transactions (trytes) for attachment to the Tangle by doing Proof of Work. - * You need to supply branchTransaction as well as trunkTransaction. - * These are the tips which you're going to validate and reference with this transaction. - * These are obtainable by the getTransactionsToApprove API call. + * Prepares the specified transactions (trytes) for attachment to the Tangle by doing Proof of Work. + * You need to supply branchTransaction as well as trunkTransaction. + * These are the tips which you're going to validate and reference with this transaction. + * These are obtainable by the getTransactionsToApprove API call. *

*

- * The returned value is a different set of tryte values which you can input into - * broadcastTransactions and storeTransactions. - * The last 243 trytes of the return value consist of the following: - *

    - *
  • trunkTransaction
  • - *
  • branchTransaction
  • - *
  • nonce
  • - *
- * These are valid trytes which are then accepted by the network. + * The returned value is a different set of tryte values which you can input into + * broadcastTransactions and storeTransactions. + * The last 243 trytes of the return value consist of the following: + *
    + *
  • trunkTransaction
  • + *
  • branchTransaction
  • + *
  • nonce
  • + *
*

+ * These are valid trytes which are then accepted by the network. * @param trunkTransaction A reference to an external transaction (tip) used as trunk. * The transaction with index 0 will have this tip in its trunk. * All other transactions reference the previous transaction in the bundle (Their index-1). @@ -1427,9 +1256,10 @@ public static void incEllapsedTimePoW(long ellapsedTime) { * Each 0-trit on the end of the transaction represents 1 magnitude. * A 9-tryte represents 3 magnitudes, since a 9 is represented by 3 0-trits. * Transactions with a different minWeightMagnitude are compatible. - * @param trytes the list of trytes to prepare for network attachment, by doing proof of work. + * @param trytes The list of trytes to prepare for network attachment, by doing proof of work. * @return The list of transactions in trytes, ready to be broadcast to the network. **/ + @Document(name="attachToTangle", returnParam="trytes") public synchronized List attachToTangleStatement(Hash trunkTransaction, Hash branchTransaction, int minWeightMagnitude, List trytes) { @@ -1474,13 +1304,13 @@ public synchronized List attachToTangleStatement(Hash trunkTransaction, TransactionViewModel.ATTACHMENT_TIMESTAMP_UPPER_BOUND_TRINARY_OFFSET, TransactionViewModel.ATTACHMENT_TIMESTAMP_UPPER_BOUND_TRINARY_SIZE); - if (!pearlDiver.search(transactionTrits, minWeightMagnitude, instance.configuration.getPowThreads())) { + if (!pearlDiver.search(transactionTrits, minWeightMagnitude, configuration.getPowThreads())) { transactionViewModels.clear(); break; } //validate PoW - throws exception if invalid - final TransactionViewModel transactionViewModel = instance.transactionValidator.validateTrits( - transactionTrits, instance.transactionValidator.getMinWeightMagnitude()); + final TransactionViewModel transactionViewModel = transactionValidator.validateTrits( + transactionTrits, transactionValidator.getMinWeightMagnitude()); transactionViewModels.add(transactionViewModel); prevTransaction = 
transactionViewModel.getHash(); @@ -1564,7 +1394,7 @@ private void validateTrytes(String paramName, int size, String result) throws Va */ private void validateParamExists(Map request, String paramName) throws ValidationException { if (!request.containsKey(paramName)) { - throw new ValidationException(invalidParams); + throw new ValidationException(INVALID_PARAMS); } } @@ -1585,7 +1415,7 @@ private List getParameterAsList(Map request, String para validateParamExists(request, paramName); final List paramList = (List) request.get(paramName); if (paramList.size() > maxRequestList) { - throw new ValidationException(overMaxErrorMessage); + throw new ValidationException(OVER_MAX_ERROR_MESSAGE); } if (size > 0) { @@ -1619,53 +1449,14 @@ private boolean validTrytes(String trytes, int length, char zeroAllowed) { return matcher.matches(); } - /** - * Updates the {@link HttpServerExchange} {@link HeaderMap} with the proper response settings. - * @param exchange Contains information about what the client has send to us - */ - private static void setupResponseHeaders(HttpServerExchange exchange) { - final HeaderMap headerMap = exchange.getResponseHeaders(); - headerMap.add(new HttpString("Access-Control-Allow-Origin"),"*"); - headerMap.add(new HttpString("Keep-Alive"), "timeout=500, max=100"); - } - - /** - * Sets up the {@link HttpHandler} to have correct security settings. - * Remote authentication is blocked for anyone except - * those defined in {@link APIConfig#getRemoteAuth()} or localhost. - * This is done with {@link BasicAuthenticationMechanism} in a {@link AuthenticationMode#PRO_ACTIVE} mode. - * - * @param toWrap the path handler used in creating the server. 
- * @return The updated handler - */ - private HttpHandler addSecurity(HttpHandler toWrap) { - String credentials = instance.configuration.getRemoteAuth(); - if (credentials == null || credentials.isEmpty()) { - return toWrap; - } - - final Map users = new HashMap<>(2); - users.put(credentials.split(":")[0], credentials.split(":")[1].toCharArray()); - - IdentityManager identityManager = new MapIdentityManager(users); - HttpHandler handler = toWrap; - handler = new AuthenticationCallHandler(handler); - handler = new AuthenticationConstraintHandler(handler); - final List mechanisms = - Collections.singletonList(new BasicAuthenticationMechanism("Iota Realm")); - - handler = new AuthenticationMechanismsHandler(handler, mechanisms); - handler = new SecurityInitialHandler(AuthenticationMode.PRO_ACTIVE, identityManager, handler); - return handler; - } - /** * If a server is running, stops the server from accepting new incoming requests. * Does not remove the instance, so the server may be restarted without having to recreate it. 
*/ public void shutDown() { - if (server != null) { - server.stop(); + tipSelExecService.shutdownNow(); + if (connector != null) { + connector.stop(); } } @@ -1747,4 +1538,188 @@ private synchronized AbstractResponse storeMessageStatement(String address, Stri broadcastTransactionsStatement(powResult); return AbstractResponse.createEmptyResponse(); } + + // + // FUNCTIONAL COMMAND ROUTES + // + private Function, AbstractResponse> addNeighbors() { + return request -> { + List uris = getParameterAsList(request,"uris",0); + log.debug("Invoking 'addNeighbors' with {}", uris); + return addNeighborsStatement(uris); + }; + } + + private Function, AbstractResponse> attachToTangle() { + return request -> { + final Hash trunkTransaction = HashFactory.TRANSACTION.create(getParameterAsStringAndValidate(request,"trunkTransaction", HASH_SIZE)); + final Hash branchTransaction = HashFactory.TRANSACTION.create(getParameterAsStringAndValidate(request,"branchTransaction", HASH_SIZE)); + final int minWeightMagnitude = getParameterAsInt(request,"minWeightMagnitude"); + + final List trytes = getParameterAsList(request,"trytes", TRYTES_SIZE); + + List elements = attachToTangleStatement(trunkTransaction, branchTransaction, minWeightMagnitude, trytes); + return AttachToTangleResponse.create(elements); + }; + } + + private Function, AbstractResponse> broadcastTransactions() { + return request -> { + final List trytes = getParameterAsList(request,"trytes", TRYTES_SIZE); + broadcastTransactionsStatement(trytes); + return AbstractResponse.createEmptyResponse(); + }; + } + + private Function, AbstractResponse> findTransactions() { + return request -> { + try { + return findTransactionsStatement(request); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } + + private Function, AbstractResponse> getBalances() { + return request -> { + final List addresses = getParameterAsList(request,"addresses", HASH_SIZE); + final List tips = request.containsKey("tips") ? 
+ getParameterAsList(request,"tips", HASH_SIZE): + null; + final int threshold = getParameterAsInt(request, "threshold"); + + try { + return getBalancesStatement(addresses, tips, threshold); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } + + private Function, AbstractResponse> getInclusionStates() { + return request -> { + if (invalidSubtangleStatus()) { + return ErrorResponse.create(INVALID_SUBTANGLE); + } + final List transactions = getParameterAsList(request, "transactions", HASH_SIZE); + final List tips = getParameterAsList(request, "tips", HASH_SIZE); + + try { + return getInclusionStatesStatement(transactions, tips); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } + + private Function, AbstractResponse> getNeighbors() { + return request -> getNeighborsStatement(); + } + + private Function, AbstractResponse> getNodeInfo() { + return request -> { + try { + return getNodeInfoStatement(); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } + + private Function, AbstractResponse> getNodeAPIConfiguration() { + return request -> getNodeAPIConfigurationStatement(); + } + + private Function, AbstractResponse> getTips() { + return request -> { + try { + return getTipsStatement(); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } + + private Function, AbstractResponse> getTransactionsToApprove() { + return request -> { + Optional reference = request.containsKey("reference") ? 
+ Optional.of(HashFactory.TRANSACTION.create(getParameterAsStringAndValidate(request,"reference", HASH_SIZE))) + : Optional.empty(); + int depth = getParameterAsInt(request, "depth"); + + return getTransactionsToApproveStatement(depth, reference); + }; + } + + private Function, AbstractResponse> getTrytes() { + return request -> { + final List hashes = getParameterAsList(request,"hashes", HASH_SIZE); + try { + return getTrytesStatement(hashes); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } + + private Function, AbstractResponse> interruptAttachingToTangle() { + return request -> interruptAttachingToTangleStatement(); + } + + private Function, AbstractResponse> removeNeighbors() { + return request -> { + List uris = getParameterAsList(request,"uris",0); + log.debug("Invoking 'removeNeighbors' with {}", uris); + return removeNeighborsStatement(uris); + }; + } + + private Function, AbstractResponse> storeTransactions() { + return request -> { + try { + final List trytes = getParameterAsList(request,"trytes", TRYTES_SIZE); + storeTransactionsStatement(trytes); + } catch (Exception e) { + //transaction not valid + return ErrorResponse.create("Invalid trytes input"); + } + return AbstractResponse.createEmptyResponse(); + }; + } + + private Function, AbstractResponse> getMissingTransactions() { + return request -> { + synchronized (transactionRequester) { + List missingTx = Arrays.stream(transactionRequester.getRequestedTransactions()) + .map(Hash::toString) + .collect(Collectors.toList()); + return GetTipsResponse.create(missingTx); + } + }; + } + + private Function, AbstractResponse> checkConsistency() { + return request -> { + if (invalidSubtangleStatus()) { + return ErrorResponse.create(INVALID_SUBTANGLE); + } + final List transactions = getParameterAsList(request,"tails", HASH_SIZE); + try { + return checkConsistencyStatement(transactions); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } + private Function, 
AbstractResponse> wereAddressesSpentFrom() { + return request -> { + final List addresses = getParameterAsList(request,"addresses", HASH_SIZE); + try { + return wereAddressesSpentFromStatement(addresses); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }; + } } diff --git a/src/main/java/com/iota/iri/service/ApiCommand.java b/src/main/java/com/iota/iri/service/ApiCommand.java new file mode 100644 index 0000000000..280d42be8d --- /dev/null +++ b/src/main/java/com/iota/iri/service/ApiCommand.java @@ -0,0 +1,125 @@ +package com.iota.iri.service; + +/** + * + * ApiCommand is a list of all public API endpoints officially supported by IRI + * + */ +public enum ApiCommand { + + /** + * Add a temporary neighbor to this node + */ + ADD_NEIGHBORS("addNeighbors"), + + /** + * Prepare transactions for tangle attachment by doing proof of work + */ + ATTACH_TO_TANGLE("attachToTangle"), + + /** + * Broadcast transactions to the tangle + */ + BROADCAST_TRANSACTIONs("broadcastTransactions"), + + /** + * Check the consistency of a transaction + */ + CHECK_CONSISTENCY("checkConsistency"), + + /** + * Find transactions by bundle, address, tag and approve + */ + FIND_TRANSACTIONS("findTransactions"), + + /** + * Get the balance of an address + */ + GET_BALANCES("getBalances"), + + /** + * Get the acceptance of a transaction on the tangle + */ + GET_INCLUSION_STATES("getInclusionStates"), + + /** + * Get the neighbors on this node, including temporary added + */ + GET_NEIGHBORS("getNeighbors"), + + /** + * Get information about this node + */ + GET_NODE_INFO("getNodeInfo"), + + /** + * Get information about the API configuration + */ + GET_NODE_API_CONFIG("getNodeAPIConfiguration"), + + /** + * Get all tips currently on this node + */ + GET_TIPS("getTips"), + + /** + * Get all the transactions this node is currently requesting + */ + GET_MISSING_TRANSACTIONS("getMissingTransactions"), + + /** + * Get 2 transactions to approve for proof of work + */ + 
GET_TRANSACTIONS_TO_APPROVE("getTransactionsToApprove"), + + /** + * Get trytes of a transaction by its hash + */ + GET_TRYTES("getTrytes"), + + /** + * Stop attaching to the tangle + */ + INTERRUPT_ATTACHING_TO_TANGLE("interruptAttachingToTangle"), + + /** + * Temporary remove a neighbor from this node + */ + REMOVE_NEIGHBORS("removeNeighbors"), + + /** + * Store a transaction on this node, without broadcasting + */ + STORE_TRANSACTIONS("storeTransactions"), + + /** + * Check if an address has been spent from + */ + WERE_ADDRESSES_SPENT_FROM("wereAddressesSpentFrom"); + + private String name; + + private ApiCommand(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + + /** + * Looks up the {@link ApiCommand} based on its name + * + * @param name the name of the API we are looking for + * @return The ApiCommand if it exists, otherwise null + */ + public static ApiCommand findByName(String name) { + for (ApiCommand c : values()) { + if (c.toString().equals(name)) { + return c; + } + } + return null; + } +} diff --git a/src/main/java/com/iota/iri/service/ValidationException.java b/src/main/java/com/iota/iri/service/ValidationException.java index a71ef6d4f1..0d169d290a 100644 --- a/src/main/java/com/iota/iri/service/ValidationException.java +++ b/src/main/java/com/iota/iri/service/ValidationException.java @@ -1,6 +1,6 @@ package com.iota.iri.service; -public class ValidationException extends Exception { +public class ValidationException extends RuntimeException { /** * Initializes a new instance of the ValidationException. diff --git a/src/main/java/com/iota/iri/service/dto/AbstractResponse.java b/src/main/java/com/iota/iri/service/dto/AbstractResponse.java index 382d8f5bcb..c5f98c3086 100644 --- a/src/main/java/com/iota/iri/service/dto/AbstractResponse.java +++ b/src/main/java/com/iota/iri/service/dto/AbstractResponse.java @@ -7,7 +7,8 @@ /** * - * Every response that the IRI API gives is a child of this class.
+ * Every response that the IRI API gives is a child of this class. + * * Duration for every response is recorded automatically during the processing of a request. * **/ @@ -21,7 +22,7 @@ public abstract class AbstractResponse { private static class Emptyness extends AbstractResponse {} /** - * The duration it took to process this command in milliseconds + * Number of milliseconds it took to complete the request */ private Integer duration; diff --git a/src/main/java/com/iota/iri/service/dto/AttachToTangleResponse.java b/src/main/java/com/iota/iri/service/dto/AttachToTangleResponse.java index eb50d4fe07..be1681a60e 100644 --- a/src/main/java/com/iota/iri/service/dto/AttachToTangleResponse.java +++ b/src/main/java/com/iota/iri/service/dto/AttachToTangleResponse.java @@ -7,8 +7,8 @@ /** * * Contains information about the result of a successful {@code attachToTangle} API call. + * * @see {@link API#attachToTangleStatement} for how this response is created. - * */ public class AttachToTangleResponse extends AbstractResponse { diff --git a/src/main/java/com/iota/iri/service/dto/CheckConsistency.java b/src/main/java/com/iota/iri/service/dto/CheckConsistency.java index 7e8d9f810e..1e36eff6f1 100644 --- a/src/main/java/com/iota/iri/service/dto/CheckConsistency.java +++ b/src/main/java/com/iota/iri/service/dto/CheckConsistency.java @@ -11,17 +11,13 @@ public class CheckConsistency extends AbstractResponse { /** - * The state of all the provided tails, which is set to {@code false} on the following checks
- *
    - *
  1. Missing a reference transaction
  2. - *
  3. Invalid bundle
  4. - *
  5. Tails of tails are invalid
  6. - *
+ * States of the specified transactions in the same order as the values in the `tails` parameter. + * A `true` value means that the transaction is consistent. */ private boolean state; /** - * If state is {@code false}, this provides information on the cause of the inconsistency. + * If state is {@code false}, this contains information about why the transaction is inconsistent. */ private String info; diff --git a/src/main/java/com/iota/iri/service/dto/FindTransactionsResponse.java b/src/main/java/com/iota/iri/service/dto/FindTransactionsResponse.java index 29658338f0..c2fc4f79bc 100644 --- a/src/main/java/com/iota/iri/service/dto/FindTransactionsResponse.java +++ b/src/main/java/com/iota/iri/service/dto/FindTransactionsResponse.java @@ -15,10 +15,12 @@ public class FindTransactionsResponse extends AbstractResponse { /** * The transaction hashes which are returned depend on your input. * For each specified input value, the command will return the following: - * bundles: returns the list of transactions which contain the specified bundle hash. - * addresses: returns the list of transactions which have the specified address as an input/output field. - * tags: returns the list of transactions which contain the specified tag value. - * approvees: returns the list of transactions which reference (i.e. approve) the specified transaction. + *
    + *
  • bundles: returns an array of transaction hashes that contain the given bundle hash.
  • + *
  • addresses: returns an array of transaction hashes that contain the given address in the `address` field.
  • + *
  • tags: returns an array of transaction hashes that contain the given value in the `tag` field.
  • + *
  • approvees: returns an array of transaction hashes that contain the given transactions in their `branchTransaction` or `trunkTransaction` fields.
  • + *
*/ private String [] hashes; diff --git a/src/main/java/com/iota/iri/service/dto/GetBalancesResponse.java b/src/main/java/com/iota/iri/service/dto/GetBalancesResponse.java index 57acf7e565..c4034d3e6f 100644 --- a/src/main/java/com/iota/iri/service/dto/GetBalancesResponse.java +++ b/src/main/java/com/iota/iri/service/dto/GetBalancesResponse.java @@ -13,12 +13,14 @@ public class GetBalancesResponse extends AbstractResponse { /** - * The balances as a list in the same order as the addresses were provided as input + * Array of balances in the same order as the `addresses` parameters were passed to the endpoint */ private List balances; /** - * The tips used to view the balances. If none were supplied this will be the latest confirmed milestone. + * The referencing tips. + * If no `tips` parameter was passed to the endpoint, + * this field contains the hash of the latest milestone that confirmed the balance */ private List references; diff --git a/src/main/java/com/iota/iri/service/dto/GetNeighborsResponse.java b/src/main/java/com/iota/iri/service/dto/GetNeighborsResponse.java index 3643b0dc8a..47c7a1c34c 100644 --- a/src/main/java/com/iota/iri/service/dto/GetNeighborsResponse.java +++ b/src/main/java/com/iota/iri/service/dto/GetNeighborsResponse.java @@ -2,12 +2,10 @@ import java.util.List; -import com.iota.iri.service.API; - /** * * Contains information about the result of a successful {@code getNeighbors} API call. - * See {@link API#getNeighborsStatement} for how this response is created. + * See {@link GetNeighborsResponse#create(List)} for how this response is created. * */ public class GetNeighborsResponse extends AbstractResponse { @@ -25,7 +23,7 @@ public class GetNeighborsResponse extends AbstractResponse { *
  • numberOfSentTransactions
  • *
  • numberOfStaleTransactions
  • * - * @see {@link com.iota.iri.service.dto.GetNeighborsResponse.Neighbor} + * @see com.iota.iri.service.dto.GetNeighborsResponse.Neighbor */ private Neighbor[] neighbors; @@ -58,77 +56,110 @@ public static AbstractResponse create(final List * A plain DTO of an iota neighbor. * */ - static class Neighbor { + @SuppressWarnings("unused") + public static class Neighbor { + /** + * The address of your neighbor + */ private String address; - public long numberOfAllTransactions, - numberOfRandomTransactionRequests, - numberOfNewTransactions, - numberOfInvalidTransactions, - numberOfStaleTransactions, - numberOfSentTransactions; - public String connectionType; + + /** + * Number of all transactions sent (invalid, valid, already-seen) + */ + private long numberOfAllTransactions; + + /** + * Random tip requests which were sent + */ + private long numberOfRandomTransactionRequests; + + /** + * New transactions which were transmitted. + */ + private long numberOfNewTransactions; + + /** + * Invalid transactions your neighbor has sent you. + * These are transactions with invalid signatures or overall schema. + */ + private long numberOfInvalidTransactions; + + /** + * Stale transactions your neighbor has sent you. + * These are transactions with a timestamp older than your latest snapshot. + */ + private long numberOfStaleTransactions; + + /** + * Amount of transactions send through your neighbor + */ + private long numberOfSentTransactions; + + /** + * The method type your neighbor is using to connect (TCP / UDP) + */ + private String connectionType; /** - * The address of your neighbor * - * @return the address + * {@link #address} */ public String getAddress() { return address; } /** - * Number of all transactions sent (invalid, valid, already-seen) * - * @return the number + * {@link #numberOfAllTransactions} */ public long getNumberOfAllTransactions() { return numberOfAllTransactions; } /** - * New transactions which were transmitted. 
* - * @return the number + * {@link #numberOfNewTransactions} */ public long getNumberOfNewTransactions() { return numberOfNewTransactions; } /** - * Invalid transactions your neighbor has sent you. - * These are transactions with invalid signatures or overall schema. * - * @return the number + * {@link #numberOfInvalidTransactions} */ public long getNumberOfInvalidTransactions() { return numberOfInvalidTransactions; } /** - * Stale transactions your neighbor has sent you. - * These are transactions with a timestamp older than your latest snapshot. - * - * @return the number + * + * {@link #numberOfStaleTransactions} */ public long getNumberOfStaleTransactions() { return numberOfStaleTransactions; } /** - * Amount of transactions send through your neighbor * - * @return the number + * {@link #numberOfSentTransactions} */ public long getNumberOfSentTransactions() { return numberOfSentTransactions; } /** - * The method type your neighbor is using to connect (TCP / UDP) * - * @return the connection type + * {@link #numberOfRandomTransactionRequests} + */ + public long getNumberOfRandomTransactionRequests() { + return numberOfRandomTransactionRequests; + } + + /** + * + * {@link #connectionType} */ public String getConnectionType() { return connectionType; diff --git a/src/main/java/com/iota/iri/service/ledger/LedgerService.java b/src/main/java/com/iota/iri/service/ledger/LedgerService.java index 830d35d04d..b785cdafc9 100644 --- a/src/main/java/com/iota/iri/service/ledger/LedgerService.java +++ b/src/main/java/com/iota/iri/service/ledger/LedgerService.java @@ -8,32 +8,39 @@ import java.util.Set; /** + *

    * Represents the service that contains all the relevant business logic for modifying and calculating the ledger - * state.
    - *
    - * This class is stateless and does not hold any domain specific models.
    + * state. + *

    + * This class is stateless and does not hold any domain specific models. */ public interface LedgerService { /** + *

    * Restores the ledger state after a restart of IRI, which allows us to fast forward to the point where we - * stopped before the restart.
    - *
    + * stopped before the restart. + *

    + *

    * It looks for the last solid milestone that was applied to the ledger in the database and then replays all * milestones leading up to this point by applying them to the latest snapshot. We do not check every single * milestone again but assume that the data in the database is correct. If the database would have any * inconsistencies and the application fails, the latest solid milestone tracker will check and apply the milestones - * one by one and repair the corresponding inconsistencies.
    + * one by one and repair the corresponding inconsistencies. + *

    * * @throws LedgerException if anything unexpected happens while trying to restore the ledger state */ void restoreLedgerState() throws LedgerException; /** - * Applies the given milestone to the ledger state.
    - *
    + *

    + * Applies the given milestone to the ledger state. + *

    + *

    * It first marks the transactions that were confirmed by this milestones as confirmed by setting their * corresponding {@code snapshotIndex} value. Then it generates the {@link com.iota.iri.model.StateDiff} that - * reflects the accumulated balance changes of all these transactions and applies it to the latest Snapshot.
    + * reflects the accumulated balance changes of all these transactions and applies it to the latest Snapshot. + *

    * * @param milestone the milestone that shall be applied * @return {@code true} if the milestone could be applied to the ledger and {@code false} otherwise @@ -42,11 +49,14 @@ public interface LedgerService { boolean applyMilestoneToLedger(MilestoneViewModel milestone) throws LedgerException; /** - * Checks the consistency of the combined balance changes of the given tips.
    - *
    + *

    + * Checks the consistency of the combined balance changes of the given tips. + *

    + *

    * It simply calculates the balance changes of the tips and then combines them to verify that they are leading to a * consistent ledger state (which means that they are not containing any double-spends or spends of non-existent - * IOTA).
    + * IOTA). + *

    * * @param hashes a list of hashes that reference the chosen tips * @return {@code true} if the tips are consistent and {@code false} otherwise @@ -55,14 +65,18 @@ public interface LedgerService { boolean tipsConsistent(List hashes) throws LedgerException; /** - * Checks if the balance changes of the transactions that are referenced by the given tip are consistent.
    - *
    + *

    + * Checks if the balance changes of the transactions that are referenced by the given tip are consistent. + *

    + *

    * It first calculates the balance changes, then adds them to the given {@code diff} and finally checks their * consistency. If we are only interested in the changes that are referenced by the given {@code tip} we need to - * pass in an empty map for the {@code diff} parameter.
    - *
    + * pass in an empty map for the {@code diff} parameter. + *

    + *

    * The {@code diff} as well as the {@code approvedHashes} parameters are modified, so they will contain the new - * balance changes and the approved transactions after this method terminates.
    + * balance changes and the approved transactions after this method terminates. + *

    * * @param approvedHashes a set of transaction hashes that shall be considered to be approved already (and that * consequently shall be excluded from the calculation) @@ -74,15 +88,18 @@ public interface LedgerService { boolean isBalanceDiffConsistent(Set approvedHashes, Map diff, Hash tip) throws LedgerException; /** + *

    * Generates the accumulated balance changes of the transactions that are directly or indirectly referenced by the * given transaction relative to the referenced milestone. Also persists the spent addresses that were found in the - * process
    - *
    + * process. + *

    + *

    * It simply iterates over all approvees that have not been confirmed yet and that have not been processed already - * (by being part of the {@code visitedNonMilestoneSubtangleHashes} set) and collects their balance changes.
    + * (by being part of the {@code visitedNonMilestoneSubtangleHashes} set) and collects their balance changes. + *

    * * @param visitedTransactions a set of transaction hashes that shall be considered to be visited already - * @param startTransaction the transaction that marks the start of the dag traversal and that has its approovees + * @param startTransaction the transaction that marks the start of the dag traversal and that has its approvees * examined * @return a map of the balance changes (addresses associated to their balance) or {@code null} if the balance could * not be generated due to inconsistencies diff --git a/src/main/java/com/iota/iri/service/ledger/impl/LedgerServiceImpl.java b/src/main/java/com/iota/iri/service/ledger/impl/LedgerServiceImpl.java index ff80f39664..cfb2495a8d 100644 --- a/src/main/java/com/iota/iri/service/ledger/impl/LedgerServiceImpl.java +++ b/src/main/java/com/iota/iri/service/ledger/impl/LedgerServiceImpl.java @@ -19,28 +19,31 @@ import java.util.*; /** - * Creates a service instance that allows us to perform ledger state specific operations.
    - *
    - * This class is stateless and does not hold any domain specific models.
    + *

    + * Creates a service instance that allows us to perform ledger state specific operations. + *

    + *

    + * This class is stateless and does not hold any domain specific models. + *

    */ public class LedgerServiceImpl implements LedgerService { /** - * Holds the tangle object which acts as a database interface.
    + * Holds the tangle object which acts as a database interface. */ private Tangle tangle; /** - * Holds the snapshot provider which gives us access to the relevant snapshots.
    + * Holds the snapshot provider which gives us access to the relevant snapshots. */ private SnapshotProvider snapshotProvider; /** - * Holds a reference to the service instance containing the business logic of the snapshot package.
    + * Holds a reference to the service instance containing the business logic of the snapshot package. */ private SnapshotService snapshotService; /** - * Holds a reference to the service instance containing the business logic of the milestone package.
    + * Holds a reference to the service instance containing the business logic of the milestone package. */ private MilestoneService milestoneService; @@ -49,15 +52,18 @@ public class LedgerServiceImpl implements LedgerService { private BundleValidator bundleValidator; /** - * Initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties.
    - *
    + *

    + * Initializes the instance and registers its dependencies. + *

    + *

    + * It stores the passed in values in their corresponding private properties. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code ledgerService = new LedgerServiceImpl().init(...);} * * @param tangle Tangle object which acts as a database interface @@ -235,17 +241,21 @@ public Map generateBalanceDiff(Set visitedTransactions, Hash s /** + *

    * Generates the {@link com.iota.iri.model.StateDiff} that belongs to the given milestone in the database and marks * all transactions that have been approved by the milestone accordingly by setting their {@code snapshotIndex} - * value.
    - *
    + * value. + *

    + *

    * It first checks if the {@code snapshotIndex} of the transaction belonging to the milestone was correctly set * already (to determine if this milestone was processed already) and proceeds to generate the {@link * com.iota.iri.model.StateDiff} if that is not the case. To do so, it calculates the balance changes, checks if - * they are consistent and only then writes them to the database.
    - *
    + * they are consistent and only then writes them to the database. + *

    + *

    * If inconsistencies in the {@code snapshotIndex} are found it issues a reset of the corresponding milestone to - * recover from this problem.
    + * recover from this problem. + *

    * * @param milestone the milestone that shall have its {@link com.iota.iri.model.StateDiff} generated * @return {@code true} if the {@link com.iota.iri.model.StateDiff} could be generated and {@code false} otherwise diff --git a/src/main/java/com/iota/iri/service/milestone/LatestMilestoneTracker.java b/src/main/java/com/iota/iri/service/milestone/LatestMilestoneTracker.java index eb49dec575..03b41fd726 100644 --- a/src/main/java/com/iota/iri/service/milestone/LatestMilestoneTracker.java +++ b/src/main/java/com/iota/iri/service/milestone/LatestMilestoneTracker.java @@ -4,38 +4,47 @@ import com.iota.iri.model.Hash; /** + *

    * The manager that keeps track of the latest milestone by incorporating a background worker that periodically checks if - * new milestones have arrived.
    - *
    + * new milestones have arrived. + *

    + *

    * Knowing about the latest milestone and being able to compare it to the latest solid milestone allows us to determine - * if our node is "in sync".
    + * if our node is "in sync". + *

    */ public interface LatestMilestoneTracker { /** - * Returns the index of the latest milestone that was seen by this tracker.
    - *
    - * It simply returns the internal property that is used to store the latest milestone index.
    + *

    + * Returns the index of the latest milestone that was seen by this tracker. + *

    + *

    + * It returns the internal property that is used to store the latest milestone index. + *

    * * @return the index of the latest milestone that was seen by this tracker */ int getLatestMilestoneIndex(); /** - * Returns the transaction hash of the latest milestone that was seen by this tracker.
    - *
    - * It simply returns the internal property that is used to store the latest milestone index.
    - * + *

    + * Returns the transaction hash of the latest milestone that was seen by this tracker. + *

    + *

    + * It returns the internal property that is used to store the latest milestone index. + *

    * @return the transaction hash of the latest milestone that was seen by this tracker */ Hash getLatestMilestoneHash(); /** - * Sets the latest milestone.
    - *
    - * It simply stores the passed in values in their corresponding internal properties and can therefore be used to + * Sets the latest milestone. + *

    + * It stores the passed in values in their corresponding internal properties and can therefore be used to * inform the {@link LatestSolidMilestoneTracker} about a new milestone. It is internally used to set the new * milestone but can also be used by tests to mock a certain behaviour or in case we detect a new milestone in other - * parts of the code.
    + * parts of the code. + *

    * * @param latestMilestoneHash the transaction hash of the milestone * @param latestMilestoneIndex the milestone index of the milestone @@ -43,10 +52,11 @@ public interface LatestMilestoneTracker { void setLatestMilestone(Hash latestMilestoneHash, int latestMilestoneIndex); /** - * Analyzes the given transaction to determine if it is a valid milestone.
    - *
    + * Analyzes the given transaction to determine if it is a valid milestone. + *

    * If the transaction that was analyzed represents a milestone, we check if it is younger than the current latest - * milestone and update the internal properties accordingly.
    + * milestone and update the internal properties accordingly. + *

    * * @param transaction the transaction that shall be examined * @return {@code true} if the milestone could be processed and {@code false} if the bundle is not complete, yet @@ -56,7 +66,7 @@ public interface LatestMilestoneTracker { /** * Does the same as {@link #processMilestoneCandidate(TransactionViewModel)} but automatically retrieves the - * transaction belonging to the passed in hash.
    + * transaction belonging to the passed in hash. * * @param transactionHash the hash of the transaction that shall be examined * @return {@code true} if the milestone could be processed and {@code false} if the bundle is not complete, yet @@ -65,11 +75,14 @@ public interface LatestMilestoneTracker { boolean processMilestoneCandidate(Hash transactionHash) throws MilestoneException; /** + *

    * Since the {@link LatestMilestoneTracker} scans all milestone candidates whenever IRI restarts, this flag gives us - * the ability to determine if this initialization process has finished.
    - *
    + * the ability to determine if this initialization process has finished. + *

    + *

    * The values returned by {@link #getLatestMilestoneHash()} and {@link #getLatestMilestoneIndex()} will potentially - * return wrong values until the scan has completed.
    + * return wrong values until the scan has completed. + *

    * * @return {@code true} if the initial scan of milestones has finished and {@code false} otherwise */ @@ -77,12 +90,12 @@ public interface LatestMilestoneTracker { /** * This method starts the background worker that automatically calls {@link #processMilestoneCandidate(Hash)} on all - * newly found milestone candidates to update the latest milestone.
    + * newly found milestone candidates to update the latest milestone. */ void start(); /** - * This method stops the background worker that updates the latest milestones.
    + * This method stops the background worker that updates the latest milestones. */ void shutdown(); } diff --git a/src/main/java/com/iota/iri/service/milestone/LatestSolidMilestoneTracker.java b/src/main/java/com/iota/iri/service/milestone/LatestSolidMilestoneTracker.java index d10b7178b6..ca99037665 100644 --- a/src/main/java/com/iota/iri/service/milestone/LatestSolidMilestoneTracker.java +++ b/src/main/java/com/iota/iri/service/milestone/LatestSolidMilestoneTracker.java @@ -3,21 +3,27 @@ import com.iota.iri.service.snapshot.SnapshotProvider; /** + *

    * This interface defines the contract for the manager that keeps track of the latest solid milestone by incorporating a - * background worker that periodically checks for new solid milestones.
    - *
    + * background worker that periodically checks for new solid milestones. + *

    + *

    * Whenever it finds a new solid milestone that hasn't been applied to the ledger state, yet it triggers the application * logic which in return updates the {@link SnapshotProvider#getLatestSnapshot()}. Since the latest solid milestone is * encoded in this latest snapshot of the node, this tracker does not introduce separate getters for the latest solid - * milestone.
    + * milestone. + *

    */ public interface LatestSolidMilestoneTracker { /** + *

    * This method searches for new solid milestones that follow the current latest solid milestone and that have not - * been applied to the ledger state yet and applies them.
    - *
    + * been applied to the ledger state yet and applies them. + *

    + *

    * It takes care of applying the solid milestones in the correct order by only allowing solid milestones to be - * applied that are directly following our current latest solid milestone.
    + * applied that are directly following our current latest solid milestone. + *

    * * @throws MilestoneException if anything unexpected happens while updating the latest solid milestone */ @@ -25,12 +31,12 @@ public interface LatestSolidMilestoneTracker { /** * This method starts the background worker that automatically calls {@link #trackLatestSolidMilestone()} - * periodically to keep the latest solid milestone up to date.
    + * periodically to keep the latest solid milestone up to date. */ void start(); /** - * This method stops the background worker that automatically updates the latest solid milestone.
    + * This method stops the background worker that automatically updates the latest solid milestone. */ void shutdown(); } diff --git a/src/main/java/com/iota/iri/service/milestone/MilestoneService.java b/src/main/java/com/iota/iri/service/milestone/MilestoneService.java index e9a0a98c23..91559d38c8 100644 --- a/src/main/java/com/iota/iri/service/milestone/MilestoneService.java +++ b/src/main/java/com/iota/iri/service/milestone/MilestoneService.java @@ -7,18 +7,22 @@ import java.util.Optional; /** - * Represents the service that contains all the relevant business logic for interacting with milestones.
    - *
    - * This class is stateless and does not hold any domain specific models.
    + *

    + * Represents the service that contains all the relevant business logic for interacting with milestones. + *

    + * This class is stateless and does not hold any domain specific models. */ public interface MilestoneService { /** + *

    * Finds the latest solid milestone that was previously processed by IRI (before a restart) by performing a search - * in the database.
    - *
    + * in the database. + *

    + *

    * It determines if the milestones were processed by checking the {@code snapshotIndex} value of their corresponding - * transactions.
    - * + * transactions. + *

    + * * @return the latest solid milestone that was previously processed by IRI or an empty value if no previously * processed solid milestone can be found * @throws MilestoneException if anything unexpected happend while performing the search @@ -26,11 +30,14 @@ public interface MilestoneService { Optional findLatestProcessedSolidMilestoneInDatabase() throws MilestoneException; /** - * Analyzes the given transaction to determine if it is a valid milestone.
    - *
    + *

    + * Analyzes the given transaction to determine if it is a valid milestone. + *

    + *

    * It first checks if all transactions that belong to the milestone bundle are known already and only then verifies - * the signature to analyze if the given milestone was really issued by the coordinator.
    - * + * the signature to analyze if the given milestone was really issued by the coordinator. + *

    + * * @param transactionViewModel transaction that shall be analyzed * @param milestoneIndex milestone index of the transaction (see {@link #getMilestoneIndex(TransactionViewModel)}) * @return validity status of the transaction regarding its role as a milestone @@ -40,23 +47,28 @@ MilestoneValidity validateMilestone(TransactionViewModel transactionViewModel, i throws MilestoneException; /** - * Updates the milestone index of all transactions that belong to a milestone.
    - *
    + *

    + * Updates the milestone index of all transactions that belong to a milestone. + *

    + *

    * It does that by iterating through all approvees of the milestone defined by the given {@code milestoneHash} until * it reaches transactions that have been approved by a previous milestone. This means that this method only works - * if the transactions belonging to the previous milestone have been updated already.
    - *
    + * if the transactions belonging to the previous milestone have been updated already. + *

    + *

    * While iterating through the transactions we also examine the milestone index that is currently set to detect * corruptions in the database where a following milestone was processed before the current one. If such a * corruption in the database is found we trigger a reset of the wrongly processed milestone to repair the ledger - * state and recover from this error.
    - *
    + * state and recover from this error. + *

    + *

    * In addition to these checks we also update the solid entry points, if we detect that a transaction references a * transaction that dates back before the last local snapshot (and that has not been marked as a solid entry point, * yet). This allows us to handle back-referencing transactions and maintain local snapshot files that can always be * used to bootstrap a node, even if the coordinator suddenly approves really old transactions (this only works * if the transaction that got referenced is still "known" to the node by having a sufficiently high pruning - * delay).
    + * delay). + *

    * * @param milestoneHash the hash of the transaction * @param newIndex the milestone index that shall be set @@ -65,29 +77,36 @@ MilestoneValidity validateMilestone(TransactionViewModel transactionViewModel, i void updateMilestoneIndexOfMilestoneTransactions(Hash milestoneHash, int newIndex) throws MilestoneException; /** + *

    * Resets all milestone related information of the transactions that were "confirmed" by the given milestone and - * rolls back the ledger state to the moment before the milestone was applied.
    - *
    + * rolls back the ledger state to the moment before the milestone was applied. + *

    + *

    * This allows us to reprocess the milestone in case of errors where the given milestone could not be applied to the * ledger state. It is for example used by the automatic repair routine of the {@link LatestSolidMilestoneTracker} - * (to recover from inconsistencies due to crashes of IRI).
    - *
    + * (to recover from inconsistencies due to crashes of IRI). + *

    + *

    * It recursively resets additional milestones if inconsistencies are found within the resetted milestone (wrong - * {@code milestoneIndex}es).
    - * + * {@code milestoneIndex}es). + *

    + * * @param index milestone index that shall be reverted * @throws MilestoneException if anything goes wrong while resetting the corrupted milestone */ void resetCorruptedMilestone(int index) throws MilestoneException; /** + *

    * Checks if the given transaction was confirmed by the milestone with the given index (or any of its * predecessors). - *
    + *

    + *

    * We determine if the transaction was confirmed by examining its {@code snapshotIndex} value. For this method to * work we require that the previous milestones have been processed already (which is enforced by the {@link * com.iota.iri.service.milestone.LatestSolidMilestoneTracker} which applies the milestones in the order that they - * are issued by the coordinator).
    + * are issued by the coordinator). + *

    * * @param transaction the transaction that shall be examined * @param milestoneIndex the milestone index that we want to check against @@ -98,7 +117,7 @@ MilestoneValidity validateMilestone(TransactionViewModel transactionViewModel, i /** * Does the same as {@link #isTransactionConfirmed(TransactionViewModel, int)} but defaults to the latest solid * milestone index for the {@code milestoneIndex} which means that the transaction has been included in our current - * ledger state.
    + * ledger state. * * @param transaction the transaction that shall be examined * @return {@code true} if the transaction belongs to the milestone and {@code false} otherwise @@ -106,11 +125,14 @@ MilestoneValidity validateMilestone(TransactionViewModel transactionViewModel, i boolean isTransactionConfirmed(TransactionViewModel transaction); /** - * Retrieves the milestone index of the given transaction by decoding the {@code OBSOLETE_TAG}.
    - *
    + *

    + * Retrieves the milestone index of the given transaction by decoding the {@code OBSOLETE_TAG}. + *

    + *

    * The returned result will of cause only have a reasonable value if we hand in a transaction that represents a real - * milestone.
    - * + * milestone. + *

    + * * @param milestoneTransaction the transaction that shall have its milestone index retrieved * @return the milestone index of the transaction */ diff --git a/src/main/java/com/iota/iri/service/milestone/MilestoneSolidifier.java b/src/main/java/com/iota/iri/service/milestone/MilestoneSolidifier.java index b4362e8a87..453c9bee3d 100644 --- a/src/main/java/com/iota/iri/service/milestone/MilestoneSolidifier.java +++ b/src/main/java/com/iota/iri/service/milestone/MilestoneSolidifier.java @@ -5,11 +5,11 @@ /** * This interface defines the contract for a manager that tries to solidify unsolid milestones by incorporating a * background worker that periodically checks the solidity of the milestones and issues transaction requests for the - * missing transactions until the milestones become solid.
    + * missing transactions until the milestones become solid. */ public interface MilestoneSolidifier { /** - * This method allows us to add new milestones to the solidifier that will consequently be solidified.
    + * This method allows us to add new milestones to the solidifier that will consequently be solidified. * * @param milestoneHash Hash of the milestone that shall be solidified * @param milestoneIndex index of the milestone that shall be solidified @@ -17,12 +17,12 @@ public interface MilestoneSolidifier { void add(Hash milestoneHash, int milestoneIndex); /** - * This method starts the background worker that asynchronously solidifies the milestones.
    + * This method starts the background worker that asynchronously solidifies the milestones. */ void start(); /** - * This method shuts down the background worker that asynchronously solidifies the milestones.
    + * This method shuts down the background worker that asynchronously solidifies the milestones. */ void shutdown(); } diff --git a/src/main/java/com/iota/iri/service/milestone/MilestoneValidity.java b/src/main/java/com/iota/iri/service/milestone/MilestoneValidity.java index 74784bdfd0..7c54f55e1c 100644 --- a/src/main/java/com/iota/iri/service/milestone/MilestoneValidity.java +++ b/src/main/java/com/iota/iri/service/milestone/MilestoneValidity.java @@ -1,7 +1,7 @@ package com.iota.iri.service.milestone; /** - * Validity states of milestone transactions that are used to express their "relevance" for the ledger state.
    + * Validity states of milestone transactions that are used to express their "relevance" for the ledger state. */ public enum MilestoneValidity { VALID, diff --git a/src/main/java/com/iota/iri/service/milestone/SeenMilestonesRetriever.java b/src/main/java/com/iota/iri/service/milestone/SeenMilestonesRetriever.java index 292415f4b4..45f8ec5664 100644 --- a/src/main/java/com/iota/iri/service/milestone/SeenMilestonesRetriever.java +++ b/src/main/java/com/iota/iri/service/milestone/SeenMilestonesRetriever.java @@ -1,36 +1,44 @@ package com.iota.iri.service.milestone; /** - * Attempts to retrieve the milestones that have been defined in the local snapshots file.
    - *
    + *

    + * Attempts to retrieve the milestones that have been defined in the local snapshots file. + *

    + *

    * The manager incorporates a background worker that proactively requests the missing milestones until all defined * milestones are known. After all milestones have been retrieved the manager shuts down automatically (to free the - * unused resources).
    - *
    + * unused resources). + *

    + *

    * Note: When we bootstrap a node with a local snapshot file, we are provided with a list of all seen milestones that * were known during the creation of the snapshot. This list allows new nodes or nodes that start over with an * empty database, to retrieve the missing milestones efficiently by directly requesting them from its neighbours - * (without having to wait for them to be discovered during the solidification process).
    - *
    + * (without having to wait for them to be discovered during the solidification process). + *

    + *

    * This speeds up the sync-times massively and leads to nodes that are up within minutes rather than hours or even - * days.
    + * days. + *

    */ public interface SeenMilestonesRetriever { /** - * Triggers the retrieval of the milestones by issuing transaction requests to the nodes neighbours.
    - *
    - * It gets periodically called by the background worker to automatically retrieve all missing milestones.
    + *

    + * Triggers the retrieval of the milestones by issuing transaction requests to the nodes neighbours. + *

    + *

    + * It gets periodically called by the background worker to automatically retrieve all missing milestones. + *

    */ void retrieveSeenMilestones(); /** * Starts the background worker that automatically calls {@link #retrieveSeenMilestones()} - * periodically to retrieves all "seen" missing milestones.
    + * periodically to retrieves all "seen" missing milestones. */ void start(); /** - * Stops the background worker that retrieves all "seen" missing milestones.
    + * Stops the background worker that retrieves all "seen" missing milestones. */ void shutdown(); } diff --git a/src/main/java/com/iota/iri/service/milestone/impl/LatestMilestoneTrackerImpl.java b/src/main/java/com/iota/iri/service/milestone/impl/LatestMilestoneTrackerImpl.java index 9fe203a3ea..fd6d575a0f 100644 --- a/src/main/java/com/iota/iri/service/milestone/impl/LatestMilestoneTrackerImpl.java +++ b/src/main/java/com/iota/iri/service/milestone/impl/LatestMilestoneTrackerImpl.java @@ -16,7 +16,6 @@ import com.iota.iri.utils.log.interval.IntervalLogger; import com.iota.iri.utils.thread.DedicatedScheduledExecutorService; import com.iota.iri.utils.thread.SilentScheduledExecutorService; -import com.iota.iri.zmq.MessageQ; import java.util.ArrayDeque; import java.util.Deque; @@ -25,128 +24,126 @@ import java.util.concurrent.TimeUnit; /** + *

    * Creates a tracker that automatically detects new milestones by incorporating a background worker that periodically * checks all transactions that are originating from the coordinator address and that exposes the found latest milestone - * via getters.
    - *
    + * via getters. + *

    + *

    * It can be used to determine the sync-status of the node by comparing these values against the latest solid - * milestone.
    + * milestone. + *

    */ public class LatestMilestoneTrackerImpl implements LatestMilestoneTracker { /** - * Holds the amount of milestone candidates that will be analyzed per iteration of the background worker.
    + * Holds the amount of milestone candidates that will be analyzed per iteration of the background worker. */ private static final int MAX_CANDIDATES_TO_ANALYZE = 5000; /** - * Holds the time (in milliseconds) between iterations of the background worker.
    + * Holds the time (in milliseconds) between iterations of the background worker. */ private static final int RESCAN_INTERVAL = 1000; /** - * Holds the logger of this class (a rate limited logger than doesn't spam the CLI output).
    + * Holds the logger of this class (a rate limited logger than doesn't spam the CLI output). */ private static final IntervalLogger log = new IntervalLogger(LatestMilestoneTrackerImpl.class); /** - * Holds the Tangle object which acts as a database interface.
    + * Holds the Tangle object which acts as a database interface. */ private Tangle tangle; /** * The snapshot provider which gives us access to the relevant snapshots that the node uses (for faster - * bootstrapping).
    + * bootstrapping). */ private SnapshotProvider snapshotProvider; /** - * Service class containing the business logic of the milestone package.
    + * Service class containing the business logic of the milestone package. */ private MilestoneService milestoneService; /** - * Holds a reference to the manager that takes care of solidifying milestones.
    + * Holds a reference to the manager that takes care of solidifying milestones. */ private MilestoneSolidifier milestoneSolidifier; /** - * Holds a reference to the ZeroMQ interface that allows us to emit messages for external recipients.
    - */ - private MessageQ messageQ; - - /** - * Holds the coordinator address which is used to filter possible milestone candidates.
    + * Holds the coordinator address which is used to filter possible milestone candidates. */ private Hash coordinatorAddress; /** - * Holds a reference to the manager of the background worker.
    + * Holds a reference to the manager of the background worker. */ private final SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService( "Latest Milestone Tracker", log.delegate()); /** - * Holds the milestone index of the latest milestone that we have seen / processed.
    + * Holds the milestone index of the latest milestone that we have seen / processed. */ private int latestMilestoneIndex; /** - * Holds the transaction hash of the latest milestone that we have seen / processed.
    + * Holds the transaction hash of the latest milestone that we have seen / processed. */ private Hash latestMilestoneHash; /** * A set that allows us to keep track of the candidates that have been seen and added to the {@link - * #milestoneCandidatesToAnalyze} already.
    + * #milestoneCandidatesToAnalyze} already. */ private final Set seenMilestoneCandidates = new HashSet<>(); /** - * A list of milestones that still have to be analyzed.
    + * A list of milestones that still have to be analyzed. */ private final Deque milestoneCandidatesToAnalyze = new ArrayDeque<>(); /** * A flag that allows us to detect if the background worker is in its first iteration (for different log - * handling).
    + * handling). */ private boolean firstRun = true; /** - * Flag which indicates if this tracker has finished its initial scan of all old milestone candidates.
    + * Flag which indicates if this tracker has finished its initial scan of all old milestone candidates. */ private boolean initialized = false; /** - * This method initializes the instance and registers its dependencies.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    * It simply stores the passed in values in their corresponding private properties and bootstraps the latest - * milestone with values for the latest milestone that can be found quickly.
    - *
    + * milestone with values for the latest milestone that can be found quickly. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code latestMilestoneTracker = new LatestMilestoneTrackerImpl().init(...);} * * @param tangle Tangle object which acts as a database interface * @param snapshotProvider manager for the snapshots that allows us to retrieve the relevant snapshots of this node * @param milestoneService contains the important business logic when dealing with milestones * @param milestoneSolidifier manager that takes care of solidifying milestones - * @param messageQ ZeroMQ interface that allows us to emit messages for external recipients * @param config configuration object which allows us to determine the important config parameters of the node * @return the initialized instance itself to allow chaining */ public LatestMilestoneTrackerImpl init(Tangle tangle, SnapshotProvider snapshotProvider, - MilestoneService milestoneService, MilestoneSolidifier milestoneSolidifier, MessageQ messageQ, - IotaConfig config) { + MilestoneService milestoneService, MilestoneSolidifier milestoneSolidifier, IotaConfig config) { this.tangle = tangle; this.snapshotProvider = snapshotProvider; this.milestoneService = milestoneService; this.milestoneSolidifier = milestoneSolidifier; - this.messageQ = messageQ; coordinatorAddress = config.getCoordinator(); @@ -157,13 +154,15 @@ public LatestMilestoneTrackerImpl init(Tangle tangle, SnapshotProvider snapshotP /** * {@inheritDoc} - *
    + * + *

    * In addition to setting the internal properties, we also issue a log message and publish the change to the ZeroMQ - * message processor so external receivers get informed about this change.
    + * message processor so external receivers get informed about this change. + *

    */ @Override public void setLatestMilestone(Hash latestMilestoneHash, int latestMilestoneIndex) { - messageQ.publish("lmi %d %d", this.latestMilestoneIndex, latestMilestoneIndex); + tangle.publish("lmi %d %d", this.latestMilestoneIndex, latestMilestoneIndex); log.delegate().info("Latest milestone has changed from #" + this.latestMilestoneIndex + " to #" + latestMilestoneIndex); @@ -192,9 +191,6 @@ public boolean processMilestoneCandidate(Hash transactionHash) throws MilestoneE /** * {@inheritDoc} - *
    - * If we detect a milestone that is either {@code INCOMPLETE} or not solid, yet we hand it over to the - * {@link MilestoneSolidifier} that takes care of requesting the missing parts of the milestone bundle.
    */ @Override public boolean processMilestoneCandidate(TransactionViewModel transaction) throws MilestoneException { @@ -223,12 +219,12 @@ public boolean processMilestoneCandidate(TransactionViewModel transaction) throw break; case INCOMPLETE: - milestoneSolidifier.add(transaction.getHash(), milestoneIndex); - - transaction.isMilestone(tangle, snapshotProvider.getInitialSnapshot(), true); - return false; + case INVALID: + // do not re-analyze anymore + return true; + default: // we can consider the milestone candidate processed and move on w/o farther action } @@ -247,11 +243,12 @@ public boolean isInitialScanComplete() { /** * {@inheritDoc} - *
    + *

    * We repeatedly call {@link #latestMilestoneTrackerThread()} to actively look for new milestones in our database. * This is a bit inefficient and should at some point maybe be replaced with a check on transaction arrival, but * this would required adjustments in the whole way IRI handles transactions and is therefore postponed for - * now.
    + * now. + *

    */ @Override public void start() { @@ -265,12 +262,15 @@ public void shutdown() { } /** + *

    * This method contains the logic for scanning for new latest milestones that gets executed in a background - * worker.
    - *
    + * worker. + *

    + *

    * It first collects all new milestone candidates that need to be analyzed, then analyzes them and finally checks if * the initialization is complete. In addition to this scanning logic it also issues regular log messages about the - * progress of the scanning.
    + * progress of the scanning. + *

    */ private void latestMilestoneTrackerThread() { try { @@ -292,12 +292,15 @@ private void latestMilestoneTrackerThread() { } /** - * This method emits a log message about the scanning progress.
    - *
    + *

    + * This method emits a log message about the scanning progress. + *

    + *

    * It only emits a log message if we have more than one {@link #milestoneCandidatesToAnalyze}, which means that the * very first call to this method in the "first run" on {@link #latestMilestoneTrackerThread()} will not produce any * output (which is the reason why we call this method a second time after we have collected all the - * candidates in the "first run").
    + * candidates in the "first run"). + *

    */ private void logProgress() { if (milestoneCandidatesToAnalyze.size() > 1) { @@ -306,11 +309,14 @@ private void logProgress() { } /** + *

    * This method collects the new milestones that have not been "seen" before, by collecting them in the {@link - * #milestoneCandidatesToAnalyze} queue.
    - *
    + * #milestoneCandidatesToAnalyze} queue. + *

    + *

    * We simply request all transaction that are originating from the coordinator address and treat them as potential - * milestone candidates.
    + * milestone candidates. + *

    * * @throws MilestoneException if anything unexpected happens while collecting the new milestone candidates */ @@ -331,12 +337,15 @@ private void collectNewMilestoneCandidates() throws MilestoneException { } /** + *

    * This method analyzes the milestone candidates by working through the {@link #milestoneCandidatesToAnalyze} - * queue.
    - *
    + * queue. + *

    + *

    * We only process {@link #MAX_CANDIDATES_TO_ANALYZE} at a time, to give the caller the option to terminate early * and pick up new milestones as fast as possible without being stuck with analyzing the old ones for too - * long.
    + * long. + *

    * * @throws MilestoneException if anything unexpected happens while analyzing the milestone candidates */ @@ -355,11 +364,14 @@ private void analyzeMilestoneCandidates() throws MilestoneException { } /** - * This method checks if the initialization is complete.
    - *
    + *

    + * This method checks if the initialization is complete. + *

    + *

    * It simply checks if the {@link #initialized} flag is not set yet and there are no more {@link * #milestoneCandidatesToAnalyze}. If the initialization was complete, we issue a log message and set the - * corresponding flag to {@code true}.
    + * corresponding flag to {@code true}. + *

    */ private void checkIfInitializationComplete() { if (!initialized && milestoneCandidatesToAnalyze.size() == 0) { @@ -370,12 +382,15 @@ private void checkIfInitializationComplete() { } /** + *

    * This method bootstraps this tracker with the latest milestone values that can easily be retrieved without - * analyzing any transactions (for faster startup).
    - *
    + * analyzing any transactions (for faster startup). + *

    + *

    * It first sets the latest milestone to the values found in the latest snapshot and then check if there is a younger * milestone at the end of our database. While this last entry in the database doesn't necessarily have to be the - * latest one we know it at least gives a reasonable value most of the times.
    + * latest one we know it at least gives a reasonable value most of the times. + *

    */ private void bootstrapLatestMilestoneValue() { Snapshot latestSnapshot = snapshotProvider.getLatestSnapshot(); diff --git a/src/main/java/com/iota/iri/service/milestone/impl/LatestSolidMilestoneTrackerImpl.java b/src/main/java/com/iota/iri/service/milestone/impl/LatestSolidMilestoneTrackerImpl.java index 52ba12b367..a2ddd867c4 100644 --- a/src/main/java/com/iota/iri/service/milestone/impl/LatestSolidMilestoneTrackerImpl.java +++ b/src/main/java/com/iota/iri/service/milestone/impl/LatestSolidMilestoneTrackerImpl.java @@ -14,93 +14,93 @@ import com.iota.iri.utils.log.interval.IntervalLogger; import com.iota.iri.utils.thread.DedicatedScheduledExecutorService; import com.iota.iri.utils.thread.SilentScheduledExecutorService; -import com.iota.iri.zmq.MessageQ; import java.util.concurrent.TimeUnit; /** + *

    * Creates a manager that keeps track of the latest solid milestones and that triggers the application of these * milestones and their corresponding balance changes to the latest {@link Snapshot} by incorporating a background - * worker that periodically checks for new solid milestones.
    - *
    + * worker that periodically checks for new solid milestones. + *

    + *

    * It extends this with a mechanisms to recover from database corruptions by using a backoff strategy that reverts the * changes introduced by previous milestones whenever an error is detected until the problem causing milestone was - * found.
    + * found. + *

    */ public class LatestSolidMilestoneTrackerImpl implements LatestSolidMilestoneTracker { /** * Holds the interval (in milliseconds) in which the {@link #trackLatestSolidMilestone()} method gets - * called by the background worker.
    + * called by the background worker. */ private static final int RESCAN_INTERVAL = 5000; /** - * Holds the logger of this class (a rate limited logger than doesn't spam the CLI output).
    + * Holds the logger of this class (a rate limited logger than doesn't spam the CLI output). */ private static final IntervalLogger log = new IntervalLogger(LatestSolidMilestoneTrackerImpl.class); /** - * Holds the Tangle object which acts as a database interface.
    + * Holds the Tangle object which acts as a database interface. */ private Tangle tangle; /** * The snapshot provider which gives us access to the relevant snapshots that the node uses (for the ledger - * state).
    + * state). */ private SnapshotProvider snapshotProvider; /** - * Holds a reference to the service instance containing the business logic of the milestone package.
    + * Holds a reference to the service instance containing the business logic of the milestone package. */ private MilestoneService milestoneService; /** - * Holds a reference to the manager that keeps track of the latest milestone.
    + * Holds a reference to the manager that keeps track of the latest milestone. */ private LatestMilestoneTracker latestMilestoneTracker; /** - * Holds a reference to the service that contains the logic for applying milestones to the ledger state.
    + * Holds a reference to the service that contains the logic for applying milestones to the ledger state. */ private LedgerService ledgerService; /** - * Holds a reference to the ZeroMQ interface that allows us to emit messages for external recipients.
    - */ - private MessageQ messageQ; - - /** - * Holds a reference to the manager of the background worker.
    + * Holds a reference to the manager of the background worker. */ private final SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService( "Latest Solid Milestone Tracker", log.delegate()); /** - * Boolean flag that is used to identify the first iteration of the background worker.
    + * Boolean flag that is used to identify the first iteration of the background worker. */ private boolean firstRun = true; /** - * Holds the milestone index of the milestone that caused the repair logic to get started.
    + * Holds the milestone index of the milestone that caused the repair logic to get started. */ private int errorCausingMilestoneIndex = Integer.MAX_VALUE; /** - * Counter for the backoff repair strategy (see {@link #repairCorruptedMilestone(MilestoneViewModel)}.
    + * Counter for the backoff repair strategy (see {@link #repairCorruptedMilestone(MilestoneViewModel)}. */ private int repairBackoffCounter = 0; /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It stores the passed in values in their corresponding private properties. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code latestSolidMilestoneTracker = new LatestSolidMilestoneTrackerImpl().init(...);} * * @param tangle Tangle object which acts as a database interface @@ -108,19 +108,17 @@ public class LatestSolidMilestoneTrackerImpl implements LatestSolidMilestoneTrac * @param milestoneService contains the important business logic when dealing with milestones * @param ledgerService the manager for * @param latestMilestoneTracker the manager that keeps track of the latest milestone - * @param messageQ ZeroMQ interface that allows us to emit messages for external recipients * @return the initialized instance itself to allow chaining */ public LatestSolidMilestoneTrackerImpl init(Tangle tangle, SnapshotProvider snapshotProvider, MilestoneService milestoneService, LedgerService ledgerService, - LatestMilestoneTracker latestMilestoneTracker, MessageQ messageQ) { + LatestMilestoneTracker latestMilestoneTracker) { this.tangle = tangle; this.snapshotProvider = snapshotProvider; this.milestoneService = milestoneService; this.ledgerService = ledgerService; this.latestMilestoneTracker = latestMilestoneTracker; - this.messageQ = messageQ; return this; } @@ -138,9 +136,11 @@ public void shutdown() { /** * {@inheritDoc} - *
    + * + *

    * In addition to applying the found milestones to the ledger state it also issues log messages and keeps the - * {@link LatestMilestoneTracker} in sync (if we happen to process a new latest milestone faster).
    + * {@link LatestMilestoneTracker} in sync (if we happen to process a new latest milestone faster). + *

    */ @Override public void trackLatestSolidMilestone() throws MilestoneException { @@ -168,10 +168,13 @@ public void trackLatestSolidMilestone() throws MilestoneException { } /** - * Contains the logic for the background worker.
    - *
    + *

    + * Contains the logic for the background worker. + *

    + *

    * It simply calls {@link #trackLatestSolidMilestone()} and wraps with a log handler that prevents the {@link - * MilestoneException} to crash the worker.
    + * MilestoneException} to crash the worker. + *

    */ private void latestSolidMilestoneTrackerThread() { try { @@ -189,11 +192,14 @@ private void latestSolidMilestoneTrackerThread() { } /** - * Applies the given milestone to the ledger.
    - *
    + *

    + * Applies the given milestone to the ledger. + *

    + *

    * If the application of the milestone fails, we start a repair routine which will revert the milestones preceding * our current milestone and consequently try to reapply them in the next iteration of the {@link - * #trackLatestSolidMilestone()} method (until the problem is solved).
    + * #trackLatestSolidMilestone()} method (until the problem is solved). + *

    * * @param milestone the milestone that shall be applied to the ledger state * @throws Exception if anything unexpected goes wrong while applying the milestone to the ledger @@ -209,9 +215,12 @@ private void applySolidMilestoneToLedger(MilestoneViewModel milestone) throws Ex } /** - * Checks if we are currently trying to repair a milestone.
    - *
    - * We simply use the {@link #repairBackoffCounter} as an indicator if a repair routine is running.
    + *

    + * Checks if we are currently trying to repair a milestone. + *

    + *

    + * We simply use the {@link #repairBackoffCounter} as an indicator if a repair routine is running. + *

    * * @return {@code true} if we are trying to repair a milestone and {@code false} otherwise */ @@ -220,10 +229,13 @@ private boolean isRepairRunning() { } /** - * Checks if we successfully repaired the corrupted milestone.
    - *
    + *

    + * Checks if we successfully repaired the corrupted milestone. + *

    + *

    * To determine if the repair routine was successful we check if the processed milestone has a higher index than the - * one that initially could not get applied to the ledger.
    + * one that initially could not get applied to the ledger. + *

    * * @param processedMilestone the currently processed milestone * @return {@code true} if we advanced to a milestone following the corrupted one and {@code false} otherwise @@ -233,10 +245,13 @@ private boolean isRepairSuccessful(MilestoneViewModel processedMilestone) { } /** - * Resets the internal variables that are used to keep track of the repair process.
    - *
    + *

    + * Resets the internal variables that are used to keep track of the repair process. + *

    + *

    * It gets called whenever we advance to a milestone that has a higher milestone index than the milestone that - * initially caused the repair routine to kick in (see {@link #repairCorruptedMilestone(MilestoneViewModel)}.
    + * initially caused the repair routine to kick in (see {@link #repairCorruptedMilestone(MilestoneViewModel)}. + *

    */ private void stopRepair() { repairBackoffCounter = 0; @@ -244,11 +259,13 @@ private void stopRepair() { } /** - * Keeps the {@link LatestMilestoneTracker} in sync with our current progress.
    - *
    + *

    + * Keeps the {@link LatestMilestoneTracker} in sync with our current progress. + *

    + *

    * Since the {@link LatestMilestoneTracker} scans all old milestones during its startup (which can take a while to - * finish) it can happen that we see a newer latest milestone faster than this manager.
    - *
    + * finish) it can happen that we see a newer latest milestone faster than this manager. + *

    * Note: This method ensures that the latest milestone index is always bigger or equals the latest solid milestone * index. * @@ -262,10 +279,13 @@ private void syncLatestMilestoneTracker(Hash milestoneHash, int milestoneIndex) } /** - * Emits a log message whenever the latest solid milestone changes.
    - *
    + *

    + * Emits a log message whenever the latest solid milestone changes. + *

    + *

    * It simply compares the current latest milestone index against the previous milestone index and emits the log - * messages using the {@link #log} and the {@link #messageQ} instances if it differs.
    + * messages using the {@link #log} and the {@link #messageQ} instances if it differs. + *

    * * @param prevSolidMilestoneIndex the milestone index before the change */ @@ -277,26 +297,31 @@ private void logChange(int prevSolidMilestoneIndex) { if (prevSolidMilestoneIndex != latestMilestoneIndex) { log.info("Latest SOLID milestone index changed from #" + prevSolidMilestoneIndex + " to #" + latestMilestoneIndex); - messageQ.publish("lmsi %d %d", prevSolidMilestoneIndex, latestMilestoneIndex); - messageQ.publish("lmhs %s", latestMilestoneHash); + tangle.publish("lmsi %d %d", prevSolidMilestoneIndex, latestMilestoneIndex); + tangle.publish("lmhs %s", latestMilestoneHash); } } /** - * Tries to actively repair the ledger by reverting the milestones preceding the given milestone.
    - *
    + *

    + * Tries to actively repair the ledger by reverting the milestones preceding the given milestone. + *

    + *

    * It gets called when a milestone could not be applied to the ledger state because of problems like "inconsistent * balances". While this should theoretically never happen (because milestones are by definition "consistent"), it * can still happen because IRI crashed or got stopped in the middle of applying a milestone or if a milestone - * was processed in the wrong order.
    - *
    + * was processed in the wrong order. + *

    + *

    * Every time we call this method the internal {@link #repairBackoffCounter} is incremented which causes the next * call of this method to repair an additional milestone. This means that whenever we face an error we first try to * reset only the last milestone, then the two last milestones, then the three last milestones (and so on ...) until - * the problem was fixed.
    - *
    + * the problem was fixed. + *

    + *

    * To be able to tell when the problem is fixed and the {@link #repairBackoffCounter} can be reset, we store the - * milestone index that caused the problem the first time we call this method.
    + * milestone index that caused the problem the first time we call this method. + *

    * * @param errorCausingMilestone the milestone that failed to be applied * @throws MilestoneException if we failed to reset the corrupted milestone diff --git a/src/main/java/com/iota/iri/service/milestone/impl/MilestoneServiceImpl.java b/src/main/java/com/iota/iri/service/milestone/impl/MilestoneServiceImpl.java index 5e81a9c55c..f393172f45 100644 --- a/src/main/java/com/iota/iri/service/milestone/impl/MilestoneServiceImpl.java +++ b/src/main/java/com/iota/iri/service/milestone/impl/MilestoneServiceImpl.java @@ -1,7 +1,6 @@ package com.iota.iri.service.milestone.impl; import com.iota.iri.BundleValidator; -import com.iota.iri.conf.ConsensusConfig; import com.iota.iri.conf.MilestoneConfig; import com.iota.iri.controllers.MilestoneViewModel; import com.iota.iri.controllers.TransactionViewModel; @@ -21,8 +20,6 @@ import com.iota.iri.storage.Tangle; import com.iota.iri.utils.Converter; import com.iota.iri.utils.dag.DAGHelper; -import com.iota.iri.zmq.MessageQ; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,70 +33,70 @@ import static com.iota.iri.service.milestone.MilestoneValidity.*; /** - * Creates a service instance that allows us to perform milestone specific operations.
    - *
    - * This class is stateless and does not hold any domain specific models.
    + *

    + * Creates a service instance that allows us to perform milestone specific operations. + *

    + *

    + * This class is stateless and does not hold any domain specific models. + *

    */ public class MilestoneServiceImpl implements MilestoneService { /** - * Holds the logger of this class.
    + * Holds the logger of this class. */ private final static Logger log = LoggerFactory.getLogger(MilestoneServiceImpl.class); /** - * Holds the tangle object which acts as a database interface.
    + * Holds the tangle object which acts as a database interface. */ private Tangle tangle; /** - * Holds the snapshot provider which gives us access to the relevant snapshots.
    + * Holds the snapshot provider which gives us access to the relevant snapshots. */ private SnapshotProvider snapshotProvider; /** - * Holds a reference to the service instance of the snapshot package that allows us to rollback ledger states.
    + * Holds a reference to the service instance of the snapshot package that allows us to roll back ledger states. */ private SnapshotService snapshotService; - /** - * Holds the ZeroMQ interface that allows us to emit messages for external recipients.
    - */ - private MessageQ messageQ; - /** * Configurations for milestone */ private MilestoneConfig config; - private BundleValidator bundleValidator; /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It stores the passed in values in their corresponding private properties. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code milestoneService = new MilestoneServiceImpl().init(...);} - *te + * * @param tangle Tangle object which acts as a database interface * @param snapshotProvider snapshot provider which gives us access to the relevant snapshots - * @param messageQ ZeroMQ interface that allows us to emit messages for external recipients + * @param snapshotService service for modifying and generating snapshots + * @param bundleValidator Validator to use when checking milestones * @param config config with important milestone specific settings * @return the initialized instance itself to allow chaining */ public MilestoneServiceImpl init(Tangle tangle, SnapshotProvider snapshotProvider, SnapshotService snapshotService, - BundleValidator bundleValidator, MessageQ messageQ, MilestoneConfig config) { + BundleValidator bundleValidator, MilestoneConfig config) { this.tangle = tangle; this.snapshotProvider = snapshotProvider; this.snapshotService = snapshotService; this.bundleValidator = bundleValidator; - this.messageQ = messageQ; this.config = config; return this; @@ -109,10 +106,12 @@ public MilestoneServiceImpl init(Tangle tangle, SnapshotProvider snapshotProvide /** * {@inheritDoc} - *
    + * + *

    * We first check the trivial case where the node was fully synced. If no processed solid milestone could be found * within the last two milestones of the node, we perform a binary search from present to past, which reduces the - * amount of database requests to a minimum (even with a huge amount of milestones in the database).
    + * amount of database requests to a minimum (even with a huge amount of milestones in the database). + *

    */ @Override public Optional findLatestProcessedSolidMilestoneInDatabase() throws MilestoneException { @@ -155,10 +154,12 @@ public void updateMilestoneIndexOfMilestoneTransactions(Hash milestoneHash, int /** * {@inheritDoc} - *
    + * + *

    * We redirect the call to {@link #resetCorruptedMilestone(int, Set)} while initiating the set of {@code * processedTransactions} with an empty {@link HashSet} which will ensure that we reset all found - * transactions.
    + * transactions. + *

    */ @Override public void resetCorruptedMilestone(int index) throws MilestoneException { @@ -174,9 +175,9 @@ public MilestoneValidity validateMilestone(TransactionViewModel transactionViewM } try { - if (MilestoneViewModel.get(tangle, milestoneIndex) != null) { - // Already validated. - return VALID; + MilestoneViewModel existingMilestone = MilestoneViewModel.get(tangle, milestoneIndex); + if(existingMilestone != null){ + return existingMilestone.getHash().equals(transactionViewModel.getHash()) ? VALID : INVALID; } final List> bundleTransactions = bundleValidator.validate(tangle, @@ -191,10 +192,11 @@ public MilestoneValidity validateMilestone(TransactionViewModel transactionViewM //the signed transaction - which references the confirmed transactions and contains // the Merkle tree siblings. int coordinatorSecurityLevel = config.getCoordinatorSecurityLevel(); - final TransactionViewModel siblingsTx = - bundleTransactionViewModels.get(coordinatorSecurityLevel); if (isMilestoneBundleStructureValid(bundleTransactionViewModels, coordinatorSecurityLevel)) { + final TransactionViewModel siblingsTx = bundleTransactionViewModels + .get(coordinatorSecurityLevel); + //milestones sign the normalized hash of the sibling transaction. byte[] signedHash = ISS.normalizedBundle(siblingsTx.getHash().trits()); @@ -268,11 +270,14 @@ public boolean isTransactionConfirmed(TransactionViewModel transaction) { //region [PRIVATE UTILITY METHODS] ///////////////////////////////////////////////////////////////////////////////// /** + *

    * Performs a binary search for the latest solid milestone which was already processed by the node and applied to - * the ledger state at some point in the past (i.e. before IRI got restarted).
    - *
    + * the ledger state at some point in the past (i.e. before IRI got restarted). + *

    + *

    * It searches from present to past using a binary search algorithm which quickly narrows down the amount of - * candidates even for big databases.
    + * candidates even for big databases. + *

    * * @param latestMilestone the latest milestone in the database (used to define the search range) * @return the latest solid milestone that was previously processed by IRI or an empty value if no previously @@ -310,14 +315,16 @@ private Optional binarySearchLatestProcessedSolidMilestoneIn } /** - * Determines the milestone in the middle of the range defined by {@code rangeStart} and {@code rangeEnd}.
    - *
    + *

    + * Determines the milestone in the middle of the range defined by {@code rangeStart} and {@code rangeEnd}. + *

    + *

    * It is used by the binary search algorithm of {@link #findLatestProcessedSolidMilestoneInDatabase()}. It first * calculates the index that represents the middle of the range and then tries to find the milestone that is closest - * to this index.
    - *
    + * to this index. + *

    * Note: We start looking for younger milestones first, because most of the times the latest processed solid - * milestone is close to the end.
    + * milestone is close to the end. * * @param rangeStart the milestone index representing the start of our search range * @param rangeEnd the milestone index representing the end of our search range @@ -338,11 +345,14 @@ private MilestoneViewModel getMilestoneInMiddleOfRange(int rangeStart, int range } /** - * Checks if the milestone was applied to the ledger at some point in the past (before a restart of IRI).
    - *
    + *

    + * Checks if the milestone was applied to the ledger at some point in the past (before a restart of IRI). + *

    + *

    * Since the {@code snapshotIndex} value is used as a flag to determine if the milestone was already applied to the * ledger, we can use it to determine if it was processed by IRI in the past. If this value is set we should also - * have a corresponding {@link StateDiff} entry in the database.
    + * have a corresponding {@link StateDiff} entry in the database. + *

    * * @param milestone the milestone that shall be checked * @return {@code true} if the milestone has been processed by IRI before and {@code false} otherwise @@ -356,7 +366,7 @@ private boolean wasMilestoneAppliedToLedger(MilestoneViewModel milestone) throws /** * This method implements the logic described by {@link #updateMilestoneIndexOfMilestoneTransactions(Hash, int)} but - * accepts some additional parameters that allow it to be reused by different parts of this service.
    + * accepts some additional parameters that allow it to be reused by different parts of this service. * * @param milestoneHash the hash of the transaction * @param correctIndex the milestone index of the milestone that would be set if all transactions are marked @@ -408,11 +418,14 @@ private void updateMilestoneIndexOfMilestoneTransactions(Hash milestoneHash, int } /** - * This method resets the {@code milestoneIndex} of a single transaction.
    - *
    + *

    + * This method resets the {@code milestoneIndex} of a single transaction. + *

    + *

    * In addition to setting the corresponding value, we also publish a message to the ZeroMQ message provider, which - * allows external recipients to get informed about this change.
    - * + * allows external recipients to get informed about this change. + *

    + * * @param transaction the transaction that shall have its {@code milestoneIndex} reset * @param index the milestone index that is set for the given transaction * @throws MilestoneException if anything unexpected happens while updating the transaction @@ -426,22 +439,26 @@ private void updateMilestoneIndexOfSingleTransaction(TransactionViewModel transa throw new MilestoneException("error while updating the snapshotIndex of " + transaction, e); } - messageQ.publish("%s %s %d sn", transaction.getAddressHash(), transaction.getHash(), index); - messageQ.publish("sn %d %s %s %s %s %s", index, transaction.getHash(), transaction.getAddressHash(), + tangle.publish("%s %s %d sn", transaction.getAddressHash(), transaction.getHash(), index); + tangle.publish("sn %d %s %s %s %s %s", index, transaction.getHash(), transaction.getAddressHash(), transaction.getTrunkTransactionHash(), transaction.getBranchTransactionHash(), transaction.getBundleHash()); } /** + *

    * This method prepares the update of the milestone index by checking the current {@code snapshotIndex} of the given - * transaction.
    - *
    + * transaction. + *

    + *

    * If the {@code snapshotIndex} is higher than the "correct one", we know that we applied the milestones in the * wrong order and need to reset the corresponding milestone that wrongly approved this transaction. We therefore - * add its index to the {@code corruptMilestones} set.
    - *
    + * add its index to the {@code corruptMilestones} set. + *

    + *

    * If the milestone does not have the new value set already we add it to the set of {@code transactionsToUpdate} so - * it can be updated by the caller accordingly.
    + * it can be updated by the caller accordingly. + *

    * * @param transaction the transaction that shall get its milestoneIndex updated * @param correctMilestoneIndex the milestone index that this transaction should be associated to (the index of the @@ -463,11 +480,14 @@ private void prepareMilestoneIndexUpdate(TransactionViewModel transaction, int c } /** - * This method patches the solid entry points if a back-referencing transaction is detected.
    - *
    + *

    + * This method patches the solid entry points if a back-referencing transaction is detected. + *

    + *

    * While we iterate through the approvees of a milestone we stop as soon as we arrive at a transaction that has a * smaller {@code snapshotIndex} than the milestone. If this {@code snapshotIndex} is also smaller than the index of - * the milestone of our local snapshot, we have detected a back-referencing transaction.
    + * the milestone of our local snapshot, we have detected a back-referencing transaction. + *

    * * @param initialSnapshot the initial snapshot holding the solid entry points * @param transaction the transactions that was referenced by the processed milestone @@ -481,12 +501,15 @@ private void patchSolidEntryPointsIfNecessary(Snapshot initialSnapshot, Transact } /** + *

    * This method is a utility method that checks if the transactions belonging to the potential milestone bundle have - * a valid structure (used during the validation of milestones).
    - *
    + * a valid structure (used during the validation of milestones). + *

    + *

    * It first checks if the bundle has enough transactions to conform to the given {@code securityLevel} and then * verifies that the {@code branchTransactionsHash}es are pointing to the {@code trunkTransactionHash} of the head - * transactions.
    + * transactions. + *

    * * @param bundleTransactions all transactions belonging to the milestone * @param securityLevel the security level used for the signature @@ -505,17 +528,21 @@ private boolean isMilestoneBundleStructureValid(List bundl } /** + *

    * This method does the same as {@link #resetCorruptedMilestone(int)} but additionally receives a set of {@code * processedTransactions} that will allow us to not process the same transactions over and over again while - * resetting additional milestones in recursive calls.
    - *
    + * resetting additional milestones in recursive calls. + *

    + *

    * It first checks if the desired {@code milestoneIndex} is reachable by this node and then triggers the reset - * by:
    - *
    - * 1. resetting the ledger state if it addresses a milestone before the current latest solid milestone
    - * 2. resetting the {@code milestoneIndex} of all transactions that were confirmed by the current milestone
    - * 3. deleting the corresponding {@link StateDiff} entry from the database
    - * + * by: + *

    + *
      + *
    1. resetting the ledger state if it addresses a milestone before the current latest solid milestone
    2. + *
    3. resetting the {@code milestoneIndex} of all transactions that were confirmed by the current milestone
    4. + *
    5. deleting the corresponding {@link StateDiff} entry from the database
    6. + *
    + * * @param index milestone index that shall be reverted * @param processedTransactions a set of transactions that have been processed already * @throws MilestoneException if anything goes wrong while resetting the corrupted milestone diff --git a/src/main/java/com/iota/iri/service/milestone/impl/MilestoneSolidifierImpl.java b/src/main/java/com/iota/iri/service/milestone/impl/MilestoneSolidifierImpl.java index 377ae6b0d9..c6fbf182b6 100644 --- a/src/main/java/com/iota/iri/service/milestone/impl/MilestoneSolidifierImpl.java +++ b/src/main/java/com/iota/iri/service/milestone/impl/MilestoneSolidifierImpl.java @@ -13,93 +13,109 @@ import java.util.concurrent.TimeUnit; /** - * This class implements the basic contract of the {@link MilestoneSolidifier} interface.
    - *
    + *

    + * This class implements the basic contract of the {@link MilestoneSolidifier} interface. + *

    + *

    * It manages a map of unsolid milestones to collect all milestones that have to be solidified. It then periodically - * issues checkSolidity calls on the earliest milestones to solidify them.
    - *
    + * issues checkSolidity calls on the earliest milestones to solidify them. + *

    + *

    * To save resources and make the call a little bit more efficient, we cache the earliest milestones in a separate map, * so the relatively expensive task of having to search for the next earliest milestone in the pool only has to be - * performed after a milestone has become solid or irrelevant for our node.
    + * performed after a milestone has become solid or irrelevant for our node. + *

    */ public class MilestoneSolidifierImpl implements MilestoneSolidifier { /** - * Defines the amount of milestones that we "simultaneously" try to solidify in one pass.
    + * Defines the amount of milestones that we "simultaneously" try to solidify in one pass. */ private static final int SOLIDIFICATION_QUEUE_SIZE = 10; /** - * Defines the interval in which solidity checks are issued (in milliseconds).
    + * Defines the interval in which solidity checks are issued (in milliseconds). */ private static final int SOLIDIFICATION_INTERVAL = 100; /** + *

    * Defines the maximum amount of transactions that are allowed to get processed while trying to solidify a - * milestone.
    - *
    + * milestone. + *

    + *

    * Note: We want to find the next previous milestone and not get stuck somewhere at the end of the tangle with a - * long running {@link TransactionValidator#checkSolidity(Hash, boolean)} call.
    + * long running {@link TransactionValidator#checkSolidity(Hash, boolean)} call. + *

    */ private static final int SOLIDIFICATION_TRANSACTIONS_LIMIT = 50000; /** - * Logger for this class allowing us to dump debug and status messages.
    + * Logger for this class allowing us to dump debug and status messages. */ private static final IntervalLogger log = new IntervalLogger(MilestoneSolidifier.class); /** - * Holds the snapshot provider which gives us access to the relevant snapshots.
    + * Holds the snapshot provider which gives us access to the relevant snapshots. */ private SnapshotProvider snapshotProvider; /** - * Holds a reference to the TransactionValidator which allows us to issue solidity checks.
    + * Holds a reference to the TransactionValidator which allows us to issue solidity checks. */ private TransactionValidator transactionValidator; /** - * Holds a reference to the manager of the background worker.
    + * Holds a reference to the manager of the background worker. */ private final SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService( "Milestone Solidifier", log.delegate()); /** - * Holds the milestones that were newly added, but not examined yet.
    - *
    + *

    + * Holds the milestones that were newly added, but not examined yet. + *

    + *

    * Note: This is used to be able to add milestones to the solidifier without having to synchronize the access to the - * underlying Maps.
    + * underlying Maps. + *

    */ private final Map newlyAddedMilestones = new ConcurrentHashMap<>(); /** - * Holds all unsolid milestones that shall be solidified (the transaction hash mapped to its milestone index).
    + * Holds all unsolid milestones that shall be solidified (the transaction hash mapped to its milestone index). */ private final Map unsolidMilestonesPool = new ConcurrentHashMap<>(); /** * Holds the milestones that are actively trying to be solidified by the background {@link Thread} (acts as a - * Queue).
    + * Queue). */ private final Map milestonesToSolidify = new HashMap<>(); /** - * Holds and entry that represents the youngest milestone in the {@link #milestonesToSolidify} Map.
    - *
    + *

    + * Holds and entry that represents the youngest milestone in the {@link #milestonesToSolidify} Map. + *

    + *

    * Note: It is used to check if new milestones that are being added, are older that the currently processed ones and - * should replace them in the queue (we solidify from oldest to youngest).
    + * should replace them in the queue (we solidify from oldest to youngest). + *

    */ private Map.Entry youngestMilestoneInQueue = null; /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It stores the passed in values in their corresponding private properties. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code milestoneSolidifier = new MilestoneSolidifierImpl().init(...);} * * @param snapshotProvider snapshot provider which gives us access to the relevant snapshots @@ -115,10 +131,12 @@ public MilestoneSolidifierImpl init(SnapshotProvider snapshotProvider, Transacti /** * {@inheritDoc} - *
    + * + *

    * Since this method might be called from a performance critical context, we simply add the milestone to a temporary * pool, that gets examined later by the background process. This doesn't just speed up the addition of new jobs but - * also prevents us from having to synchronize the access to the underlying maps.
    + * also prevents us from having to synchronize the access to the underlying maps. + *

    */ @Override public void add(Hash milestoneHash, int milestoneIndex) { @@ -141,14 +159,17 @@ public void shutdown() { } /** + *

    * This method takes an entry from the {@link #unsolidMilestonesPool} and adds it to the - * {@link #milestonesToSolidify} queue.
    - *
    + * {@link #milestonesToSolidify} queue. + *

    + *

    * It first checks if the given milestone is already part of the queue and then tries to add it. If the queue is not * full yet, the addition to the queue is relatively cheap, because the {@link #youngestMilestoneInQueue} marker can * be updated without iterating over all entries. If the queue reached its capacity already, we replace the entry * marked by the {@link #youngestMilestoneInQueue} marker and update the marker by recalculating it using - * {@link #determineYoungestMilestoneInQueue()}.
    + * {@link #determineYoungestMilestoneInQueue()}. + *

    * * @param milestoneEntry entry from the {@link #unsolidMilestonesPool} that shall get added to the queue */ @@ -172,11 +193,14 @@ private void addToSolidificationQueue(Map.Entry milestoneEntry) { } /** + *

    * This method contains the logic for the milestone solidification, that gets executed in a separate - * {@link Thread}.
    - *
    - * It simply executes the necessary steps periodically while waiting a short time to give the nodes the ability to - * answer to the issued transaction requests.
    + * {@link Thread}. + *

    + *

    + * It executes the necessary steps periodically while waiting a short time to give the nodes the ability to + * answer to the issued transaction requests. + *

    */ private void milestoneSolidificationThread() { processNewlyAddedMilestones(); @@ -185,13 +209,17 @@ private void milestoneSolidificationThread() { } /** - * This method processes the newly added milestones.
    - *
    + *

    + * This method processes the newly added milestones. + *

    + *

    * We process them lazy to decrease the synchronization requirements and speed up the addition of milestones from - * outside {@link Thread}s.
    - *
    - * It simply iterates over the milestones and adds them to the pool. If they are older than the - * {@link #youngestMilestoneInQueue}, we add the to the solidification queue.
    + * outside {@link Thread}s. + *

    + *

    + * It iterates over the milestones and adds them to the pool. If they are older than the + * {@link #youngestMilestoneInQueue}, we add the to the solidification queue. + *

    */ private void processNewlyAddedMilestones() { for (Iterator> iterator = newlyAddedMilestones.entrySet().iterator(); @@ -210,11 +238,14 @@ private void processNewlyAddedMilestones() { } /** - * This method contains the logic for processing the {@link #milestonesToSolidify}.
    - *
    + *

    + * This method contains the logic for processing the {@link #milestonesToSolidify}. + *

    + *

    * It iterates through the queue and checks if the corresponding milestones are still relevant for our node, or if * they could be successfully solidified. If the milestones become solid or irrelevant, we remove them from the - * pool and the queue and reset the {@link #youngestMilestoneInQueue} marker (if necessary).
    + * pool and the queue and reset the {@link #youngestMilestoneInQueue} marker (if necessary). + *

    */ private void processSolidificationQueue() { for (Iterator> iterator = milestonesToSolidify.entrySet().iterator(); @@ -236,11 +267,14 @@ private void processSolidificationQueue() { } /** + *

    * This method takes care of adding new milestones from the pool to the solidification queue, and filling it up - * again after it was processed / emptied before.
    - *
    + * again after it was processed / emptied before. + *

    + *

    * It first updates the {@link #youngestMilestoneInQueue} marker and then just adds new milestones as long as there - * is still space in the {@link #milestonesToSolidify} queue.
    + * is still space in the {@link #milestonesToSolidify} queue. + *

    */ private void refillSolidificationQueue() { if(youngestMilestoneInQueue == null && !milestonesToSolidify.isEmpty()) { @@ -256,10 +290,13 @@ private void refillSolidificationQueue() { } /** - * This method determines the youngest milestone in the solidification queue.
    - *
    + *

    + * This method determines the youngest milestone in the solidification queue. + *

    + *

    * It iterates over all milestones in the Queue and keeps track of the youngest one found (the one with the highest - * milestone index).
    + * milestone index). + *

    */ private void determineYoungestMilestoneInQueue() { youngestMilestoneInQueue = null; @@ -271,11 +308,14 @@ private void determineYoungestMilestoneInQueue() { } /** + *

    * This method returns the earliest seen Milestone from the unsolid milestones pool, that is not part of the - * {@link #milestonesToSolidify} queue yet.
    - *
    + * {@link #milestonesToSolidify} queue yet. + *

    + *

    * It simply iterates over all milestones in the pool and looks for the one with the lowest index, that is not - * getting actively solidified, yet.
    + * getting actively solidified, yet. + *

    * * @return the Map.Entry holding the earliest milestone or null if the pool does not contain any new candidates. */ @@ -293,16 +333,20 @@ private Map.Entry getNextSolidificationCandidate() { } /** - * This method performs the actual solidity check on the selected milestone.
    - *
    + *

    + * This method performs the actual solidity check on the selected milestone. + *

    + *

    * It first dumps a log message to keep the node operator informed about the progress of solidification, and then * issues the {@link TransactionValidator#checkSolidity(Hash, boolean, int)} call that starts the solidification - * process.
    - *
    + * process. + *

    + *

    * We limit the amount of transactions that may be processed during the solidity check, since we want to solidify * from the oldest milestone to the newest one and not "block" the solidification with a very recent milestone that * needs to traverse huge chunks of the tangle. The main goal of this is to give the solidification just enough - * "resources" to discover the previous milestone while at the same time allowing fast solidity checks.
    + * "resources" to discover the previous milestone while at the same time allowing fast solidity checks. + *

    * * @param currentEntry milestone entry that shall be checked * @return true if the given milestone is solid or false otherwise diff --git a/src/main/java/com/iota/iri/service/milestone/impl/SeenMilestonesRetrieverImpl.java b/src/main/java/com/iota/iri/service/milestone/impl/SeenMilestonesRetrieverImpl.java index b48b26e5ac..31fd66fafe 100644 --- a/src/main/java/com/iota/iri/service/milestone/impl/SeenMilestonesRetrieverImpl.java +++ b/src/main/java/com/iota/iri/service/milestone/impl/SeenMilestonesRetrieverImpl.java @@ -15,69 +15,76 @@ import java.util.concurrent.TimeUnit; /** - * Creates a manager that proactively requests the missing "seen milestones" (defined in the local snapshot file).
    - *
    - * It simply stores the passed in dependencies in their corresponding properties and then makes a copy of the {@code - * seenMilestones} of the initial snapshot which will consequently be requested.
    - *
    + *

    + * Creates a manager that proactively requests the missing "seen milestones" (defined in the local snapshot file). + *

    + *

    + * It stores the passed in dependencies in their corresponding properties and then makes a copy of the {@code + * seenMilestones} of the initial snapshot which will consequently be requested. + *

    + *

    * Once the manager finishes to request all "seen milestones" it will automatically {@link #shutdown()} (when being - * {@link #start()}ed before).
    + * {@link #start()}ed before). + *

    */ public class SeenMilestonesRetrieverImpl implements SeenMilestonesRetriever { /** - * Defines how far ahead of the latest solid milestone we are requesting the missing milestones.
    + * Defines how far ahead of the latest solid milestone we are requesting the missing milestones. */ private static final int RETRIEVE_RANGE = 50; /** * Defines the interval (in milliseconds) in which the background worker will check for new milestones to - * request.
    + * request. */ private static final int RESCAN_INTERVAL = 1000; /** - * Holds the logger of this class (a rate limited logger than doesn't spam the CLI output).
    + * Holds the logger of this class (a rate limited logger than doesn't spam the CLI output). */ private static final IntervalLogger log = new IntervalLogger(SeenMilestonesRetrieverImpl.class); /** - * Tangle object which acts as a database interface.
    + * Tangle object which acts as a database interface. */ private Tangle tangle; /** - * The snapshot provider which gives us access to the relevant snapshots to calculate our range.
    + * The snapshot provider which gives us access to the relevant snapshots to calculate our range. */ private SnapshotProvider snapshotProvider; /** * Holds a reference to the {@link TransactionRequester} that allows us to issue requests for the missing - * milestones.
    + * milestones. */ private TransactionRequester transactionRequester; /** - * Holds a reference to the manager of the background worker.
    + * Holds a reference to the manager of the background worker. */ private final SilentScheduledExecutorService executorService = new DedicatedScheduledExecutorService( "Seen Milestones Retriever", log.delegate()); /** - * The list of seen milestones that need to be requested.
    + * The list of seen milestones that need to be requested. */ private Map seenMilestones; /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties and creates a working copy of the - * seen milestones (which will get processed by the background worker).
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It stores the passed in values in their corresponding private properties and creates a working copy of the + * seen milestones (which will get processed by the background worker). + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code seenMilestonesRetriever = new SeenMilestonesRetrieverImpl().init(...);} * * @param tangle Tangle object which acts as a database interface @@ -99,16 +106,20 @@ public SeenMilestonesRetrieverImpl init(Tangle tangle, SnapshotProvider snapshot /** * {@inheritDoc} - *
    + * + *

    * It simply iterates over the set of seenMilestones and requests them if they are in the range of * [genesisMilestone ... latestSolidMilestone + RETRIEVE_RANGE]. Milestones that are older than this range get * deleted because they are irrelevant for the ledger state and milestones that are younger than this range get - * ignored to be processed later.
    - *
    + * ignored to be processed later. + *

    + *

    * This gives the node enough resources to solidify the next milestones without getting its requests queue filled - * with milestone requests that will become relevant only much later (this achieves a linear sync speed).
    - *
    - * Note: If no more seen milestones have to be requested, this manager shuts down automatically.
    + * with milestone requests that will become relevant only much later (this achieves a linear sync speed). + *

    + *

    + * Note: If no more seen milestones have to be requested, this manager shuts down automatically. + *

    */ @Override public void retrieveSeenMilestones() { diff --git a/src/main/java/com/iota/iri/service/restserver/ApiProcessor.java b/src/main/java/com/iota/iri/service/restserver/ApiProcessor.java new file mode 100644 index 0000000000..2a7131f475 --- /dev/null +++ b/src/main/java/com/iota/iri/service/restserver/ApiProcessor.java @@ -0,0 +1,23 @@ +package com.iota.iri.service.restserver; + +import java.net.InetAddress; + +import com.iota.iri.service.dto.AbstractResponse; + +/** + * + * Interface that defines the API call handling + * + */ +@FunctionalInterface +public interface ApiProcessor { + + /** + * Processes the request according to the + * + * @param request the request body, unprocessed + * @param inetAddress the address from the API caller + * @return The response for this request + */ + AbstractResponse processFunction(String request, InetAddress inetAddress); +} diff --git a/src/main/java/com/iota/iri/service/restserver/RestConnector.java b/src/main/java/com/iota/iri/service/restserver/RestConnector.java new file mode 100644 index 0000000000..815b6e10e6 --- /dev/null +++ b/src/main/java/com/iota/iri/service/restserver/RestConnector.java @@ -0,0 +1,27 @@ +package com.iota.iri.service.restserver; + +/** + * + * Connector interface which contains logic for starting and stopping a REST server + * + */ +public interface RestConnector { + + /** + * Initializes the REST server. + * + * @param processFunction the function/class we call after dependency specific handling + */ + void init(ApiProcessor processFunction); + + /** + * Starts the server. 
+ * If {@link #init(ApiProcessor)} has not been called, nothing happens + */ + void start(); + + /** + * Stops the REST server, so no more API calls can be made + */ + void stop(); +} diff --git a/src/main/java/com/iota/iri/service/restserver/resteasy/RestEasy.java b/src/main/java/com/iota/iri/service/restserver/resteasy/RestEasy.java new file mode 100644 index 0000000000..e1b30e67e1 --- /dev/null +++ b/src/main/java/com/iota/iri/service/restserver/resteasy/RestEasy.java @@ -0,0 +1,315 @@ +package com.iota.iri.service.restserver.resteasy; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.ws.rs.ApplicationPath; +import javax.ws.rs.core.Application; +import org.jboss.resteasy.plugins.server.undertow.UndertowJaxrsServer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.xnio.channels.StreamSinkChannel; +import org.xnio.streams.ChannelInputStream; + +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.iota.iri.Iota; +import com.iota.iri.conf.APIConfig; +import com.iota.iri.service.dto.AbstractResponse; +import com.iota.iri.service.dto.AccessLimitedResponse; +import com.iota.iri.service.dto.ErrorResponse; +import com.iota.iri.service.dto.ExceptionResponse; +import com.iota.iri.service.restserver.ApiProcessor; +import com.iota.iri.service.restserver.RestConnector; +import com.iota.iri.utils.IotaIOUtils; +import com.iota.iri.utils.MapIdentityManager; + +import io.undertow.Handlers; +import io.undertow.Undertow; +import io.undertow.security.api.AuthenticationMechanism; +import io.undertow.security.api.AuthenticationMode; +import io.undertow.security.handlers.AuthenticationCallHandler; +import io.undertow.security.handlers.AuthenticationConstraintHandler; +import io.undertow.security.handlers.AuthenticationMechanismsHandler; +import 
io.undertow.security.handlers.SecurityInitialHandler; +import io.undertow.security.idm.IdentityManager; +import io.undertow.security.impl.BasicAuthenticationMechanism; +import io.undertow.server.HandlerWrapper; +import io.undertow.server.HttpHandler; +import io.undertow.server.HttpServerExchange; +import io.undertow.servlet.api.DeploymentInfo; +import io.undertow.util.HeaderMap; +import io.undertow.util.Headers; +import io.undertow.util.HttpString; +import io.undertow.util.Methods; +import io.undertow.util.MimeMappings; +import io.undertow.util.StatusCodes; + +/** + * + * Rest connector based on RestEasy, which uses Jaxrs server under the hood. + * This will not actually have any REST endpoints, but rather handle all incoming connections from one point. + * + */ +@ApplicationPath("") +public class RestEasy extends Application implements RestConnector { + + private static final Logger log = LoggerFactory.getLogger(RestEasy.class); + + private final Gson gson = new GsonBuilder().create(); + + private UndertowJaxrsServer server; + + private DeploymentInfo info; + + private ApiProcessor processFunction; + + private int maxBodyLength; + + private String remoteAuth; + + private String apiHost; + + private int port; + + /** + * Required for every {@link Application} + * Will be instantiated once and is supposed to provide REST api classes + * Do not call manually + * + * We handle all calls manually without ever using the {@link Application} functionality using a + * {@link DeploymentInfo#addInnerHandlerChainWrapper} + */ + public RestEasy() { + + } + + /** + * + * @param configuration + */ + public RestEasy(APIConfig configuration) { + maxBodyLength = configuration.getMaxBodyLength(); + port = configuration.getPort(); + apiHost = configuration.getApiHost(); + + remoteAuth = configuration.getRemoteAuth(); + } + + /** + * Prepares the IOTA API for usage. Until this method is called, no HTTP requests can be made. + * The order of loading is as follows + *
      + *
    1. + * Read the spend addresses from the previous epoch. Used in {@link #wasAddressSpentFrom(Hash)}. + * This only happens if {@link APIConfig#isTestnet()} is false + * If reading from the previous epoch fails, a log is printed. The API will continue to initialize. + *
    2. + *
    3. + * Get the {@link APIConfig} from the {@link Iota} instance, + * and read {@link APIConfig#getPort()} and {@link APIConfig#getApiHost()} + *
    4. + *
    5. + * Builds a secure {@link Undertow} server with the port and host. + * If {@link APIConfig#getRemoteAuth()} is defined, remote authentication is blocked for anyone except + * those defined in {@link APIConfig#getRemoteAuth()} or localhost. + * This is done with {@link BasicAuthenticationMechanism} in a {@link AuthenticationMode#PRO_ACTIVE} mode. + * By default, this authentication is disabled. + *
    6. + *
    7. + * Starts the server, opening it for HTTP API requests + *
    8. + *
    + */ + @Override + public void init(ApiProcessor processFunction) { + log.debug("Binding JSON-REST API Undertow server on {}:{}", apiHost, port); + this.processFunction = processFunction; + + server = new UndertowJaxrsServer(); + + info = server.undertowDeployment(RestEasy.class); + info.setDisplayName("Iota Realm"); + info.setDeploymentName("Iota Realm"); + info.setContextPath("/"); + + info.addSecurityWrapper(new HandlerWrapper() { + @Override + public HttpHandler wrap(HttpHandler toWrap) { + String credentials = remoteAuth; + if (credentials == null || credentials.isEmpty()) { + return toWrap; + } + + final Map users = new HashMap<>(2); + users.put(credentials.split(":")[0], credentials.split(":")[1].toCharArray()); + + IdentityManager identityManager = new MapIdentityManager(users); + HttpHandler handler = toWrap; + handler = new AuthenticationCallHandler(handler); + handler = new AuthenticationConstraintHandler(handler); + final List mechanisms = + Collections.singletonList(new BasicAuthenticationMechanism("Iota Realm")); + + handler = new AuthenticationMechanismsHandler(handler, mechanisms); + handler = new SecurityInitialHandler(AuthenticationMode.PRO_ACTIVE, identityManager, handler); + return handler; + } + }); + + info.addInnerHandlerChainWrapper(handler -> { + return Handlers.path().addPrefixPath("/", new HttpHandler() { + @Override + public void handleRequest(final HttpServerExchange exchange) throws Exception { + HttpString requestMethod = exchange.getRequestMethod(); + if (Methods.OPTIONS.equals(requestMethod)) { + String allowedMethods = "GET,HEAD,POST,PUT,DELETE,TRACE,OPTIONS,CONNECT,PATCH"; + //return list of allowed methods in response headers + exchange.setStatusCode(StatusCodes.OK); + exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, MimeMappings.DEFAULT_MIME_MAPPINGS.get("txt")); + exchange.getResponseHeaders().put(Headers.CONTENT_LENGTH, 0); + exchange.getResponseHeaders().put(Headers.ALLOW, allowedMethods); + 
exchange.getResponseHeaders().put(new HttpString("Access-Control-Allow-Origin"), "*"); + exchange.getResponseHeaders().put(new HttpString("Access-Control-Allow-Headers"), "User-Agent, Origin, X-Requested-With, Content-Type, Accept, X-IOTA-API-Version"); + exchange.getResponseSender().close(); + return; + } + + if (exchange.isInIoThread()) { + exchange.dispatch(this); + return; + } + processRequest(exchange); + } + }); + }); + } + + /** + * {@inheritDoc} + */ + @Override + public void start() { + if (info != null) { + Undertow.Builder builder = Undertow.builder() + .addHttpListener(port, apiHost); + server.start(builder); + server.deploy(info); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void stop() { + server.stop(); + } + + /** + * Sends the API response back as JSON to the requester. + * Status code of the HTTP request is also set according to the type of response. + *
      + *
    • {@link ErrorResponse}: 400
    • + *
    • {@link AccessLimitedRprocessRequestesponse}: 401
    • + *
    • {@link ExceptionResponse}: 500
    • + *
    • Default: 200
    • + *
    + * + * @param exchange Contains information about what the client sent to us + * @param res The response of the API. + * See {@link #processRequest(HttpServerExchange)} + * and {@link #process(String, InetSocketAddress)} for the different responses in each case. + * @param beginningTime The time when we received the request, in milliseconds. + * This will be used to set the response duration in {@link AbstractResponse#setDuration(Integer)} + * @throws IOException When connection to client has been lost - Currently being caught. + */ + private void sendResponse(HttpServerExchange exchange, AbstractResponse res, long beginningTime) throws IOException { + res.setDuration((int) (System.currentTimeMillis() - beginningTime)); + final String response = gson.toJson(res); + + if (res instanceof ErrorResponse) { + // bad request or invalid parameters + exchange.setStatusCode(400); + } else if (res instanceof AccessLimitedResponse) { + // API method not allowed + exchange.setStatusCode(401); + } else if (res instanceof ExceptionResponse) { + // internal error + exchange.setStatusCode(500); + } + + setupResponseHeaders(exchange); + + ByteBuffer responseBuf = ByteBuffer.wrap(response.getBytes(StandardCharsets.UTF_8)); + exchange.setResponseContentLength(responseBuf.array().length); + StreamSinkChannel sinkChannel = exchange.getResponseChannel(); + sinkChannel.getWriteSetter().set( channel -> { + if (responseBuf.remaining() > 0) { + try { + sinkChannel.write(responseBuf); + if (responseBuf.remaining() == 0) { + exchange.endExchange(); + } + } catch (IOException e) { + log.error("Lost connection to client - cannot send response"); + exchange.endExchange(); + sinkChannel.getWriteSetter().set(null); + } + } + else { + exchange.endExchange(); + } + }); + sinkChannel.resumeWrites(); + } + + /** + *

    + * Processes an API HTTP request. + * No checks have been done until now, except that it is not an OPTIONS request. + * We can be sure that we are in a thread that allows blocking. + *

    + *

    + * The request process duration is recorded. + * During this the request gets verified. If it is incorrect, an {@link ErrorResponse} is made. + * Otherwise it is processed in {@link #process(String, InetSocketAddress)}. + * The result is sent back to the requester. + *

    + * + * @param exchange Contains the data the client sent to us + * @throws IOException If the body of this HTTP request cannot be read + */ + private void processRequest(final HttpServerExchange exchange) throws IOException { + final ChannelInputStream cis = new ChannelInputStream(exchange.getRequestChannel()); + exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, "application/json"); + + final long beginningTime = System.currentTimeMillis(); + final String body = IotaIOUtils.toString(cis, StandardCharsets.UTF_8); + AbstractResponse response; + + if (!exchange.getRequestHeaders().contains("X-IOTA-API-Version")) { + response = ErrorResponse.create("Invalid API Version"); + } else if (body.length() > maxBodyLength) { + response = ErrorResponse.create("Request too long"); + } else { + response = this.processFunction.processFunction(body, exchange.getSourceAddress().getAddress()); + } + + sendResponse(exchange, response, beginningTime); + } + + /** + * Updates the {@link HttpServerExchange} {@link HeaderMap} with the proper response settings. + * @param exchange Contains information about what the client has send to us + */ + private static void setupResponseHeaders(HttpServerExchange exchange) { + final HeaderMap headerMap = exchange.getResponseHeaders(); + headerMap.add(new HttpString("Access-Control-Allow-Origin"),"*"); + headerMap.add(new HttpString("Keep-Alive"), "timeout=500, max=100"); + } +} diff --git a/src/main/java/com/iota/iri/service/snapshot/Snapshot.java b/src/main/java/com/iota/iri/service/snapshot/Snapshot.java index 9972117f54..d76754dbcf 100644 --- a/src/main/java/com/iota/iri/service/snapshot/Snapshot.java +++ b/src/main/java/com/iota/iri/service/snapshot/Snapshot.java @@ -1,15 +1,19 @@ package com.iota.iri.service.snapshot; /** + *

    * Represents a complete Snapshot of the ledger state. - * + *

    + *

    * The Snapshot is a container for the {@link SnapshotMetaData} and the {@link SnapshotState} and therefore fulfills * both of these contracts while offering some additional utility methods to manipulate them. - * + *

    + *

    * Important: Since we are only dealing with Snapshots outside of the snapshot package (the underlying meta data and * state objects are not exposed via getters) this class takes care of making the exposed methods thread * safe. The logic of the underlying objects is not thread-safe (for performance and simplicity reasons) but * we don't need to worry about this since we do not have access to them. + *

    */ public interface Snapshot extends SnapshotMetaData, SnapshotState { /** @@ -43,8 +47,10 @@ public interface Snapshot extends SnapshotMetaData, SnapshotState { /** * This methods allows us to keep track when we skip a milestone when applying changes. * + *

    * Since we can only rollback changes if we know which milestones have lead to the current state, we need to keep * track of the milestones that were previously skipped. + *

    * * @param skippedMilestoneIndex index of the milestone that was skipped while applying the ledger state * @return true if the index was added and false if it was already part of the set @@ -54,8 +60,10 @@ public interface Snapshot extends SnapshotMetaData, SnapshotState { /** * This methods allows us to remove a milestone index from the internal list of skipped milestone indexes. * + *

    * Since we can only rollback changes if we know which milestones have lead to the current state, we need to keep * track of the milestones that were previously skipped. + *

    * * @param skippedMilestoneIndex index of the milestone that was skipped while applying the ledger state * @return true if the skipped milestone was removed from the internal list and false if it was not present @@ -65,19 +73,24 @@ public interface Snapshot extends SnapshotMetaData, SnapshotState { /** * Replaces the values of this instance with the values of another snapshot object. * + *

    * This can for example be used to "reset" the snapshot after a failed modification attempt (while being able to * keep the same instance). + *

    * * @param snapshot the new snapshot details that shall overwrite the current ones */ void update(Snapshot snapshot); /** + *

    * Creates a deep clone of the Snapshot which can be modified without affecting the values of the original - * one.
    - *
    + * one. + *

    + *

    * Since the data structures inside the Snapshot are extremely big, this method is relatively expensive and should - * only be used when it is really necessary.
    + * only be used when it is really necessary. + *

    * * @return a deep clone of the snapshot */ diff --git a/src/main/java/com/iota/iri/service/snapshot/SnapshotService.java b/src/main/java/com/iota/iri/service/snapshot/SnapshotService.java index 5bb3d56aa2..cdd7592c2a 100644 --- a/src/main/java/com/iota/iri/service/snapshot/SnapshotService.java +++ b/src/main/java/com/iota/iri/service/snapshot/SnapshotService.java @@ -1,32 +1,35 @@ package com.iota.iri.service.snapshot; -import com.iota.iri.conf.SnapshotConfig; import com.iota.iri.controllers.MilestoneViewModel; import com.iota.iri.model.Hash; import com.iota.iri.service.milestone.LatestMilestoneTracker; import com.iota.iri.service.transactionpruning.TransactionPruner; -import com.iota.iri.storage.Tangle; import java.util.Map; /** + *

    * Represents the service for snapshots that contains the relevant business logic for modifying {@link Snapshot}s and * generating new local {@link Snapshot}s. - * + *

    * This class is stateless and does not hold any domain specific models. */ public interface SnapshotService { /** - * This method applies the balance changes that are introduced by future milestones to the current Snapshot.
    - *
    + *

    + * This method applies the balance changes that are introduced by future milestones to the current Snapshot. + *

    + *

    * It iterates over the milestone indexes starting from the current index to the target index and applies all found * milestone balances. If it can not find a milestone for a certain index it keeps track of these skipped * milestones, which allows us to revert the changes even if the missing milestone was received and processed in the * mean time. If the application of changes fails, we restore the state of the snapshot to the one it had before the - * application attempt so this method only modifies the Snapshot if it succeeds.
    - *
    - * Note: the changes done by this method can be reverted by using {@link #rollBackMilestones(Snapshot, int)}
    - * + * application attempt so this method only modifies the Snapshot if it succeeds. + *

    + *

    + * Note: the changes done by this method can be reverted by using {@link #rollBackMilestones(Snapshot, int)}. + *

    + * * @param snapshot the Snapshot that shall get modified * @param targetMilestoneIndex the index of the milestone that should be applied * @throws SnapshotException if something goes wrong while applying the changes @@ -34,16 +37,19 @@ public interface SnapshotService { void replayMilestones(Snapshot snapshot, int targetMilestoneIndex) throws SnapshotException; /** + *

    * This method rolls back the latest milestones until it reaches the state that the snapshot had before applying * the milestone indicated by the given parameter. - * + *

    + *

    * When rolling back the milestones we take previously skipped milestones into account, so this method should give * the correct result, even if the missing milestones were received and processed in the mean time. If the rollback * fails, we restore the state of the snapshot to the one it had before the rollback attempt so this method only * modifies the Snapshot if it succeeds. - * + *

    + *

    * Note: this method is used to reverse the changes introduced by {@link #replayMilestones(Snapshot, int)} - * + *

    * @param snapshot the Snapshot that shall get modified * @param targetMilestoneIndex the index of the milestone that should be rolled back (including all following * milestones that were applied) @@ -52,15 +58,19 @@ public interface SnapshotService { void rollBackMilestones(Snapshot snapshot, int targetMilestoneIndex) throws SnapshotException; /** + *

    * This method takes a "full" local snapshot according to the configuration of the node. - * + *

    + *

    * It first determines the necessary configuration parameters and which milestone to us as a reference. It then * generates the local {@link Snapshot}, issues the the required {@link TransactionPruner} jobs and writes the * resulting {@link Snapshot} to the disk. - * + *

    + *

    * After persisting the local snapshot on the hard disk of the node, it updates the {@link Snapshot} instances used * by the {@code snapshotProvider} to reflect the newly created {@link Snapshot}. - * + *

    + * * @param latestMilestoneTracker milestone tracker that allows us to retrieve information about the known milestones * @param transactionPruner manager for the pruning jobs that takes care of cleaning up the old data that * @throws SnapshotException if anything goes wrong while creating the local snapshot @@ -69,12 +79,15 @@ void takeLocalSnapshot(LatestMilestoneTracker latestMilestoneTracker, Transactio SnapshotException; /** + *

    * This method generates a local snapshot of the full ledger state at the given milestone. - * + *

    + *

    * The generated {@link Snapshot} contains the balances and meta data plus the derived values like the solid entry * points and all seen milestones, that were issued after the snapshot and can therefore be used to generate the * local snapshot files. - * + *

    + * * @param latestMilestoneTracker milestone tracker that allows us to retrieve information about the known milestones * @param targetMilestone milestone that is used as a reference point for the snapshot * @return a local snapshot of the full ledger state at the given milestone @@ -84,12 +97,15 @@ Snapshot generateSnapshot(LatestMilestoneTracker latestMilestoneTracker, Milesto SnapshotException; /** + *

    * This method generates the solid entry points for a snapshot that belong to the given milestone. - * + *

    + *

    * A solid entry point is a confirmed transaction that had non-orphaned approvers during the time of the snapshot * creation and therefore a connection to the most recent part of the tangle. The solid entry points allow us to * stop the solidification process without having to go all the way back to the genesis. - * + *

    + * * @param targetMilestone milestone that is used as a reference point for the snapshot * @return a map of solid entry points associating their hash to the milestone index that confirmed them * @throws SnapshotException if anything goes wrong while generating the solid entry points @@ -97,12 +113,15 @@ Snapshot generateSnapshot(LatestMilestoneTracker latestMilestoneTracker, Milesto Map generateSolidEntryPoints(MilestoneViewModel targetMilestone) throws SnapshotException; /** + *

    * This method generates the map of seen milestones that happened after the given target milestone. - * + *

    + *

    * The map contains the hashes of the milestones associated to their milestone index and is used to allow nodes * that use local snapshot files to bootstrap their nodes, to faster request the missing milestones when syncing the * very first time. - * + *

    + * * @param latestMilestoneTracker milestone tracker that allows us to retrieve information about the known milestones * @param targetMilestone milestone that is used as a reference point for the snapshot * @return a map of solid entry points associating their hash to the milestone index that confirmed them diff --git a/src/main/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImpl.java b/src/main/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImpl.java index d55cfdcc1a..f6562d1617 100644 --- a/src/main/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImpl.java +++ b/src/main/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImpl.java @@ -1,5 +1,6 @@ package com.iota.iri.service.snapshot.impl; +import com.google.common.annotations.VisibleForTesting; import com.iota.iri.conf.SnapshotConfig; import com.iota.iri.service.milestone.LatestMilestoneTracker; import com.iota.iri.service.snapshot.LocalSnapshotManager; @@ -14,11 +15,14 @@ import org.slf4j.LoggerFactory; /** + *

    * Creates a manager for the local snapshots, that takes care of automatically creating local snapshots when the defined - * intervals have passed.
    - *
    + * intervals have passed. + *

    + *

    * It incorporates a background worker that periodically checks if a new snapshot is due (see {@link - * #start(LatestMilestoneTracker)} and {@link #shutdown()}).
    + * #start(LatestMilestoneTracker)} and {@link #shutdown()}). + *

    */ public class LocalSnapshotManagerImpl implements LocalSnapshotManager { /** @@ -31,7 +35,8 @@ public class LocalSnapshotManagerImpl implements LocalSnapshotManager { * To prevent jumping back and forth in and out of sync, there is a buffer in between. * Only when the latest milestone and latest snapshot differ more than this number, we fall out of sync */ - private static final int LOCAL_SNAPSHOT_SYNC_BUFFER = 5; + @VisibleForTesting + static final int LOCAL_SNAPSHOT_SYNC_BUFFER = 5; /** * Logger for this class allowing us to dump debug and status messages. @@ -72,15 +77,18 @@ public class LocalSnapshotManagerImpl implements LocalSnapshotManager { private ThreadIdentifier monitorThreadIdentifier = new ThreadIdentifier("Local Snapshots Monitor"); /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It simply stores the passed in values in their corresponding private properties. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code localSnapshotManager = new LocalSnapshotManagerImpl().init(...);} * * @param snapshotProvider data provider for the snapshots that are relevant for the node @@ -128,7 +136,8 @@ public void shutdown() { * * @param latestMilestoneTracker tracker for the milestones to determine when a new local snapshot is due */ - private void monitorThread(LatestMilestoneTracker latestMilestoneTracker) { + @VisibleForTesting + void monitorThread(LatestMilestoneTracker latestMilestoneTracker) { while (!Thread.currentThread().isInterrupted()) { int localSnapshotInterval = getSnapshotInterval(isInSync(latestMilestoneTracker)); @@ -154,38 +163,42 @@ private void monitorThread(LatestMilestoneTracker latestMilestoneTracker) { * @param inSync if this node is in sync * @return the current interval in which we take local snapshots */ - private int getSnapshotInterval(boolean inSync) { + @VisibleForTesting + int getSnapshotInterval(boolean inSync) { return inSync ? config.getLocalSnapshotsIntervalSynced() : config.getLocalSnapshotsIntervalUnsynced(); } - + /** - * A node is defined in sync when the latest snapshot milestone index and the latest milestone index are equal. - * In order to prevent a bounce between in and out of sync, a buffer is added when a node became in sync. + * A node is defined in sync when the latest snapshot milestone index and the + * latest milestone index are equal. In order to prevent a bounce between in and + * out of sync, a buffer is added when a node became in sync. * - * This will always return false if we are not done scanning milestone candidates during initialization. + * This will always return false if we are not done scanning milestone + * candidates during initialization. 
* * @param latestMilestoneTracker tracker we use to determine milestones * @return true if we are in sync, otherwise false */ - private boolean isInSync(LatestMilestoneTracker latestMilestoneTracker) { + @VisibleForTesting + boolean isInSync(LatestMilestoneTracker latestMilestoneTracker) { if (!latestMilestoneTracker.isInitialScanComplete()) { return false; } - + int latestIndex = latestMilestoneTracker.getLatestMilestoneIndex(); int latestSnapshot = snapshotProvider.getLatestSnapshot().getIndex(); - + // If we are out of sync, only a full sync will get us in if (!isInSync && latestIndex == latestSnapshot) { isInSync = true; - - // When we are in sync, only dropping below the buffer gets us out of sync + + // When we are in sync, only dropping below the buffer gets us out of sync } else if (latestSnapshot < latestIndex - LOCAL_SNAPSHOT_SYNC_BUFFER) { isInSync = false; } - + return isInSync; } } diff --git a/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImpl.java b/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImpl.java index 9114f8333b..b4a63881be 100644 --- a/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImpl.java +++ b/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImpl.java @@ -1,12 +1,12 @@ package com.iota.iri.service.snapshot.impl; +import com.google.common.annotations.VisibleForTesting; import com.iota.iri.SignedFiles; import com.iota.iri.conf.SnapshotConfig; import com.iota.iri.model.Hash; import com.iota.iri.model.HashFactory; import com.iota.iri.service.snapshot.*; import com.iota.iri.service.spentaddresses.SpentAddressesException; -import com.iota.iri.service.spentaddresses.SpentAddressesProvider; import java.io.*; import java.nio.file.Files; @@ -20,9 +20,11 @@ import org.slf4j.LoggerFactory; /** - * Creates a data provider for the two {@link Snapshot} instances that are relevant for the node.
    - *
    - * It provides access to the two relevant {@link Snapshot} instances:
    + *

    + * Creates a data provider for the two {@link Snapshot} instances that are relevant for the node. + *

    + *

    + * It provides access to the two relevant {@link Snapshot} instances: *

      *
    • * the {@link #initialSnapshot} (the starting point of the ledger based on the last global or local Snapshot) @@ -32,6 +34,7 @@ * milestone) *
    • *
    + *

    */ public class SnapshotProviderImpl implements SnapshotProvider { /** @@ -56,13 +59,15 @@ public class SnapshotProviderImpl implements SnapshotProvider { private static final Logger log = LoggerFactory.getLogger(SnapshotProviderImpl.class); /** + *

    * Holds a cached version of the builtin snapshot. - * + *

    * Note: The builtin snapshot is embedded in the iri.jar and will not change. To speed up tests that need the * snapshot multiple times while creating their own version of the LocalSnapshotManager, we cache the instance * here so they don't have to rebuild it from the scratch every time (massively speeds up the unit tests). */ - private static SnapshotImpl builtinSnapshot = null; + @VisibleForTesting + static SnapshotImpl builtinSnapshot = null; /** * Holds Snapshot related configuration parameters. @@ -80,15 +85,18 @@ public class SnapshotProviderImpl implements SnapshotProvider { private Snapshot latestSnapshot; /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties and loads the snapshots.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It simply stores the passed in values in their corresponding private properties and loads the snapshots. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code snapshotProvider = new SnapshotProviderImpl().init(...);} * * @param config Snapshot related configuration parameters @@ -121,15 +129,18 @@ public Snapshot getLatestSnapshot() { } /** - * {@inheritDoc}
    - *
    + * {@inheritDoc} + * + *

    * It first writes two temporary files, then renames the current files by appending them with a ".bkp" extension and * finally renames the temporary files. This mechanism reduces the chances of the files getting corrupted if IRI * crashes during the snapshot creation and always leaves the node operator with a set of backup files that can be - * renamed to resume node operation prior to the failed snapshot.
    - *
    + * renamed to resume node operation prior to the failed snapshot. + *

    + *

    * Note: We create the temporary files in the same folder as the "real" files to allow the operating system to - * perform a "rename" instead of a "copy" operation.
    + * perform a "rename" instead of a "copy" operation. + *

    */ @Override public void writeSnapshotToDisk(Snapshot snapshot, String basePath) throws SnapshotException { @@ -172,15 +183,18 @@ public void shutdown() { //region SNAPSHOT RELATED UTILITY METHODS ////////////////////////////////////////////////////////////////////////// /** + *

    * Loads the snapshots that are provided by this data provider. - * + *

    + *

    * We first check if a valid local {@link Snapshot} exists by trying to load it. If we fail to load the local * {@link Snapshot}, we fall back to the builtin one. - * + *

    + *

    * After the {@link #initialSnapshot} was successfully loaded we create a copy of it that will act as the "working * copy" that will keep track of the latest changes that get applied while the node operates and processes the new * confirmed transactions. - * + *

    * @throws SnapshotException if anything goes wrong while loading the snapshots */ private void loadSnapshots() throws SnapshotException, SpentAddressesException { @@ -193,11 +207,14 @@ private void loadSnapshots() throws SnapshotException, SpentAddressesException { } /** + *

    * Loads the last local snapshot from the disk. - * + *

    + *

    * This method checks if local snapshot files are available on the hard disk of the node and tries to load them. If * no local snapshot files exist or local snapshots are not enabled we simply return null. - * + *

    + * * @return local snapshot of the node * @throws SnapshotException if local snapshot files exist but are malformed */ @@ -247,14 +264,17 @@ private void assertSpentAddressesDbExist() throws SpentAddressesException { } /** + *

    * Loads the builtin snapshot (last global snapshot) that is embedded in the jar (if a different path is provided it * can also load from the disk). - * + *

    + *

    * We first verify the integrity of the snapshot files by checking the signature of the files and then construct * a {@link Snapshot} from the retrieved information. - * + *

    + *

    * We add the NULL_HASH as the only solid entry point and an empty list of seen milestones. - * + *

    * @return the builtin snapshot (last global snapshot) that is embedded in the jar * @throws SnapshotException if anything goes wrong while loading the builtin {@link Snapshot} */ @@ -310,11 +330,14 @@ private Snapshot loadBuiltInSnapshot() throws SnapshotException { //region SNAPSHOT STATE RELATED UTILITY METHODS //////////////////////////////////////////////////////////////////// /** + *

    * This method reads the balances from the given file on the disk and creates the corresponding SnapshotState. - * - * It simply creates the corresponding reader and for the file on the given location and passes it on to + *

    + *

    + * It creates the corresponding reader and for the file on the given location and passes it on to * {@link #readSnapshotState(BufferedReader)}. - * + *

    + * * @param snapshotStateFilePath location of the snapshot state file * @return the unserialized version of the state file * @throws SnapshotException if anything goes wrong while reading the state file @@ -328,10 +351,13 @@ private SnapshotState readSnapshotStatefromFile(String snapshotStateFilePath) th } /** + *

    * This method reads the balances from the given file in the JAR and creates the corresponding SnapshotState. - * - * It simply creates the corresponding reader and for the file on the given location in the JAR and passes it on to + *

    + *

    + * It creates the corresponding reader and for the file on the given location in the JAR and passes it on to * {@link #readSnapshotState(BufferedReader)}. + *

    * * @param snapshotStateFilePath location of the snapshot state file * @return the unserialized version of the state file @@ -340,17 +366,20 @@ private SnapshotState readSnapshotStatefromFile(String snapshotStateFilePath) th private SnapshotState readSnapshotStateFromJAR(String snapshotStateFilePath) throws SnapshotException { try (BufferedReader reader = new BufferedReader(new InputStreamReader(new BufferedInputStream(SnapshotProviderImpl.class.getResourceAsStream(snapshotStateFilePath))))) { return readSnapshotState(reader); - } catch (IOException e) { + } catch (NullPointerException | IOException e) { throw new SnapshotException("failed to read the snapshot file from JAR at " + snapshotStateFilePath, e); } } /** + *

    * This method reads the balances from the given reader. - * + *

    + *

    * The format of the input is pairs of "address;balance" separated by newlines. It simply reads the input line by * line, adding the corresponding values to the map. - * + *

    + * * @param reader reader allowing us to retrieve the lines of the {@link SnapshotState} file * @return the unserialized version of the snapshot state state file * @throws IOException if something went wrong while trying to access the file @@ -373,10 +402,13 @@ private SnapshotState readSnapshotState(BufferedReader reader) throws IOExceptio } /** + *

    * This method dumps the current state to a file. - * + *

    + *

    * It is used by local snapshots to persist the in memory states and allow IRI to resume from the local snapshot. - * + *

    + * * @param snapshotState state object that shall be written * @param snapshotPath location of the file that shall be written * @throws SnapshotException if anything goes wrong while writing the file @@ -402,10 +434,13 @@ private void writeSnapshotStateToDisk(SnapshotState snapshotState, String snapsh //region SNAPSHOT METADATA RELATED UTILITY METHODS ///////////////////////////////////////////////////////////////// /** + *

    * This method retrieves the metadata of a snapshot from a file. - * + *

    + *

    * It is used by local snapshots to determine the relevant information about the saved snapshot. - * + *

    + * * @param snapshotMetaDataFile File object with the path to the snapshot metadata file * @return SnapshotMetaData instance holding all the relevant details about the snapshot * @throws SnapshotException if anything goes wrong while reading and parsing the file @@ -608,11 +643,13 @@ private Map readSeenMilestonesFromMetaDataFile(BufferedReader rea } /** + *

    * This method writes a file containing a serialized version of the metadata object. - * + *

    + *

    * It can be used to store the current values and read them on a later point in time. It is used by the local * snapshot manager to generate and maintain the snapshot files. - * + *

    * @param snapshotMetaData metadata object that shall be written * @param filePath location of the file that shall be written * @throws SnapshotException if anything goes wrong while writing the file diff --git a/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotServiceImpl.java b/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotServiceImpl.java index 83e077e23a..bd505542d9 100644 --- a/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotServiceImpl.java +++ b/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotServiceImpl.java @@ -31,9 +31,12 @@ import org.slf4j.LoggerFactory; /** - * Creates a service instance that allows us to access the business logic for {@link Snapshot}s.
    - *
    - * The service instance is stateless and can be shared by multiple other consumers.
    + *

    + * Creates a service instance that allows us to access the business logic for {@link Snapshot}s. + *

    + *

    + * The service instance is stateless and can be shared by multiple other consumers. + *

    */ public class SnapshotServiceImpl implements SnapshotService { /** @@ -42,9 +45,11 @@ public class SnapshotServiceImpl implements SnapshotService { private static final Logger log = LoggerFactory.getLogger(SnapshotServiceImpl.class); /** + *

    * Holds a limit for the amount of milestones we go back in time when generating the solid entry points (to speed up - * the snapshot creation).
    - *
    + * the snapshot creation). + *

    + *

    * Note: Since the snapshot creation is a "continuous" process where we build upon the information gathered during * the creation of previous snapshots, we do not need to analyze all previous milestones but can rely on * slowly gathering the missing information over time. While this might lead to a situation where the very @@ -52,31 +57,35 @@ public class SnapshotServiceImpl implements SnapshotService { * to sync it is still a reasonable trade-off to reduce the load on the nodes. We just assume that anybody who * wants to share his snapshots with the community as a way to bootstrap new nodes will run his snapshot * enabled node for a few hours before sharing his files (this is a problem in very rare edge cases when - * having back-referencing transactions anyway).
    + * having back-referencing transactions anyway). + *

    */ private static final int OUTER_SHELL_SIZE = 100; /** + *

    * Maximum age in milestones since creation of solid entry points. - * + *

    + *

    * Since it is possible to artificially keep old solid entry points alive by periodically attaching new transactions * to them, we limit the life time of solid entry points and ignore them whenever they become too old. This is a * measure against a potential attack vector where somebody might try to blow up the meta data of local snapshots. + *

    */ private static final int SOLID_ENTRY_POINT_LIFETIME = 1000; /** - * Holds the tangle object which acts as a database interface.
    + * Holds the tangle object which acts as a database interface. */ private Tangle tangle; /** - * Holds the snapshot provider which gives us access to the relevant snapshots.
    + * Holds the snapshot provider which gives us access to the relevant snapshots. */ private SnapshotProvider snapshotProvider; /** - * Holds the config with important snapshot specific settings.
    + * Holds the config with important snapshot specific settings. */ private SnapshotConfig config; @@ -85,15 +94,18 @@ public class SnapshotServiceImpl implements SnapshotService { private SpentAddressesProvider spentAddressesProvider; /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It stores the passed in values in their corresponding private properties. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code snapshotService = new SnapshotServiceImpl().init(...);} * * @param tangle Tangle object which acts as a database interface @@ -116,11 +128,13 @@ public SnapshotServiceImpl init(Tangle tangle, SnapshotProvider snapshotProvider /** * {@inheritDoc} - *
    + * + *

    * To increase the performance of this operation, we do not apply every single milestone separately but first * accumulate all the necessary changes and then apply it to the snapshot in a single run. This allows us to * modify its values without having to create a "copy" of the initial state to possibly roll back the changes if - * anything unexpected happens (creating a backup of the state requires a lot of memory).
    + * anything unexpected happens (creating a backup of the state requires a lot of memory). + *

    */ @Override public void replayMilestones(Snapshot snapshot, int targetMilestoneIndex) throws SnapshotException { @@ -318,12 +332,14 @@ public Map generateSeenMilestones(LatestMilestoneTracker latestMi } /** + *

    * This method reverts the changes caused by the last milestone that was applied to this snapshot. - * + *

    + *

    * It first checks if we didn't arrive at the initial index yet and then reverts the balance changes that were * caused by the last milestone. Then it checks if any milestones were skipped while applying the last milestone and * determines the {@link SnapshotMetaData} that this Snapshot had before and restores it. - * + *

    * @param tangle Tangle object which acts as a database interface * @return true if the snapshot was rolled back or false otherwise * @throws SnapshotException if anything goes wrong while accessing the database @@ -390,11 +406,14 @@ private boolean rollbackLastMilestone(Tangle tangle, Snapshot snapshot) throws S } /** + *

    * This method determines the milestone that shall be used for the local snapshot. - * + *

    + *

    * It determines the milestone by subtracting the {@link SnapshotConfig#getLocalSnapshotsDepth()} from the latest * solid milestone index and retrieving the next milestone before this point. - * + *

    + * * @param tangle Tangle object which acts as a database interface * @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node * @param config important snapshot related configuration parameters @@ -421,15 +440,18 @@ private MilestoneViewModel determineMilestoneForLocalSnapshot(Tangle tangle, Sna } /** + *

    * This method creates {@link com.iota.iri.service.transactionpruning.TransactionPrunerJob}s for the expired solid * entry points, which removes the unconfirmed subtangles branching off of these transactions. - * + *

    + *

    * We only clean up these subtangles if the transaction that they are branching off has been cleaned up already by a * {@link MilestonePrunerJob}. If the corresponding milestone has not been processed we leave them in the database * so we give the node a little bit more time to "use" these transaction for references from future milestones. This * is used to correctly reflect the {@link SnapshotConfig#getLocalSnapshotsPruningDelay()}, where we keep old data * prior to a snapshot. - * + *

    + * * @param tangle Tangle object which acts as a database interface * @param oldSolidEntryPoints solid entry points of the current initial {@link Snapshot} * @param newSolidEntryPoints solid entry points of the new initial {@link Snapshot} @@ -456,12 +478,15 @@ private void cleanupExpiredSolidEntryPoints(Tangle tangle, Map ol } /** + *

    * This method creates the {@link com.iota.iri.service.transactionpruning.TransactionPrunerJob}s that are * responsible for removing the old data. - * + *

    + *

    * It first calculates the range of milestones that shall be deleted and then issues a {@link MilestonePrunerJob} * for this range (if it is not empty). - * + *

    + * * @param config important snapshot related configuration parameters * @param transactionPruner manager for the pruning jobs that takes care of cleaning up the old data that * @param targetMilestone milestone that was used as a reference point for the local snapshot @@ -483,11 +508,14 @@ private void cleanupOldData(SnapshotConfig config, TransactionPruner transaction } /** + *

    * This method persists the local snapshot on the disk and updates the instances used by the * {@link SnapshotProvider}. - * + *

    + *

    * It first writes the files to the disk and then updates the two {@link Snapshot}s accordingly. - * + *

    + * * @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node * @param newSnapshot Snapshot that shall be persisted * @param config important snapshot related configuration parameters @@ -508,15 +536,19 @@ private void persistLocalSnapshot(SnapshotProvider snapshotProvider, Snapshot ne } /** + *

    * This method determines if a transaction is orphaned. - * + *

    + *

    * Since there is no hard definition for when a transaction can be considered to be orphaned, we define orphaned in * relation to a referenceTransaction. If the transaction or any of its direct or indirect approvers saw a * transaction being attached to it, that arrived after our reference transaction, we consider it "not orphaned". - * + *

    + *

    * Since we currently use milestones as reference transactions that are sufficiently old, this definition in fact is * a relatively safe way to determine if a subtangle "above" a transaction got orphaned. - * + *

    + * * @param tangle Tangle object which acts as a database interface * @param transaction transaction that shall be checked * @param referenceTransaction transaction that acts as a judge to the other transaction @@ -527,18 +559,13 @@ private void persistLocalSnapshot(SnapshotProvider snapshotProvider, Snapshot ne private boolean isOrphaned(Tangle tangle, TransactionViewModel transaction, TransactionViewModel referenceTransaction, Set processedTransactions) throws SnapshotException { - long arrivalTime = transaction.getArrivalTime() / 1000L; - if (arrivalTime > referenceTransaction.getTimestamp()) { - return false; - } - AtomicBoolean nonOrphanedTransactionFound = new AtomicBoolean(false); try { DAGHelper.get(tangle).traverseApprovers( transaction.getHash(), currentTransaction -> !nonOrphanedTransactionFound.get(), currentTransaction -> { - if (arrivalTime > referenceTransaction.getTimestamp()) { + if (currentTransaction.getArrivalTime() / 1000L > referenceTransaction.getTimestamp()) { nonOrphanedTransactionFound.set(true); } }, @@ -552,19 +579,24 @@ private boolean isOrphaned(Tangle tangle, TransactionViewModel transaction, } /** + *

    * This method checks if a transaction is a solid entry point for the targetMilestone. - * + *

    + *

    * A transaction is considered a solid entry point if it has non-orphaned approvers. - * + *

    + *

    * To check if the transaction has non-orphaned approvers we first check if any of its approvers got confirmed by a * future milestone, since this is very cheap. If none of them got confirmed by another milestone we do the more * expensive check from {@link #isOrphaned(Tangle, TransactionViewModel, TransactionViewModel, Set)}. - * + *

    + *

    * Since solid entry points have a limited life time and to prevent potential problems due to temporary errors in * the database, we assume that the checked transaction is a solid entry point if any error occurs while determining * its status. This is a storage <=> reliability trade off, since the only bad effect of having too many solid entry * points) is a bigger snapshot file. - * + *

    + * * @param tangle Tangle object which acts as a database interface * @param transactionHash hash of the transaction that shall be checked * @param targetMilestone milestone that is used as an anchor for our checks @@ -601,11 +633,14 @@ private boolean isSolidEntryPoint(Tangle tangle, Hash transactionHash, Milestone } /** + *

    * This method analyzes the old solid entry points and determines if they are still not orphaned. - * + *

    + *

    * It simply iterates through the old solid entry points and checks them one by one. If an old solid entry point * is found to still be relevant it is added to the passed in map. - * + *

    + * * @param tangle Tangle object which acts as a database interface * @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node * @param targetMilestone milestone that is used to generate the solid entry points @@ -633,12 +668,15 @@ && isSolidEntryPoint(tangle, hash, targetMilestone)) { } /** + *

    * This method retrieves the new solid entry points of the snapshot reference given by the target milestone. - * + *

    + *

    * It iterates over all unprocessed milestones and analyzes their directly and indirectly approved transactions. * Every transaction is checked for being a solid entry point and added to the passed in map (if it was found to be * one). - * + *

    + * * @param tangle Tangle object which acts as a database interface * @param snapshotProvider data provider for the {@link Snapshot}s that are relevant for the node * @param targetMilestone milestone that is used to generate the solid entry points diff --git a/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotStateImpl.java b/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotStateImpl.java index 800142288c..3a0ba36d1a 100644 --- a/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotStateImpl.java +++ b/src/main/java/com/iota/iri/service/snapshot/impl/SnapshotStateImpl.java @@ -2,18 +2,12 @@ import com.iota.iri.controllers.TransactionViewModel; import com.iota.iri.model.Hash; -import com.iota.iri.model.HashFactory; import com.iota.iri.service.snapshot.SnapshotException; import com.iota.iri.service.snapshot.SnapshotState; import com.iota.iri.service.snapshot.SnapshotStateDiff; -import com.iota.iri.utils.IotaIOUtils; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.*; -import java.nio.file.Files; -import java.nio.file.Paths; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -166,8 +160,7 @@ private Map getInconsistentAddresses() { HashMap result = new HashMap<>(); balances.forEach((key, value) -> { if (value < 0) { - log.info("negative value for address " + key + ": " + value); - + log.debug("negative value for address {}: {}", key, value); result.put(key, value); } }); diff --git a/src/main/java/com/iota/iri/service/spentaddresses/SpentAddressesProvider.java b/src/main/java/com/iota/iri/service/spentaddresses/SpentAddressesProvider.java index e816731bda..e0a4deb2b5 100644 --- a/src/main/java/com/iota/iri/service/spentaddresses/SpentAddressesProvider.java +++ b/src/main/java/com/iota/iri/service/spentaddresses/SpentAddressesProvider.java @@ -3,6 +3,7 @@ import com.iota.iri.model.Hash; import java.util.Collection; +import java.util.List; /** * Find, mark and store spent addresses @@ -32,5 
+33,14 @@ public interface SpentAddressesProvider { * @throws SpentAddressesException If the provider fails to add an address */ void saveAddressesBatch(Collection addressHashes) throws SpentAddressesException; + + /** + * Loads all spent addresses we know of in a collection + * + * @return The spent addresses + * @throws SpentAddressesException If the provider fails read + */ + //used by IXI + List getAllAddresses(); } diff --git a/src/main/java/com/iota/iri/service/spentaddresses/impl/SpentAddressesProviderImpl.java b/src/main/java/com/iota/iri/service/spentaddresses/impl/SpentAddressesProviderImpl.java index c59c61dc6e..f2ea50f38d 100644 --- a/src/main/java/com/iota/iri/service/spentaddresses/impl/SpentAddressesProviderImpl.java +++ b/src/main/java/com/iota/iri/service/spentaddresses/impl/SpentAddressesProviderImpl.java @@ -14,8 +14,10 @@ import com.iota.iri.utils.Pair; import java.io.*; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.stream.Collectors; import org.slf4j.Logger; @@ -106,7 +108,6 @@ public void saveAddressesBatch(Collection addressHash) throws SpentAddress try { // Its bytes are always new byte[0], therefore identical in storage SpentAddress spentAddressModel = new SpentAddress(); - rocksDBPersistenceProvider.saveBatch(addressHash .stream() .map(address -> new Pair(address, spentAddressModel)) @@ -116,4 +117,13 @@ public void saveAddressesBatch(Collection addressHash) throws SpentAddress throw new SpentAddressesException(e); } } + + @Override + public List getAllAddresses() { + List addresses = new ArrayList<>(); + for (byte[] bytes : rocksDBPersistenceProvider.loadAllKeysFromTable(SpentAddress.class)) { + addresses.add(HashFactory.ADDRESS.create(bytes)); + } + return addresses; + } } diff --git a/src/main/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculator.java b/src/main/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculator.java index 
29e4963219..a788e6fc0f 100644 --- a/src/main/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculator.java +++ b/src/main/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculator.java @@ -111,6 +111,9 @@ private UnIterableMap calculateCwInOrder(LinkedHashSet tx Iterator txHashIterator = txsToRate.iterator(); while (txHashIterator.hasNext()) { + if (Thread.interrupted()) { + throw new InterruptedException(); + } Hash txHash = txHashIterator.next(); txHashToCumulativeWeight = updateCw(txHashToApprovers, txHashToCumulativeWeight, txHash); txHashToApprovers = updateApproversAndReleaseMemory(txHashToApprovers, txHash); @@ -121,7 +124,7 @@ private UnIterableMap calculateCwInOrder(LinkedHashSet tx private UnIterableMap> updateApproversAndReleaseMemory(UnIterableMap> txHashToApprovers, Hash txHash) throws Exception { + Set> txHashToApprovers, Hash txHash) throws Exception { Set approvers = SetUtils.emptyIfNull(txHashToApprovers.get(txHash)); TransactionViewModel transactionViewModel = TransactionViewModel.fromHash(tangle, txHash); @@ -156,7 +159,7 @@ private static UnIterableMap updateCw( } private static UnIterableMap> createTxHashToApproversPrefixMap() { - return new TransformingMap<>(HashPrefix::createPrefix, null); + return new TransformingMap<>(HashPrefix::createPrefix, null); } private static UnIterableMap createTxHashToCumulativeWeightMap(int size) { diff --git a/src/main/java/com/iota/iri/service/tipselection/impl/TipSelectionCancelledException.java b/src/main/java/com/iota/iri/service/tipselection/impl/TipSelectionCancelledException.java new file mode 100644 index 0000000000..0029f2f5e4 --- /dev/null +++ b/src/main/java/com/iota/iri/service/tipselection/impl/TipSelectionCancelledException.java @@ -0,0 +1,16 @@ +package com.iota.iri.service.tipselection.impl; + +/** + * Thrown when an ongoing tip-selection is cancelled. 
+ */ +public class TipSelectionCancelledException extends Exception { + + /** + * Creates a new {@link TipSelectionCancelledException} + * + * @param msg the specific message. + */ + public TipSelectionCancelledException(String msg) { + super(msg); + } +} diff --git a/src/main/java/com/iota/iri/service/tipselection/impl/WalkerAlpha.java b/src/main/java/com/iota/iri/service/tipselection/impl/WalkerAlpha.java index 982b4b2c75..f2ae75d698 100644 --- a/src/main/java/com/iota/iri/service/tipselection/impl/WalkerAlpha.java +++ b/src/main/java/com/iota/iri/service/tipselection/impl/WalkerAlpha.java @@ -9,7 +9,6 @@ import com.iota.iri.service.tipselection.Walker; import com.iota.iri.storage.Tangle; import com.iota.iri.utils.collections.interfaces.UnIterableMap; -import com.iota.iri.zmq.MessageQ; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -24,14 +23,13 @@ public class WalkerAlpha implements Walker { /** - * {@code alpha}: a positive number that controls the randomness of the walk. + * {@code alpha}: a positive number that controls the randomness of the walk. * The closer it is to 0, the less bias the random walk will be. */ private double alpha; private final Random random; private final Tangle tangle; - private final MessageQ messageQ; private final Logger log = LoggerFactory.getLogger(Walker.class); private final TailFinder tailFinder; @@ -41,13 +39,11 @@ public class WalkerAlpha implements Walker { * * @param tailFinder instance of tailFinder, used to step from tail to tail in random walk. * @param tangle Tangle object which acts as a database interface - * @param messageQ ZMQ handle to publish telemetrics. * @param random a source of randomness. * @param config configurations to set internal parameters. 
*/ - public WalkerAlpha(TailFinder tailFinder, Tangle tangle, MessageQ messageQ, Random random, TipSelConfig config) { + public WalkerAlpha(TailFinder tailFinder, Tangle tangle, Random random, TipSelConfig config) { this.tangle = tangle; - this.messageQ = messageQ; this.tailFinder = tailFinder; this.random = random; this.alpha = config.getAlpha(); @@ -72,19 +68,22 @@ public Hash walk(Hash entryPoint, UnIterableMap ratings, WalkVa if (!walkValidator.isValid(entryPoint)) { throw new IllegalStateException("entry point failed consistency check: " + entryPoint.toString()); } - + Optional nextStep; Deque traversedTails = new LinkedList<>(); traversedTails.add(entryPoint); //Walk do { + if(Thread.interrupted()){ + throw new InterruptedException(); + } nextStep = selectApprover(traversedTails.getLast(), ratings, walkValidator); nextStep.ifPresent(traversedTails::add); } while (nextStep.isPresent()); - + log.debug("{} tails traversed to find tip", traversedTails.size()); - messageQ.publish("mctn %d", traversedTails.size()); + tangle.publish("mctn %d", traversedTails.size()); return traversedTails.getLast(); } diff --git a/src/main/java/com/iota/iri/service/transactionpruning/TransactionPrunerJob.java b/src/main/java/com/iota/iri/service/transactionpruning/TransactionPrunerJob.java index 5398fcf4a5..619b59a5e9 100644 --- a/src/main/java/com/iota/iri/service/transactionpruning/TransactionPrunerJob.java +++ b/src/main/java/com/iota/iri/service/transactionpruning/TransactionPrunerJob.java @@ -2,6 +2,7 @@ import com.iota.iri.controllers.TipsViewModel; import com.iota.iri.service.snapshot.Snapshot; +import com.iota.iri.service.spentaddresses.SpentAddressesProvider; import com.iota.iri.service.spentaddresses.SpentAddressesService; import com.iota.iri.storage.Tangle; @@ -37,6 +38,14 @@ public interface TransactionPrunerJob { */ void setSpentAddressesService(SpentAddressesService spentAddressesService); + /** + * Allows to set the {@link SpentAddressesProvider} that will ensure that 
spent addresses are written + * to the persistence layer when their corresponding transactions are pruned. + * + * @param spentAddressesProvider service to be injected + */ + void setSpentAddressesProvider(SpentAddressesProvider spentAddressesProvider); + /** * Allows to set the {@link Tangle} object that this job should work on. * diff --git a/src/main/java/com/iota/iri/service/transactionpruning/async/AsyncTransactionPruner.java b/src/main/java/com/iota/iri/service/transactionpruning/async/AsyncTransactionPruner.java index 796afa0c57..1cd0e902f4 100644 --- a/src/main/java/com/iota/iri/service/transactionpruning/async/AsyncTransactionPruner.java +++ b/src/main/java/com/iota/iri/service/transactionpruning/async/AsyncTransactionPruner.java @@ -3,6 +3,7 @@ import com.iota.iri.conf.SnapshotConfig; import com.iota.iri.controllers.TipsViewModel; import com.iota.iri.service.snapshot.SnapshotProvider; +import com.iota.iri.service.spentaddresses.SpentAddressesProvider; import com.iota.iri.service.spentaddresses.SpentAddressesService; import com.iota.iri.service.transactionpruning.TransactionPruner; import com.iota.iri.service.transactionpruning.TransactionPrunerJob; @@ -13,23 +14,22 @@ import com.iota.iri.utils.thread.ThreadIdentifier; import com.iota.iri.utils.thread.ThreadUtils; -import java.io.*; -import java.nio.file.Files; -import java.nio.file.Paths; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** + *

    * Creates a {@link TransactionPruner} that is able to process it's jobs asynchronously in the background and persists - * its state in a file on the hard disk of the node.
    - *
    + * its state in a file on the hard disk of the node. + *

    + *

    * The asynchronous processing of the jobs is done through {@link Thread}s that are started and stopped by invoking the * corresponding {@link #start()} and {@link #shutdown()} methods. Since some of the builtin jobs require a special - * logic for the way they are executed, we register the builtin job types here.
    + * logic for the way they are executed, we register the builtin job types here. + *

    */ public class AsyncTransactionPruner implements TransactionPruner { /** @@ -39,8 +39,9 @@ public class AsyncTransactionPruner implements TransactionPruner { private static final int GARBAGE_COLLECTOR_RESCAN_INTERVAL = 10000; /** + *

    * The interval (in milliseconds) in which the {@link AsyncTransactionPruner} will persist its state. - * + *

    * Note: Since the worst thing that could happen when not having a 100% synced state file is to have a few floating * "zombie" transactions in the database, we do not persist the state immediately but in intervals in a * separate {@link Thread} (to save performance - until a db-based version gets introduced). @@ -67,6 +68,11 @@ public class AsyncTransactionPruner implements TransactionPruner { */ private SpentAddressesService spentAddressesService; + /** + * Used to check whether an address is already persisted in the persistence layer. + */ + private SpentAddressesProvider spentAddressesProvider; + /** * Manager for the tips (required for removing pruned transactions from this manager). */ @@ -79,17 +85,19 @@ public class AsyncTransactionPruner implements TransactionPruner { /** * Holds a reference to the {@link ThreadIdentifier} for the cleanup thread. - * + *

    * Using a {@link ThreadIdentifier} for spawning the thread allows the {@link ThreadUtils} to spawn exactly one * thread for this instance even when we call the {@link #start()} method multiple times. + *

    */ private final ThreadIdentifier cleanupThreadIdentifier = new ThreadIdentifier("Transaction Pruner"); /** * Holds a reference to the {@link ThreadIdentifier} for the state persistence thread. - * + *

    * Using a {@link ThreadIdentifier} for spawning the thread allows the {@link ThreadUtils} to spawn exactly one * thread for this instance even when we call the {@link #start()} method multiple times. + *

    */ private final ThreadIdentifier persisterThreadIdentifier = new ThreadIdentifier("Transaction Pruner Persister"); @@ -105,15 +113,18 @@ public class AsyncTransactionPruner implements TransactionPruner { private final Map, JobQueue> jobQueues = new HashMap<>(); /** - * This method initializes the instance and registers its dependencies.
    - *
    - * It simply stores the passed in values in their corresponding private properties.
    - *
    + *

    + * This method initializes the instance and registers its dependencies. + *

    + *

    + * It simply stores the passed in values in their corresponding private properties. + *

    + *

    * Note: Instead of handing over the dependencies in the constructor, we register them lazy. This allows us to have * circular dependencies because the instantiation is separated from the dependency injection. To reduce the * amount of code that is necessary to correctly instantiate this class, we return the instance itself which - * allows us to still instantiate, initialize and assign in one line - see Example:
    - *
    + * allows us to still instantiate, initialize and assign in one line - see Example: + *

    * {@code asyncTransactionPruner = new AsyncTransactionPruner().init(...);} * * @param tangle Tangle object which acts as a database interface @@ -123,12 +134,15 @@ public class AsyncTransactionPruner implements TransactionPruner { * @return the initialized instance itself to allow chaining */ public AsyncTransactionPruner init(Tangle tangle, SnapshotProvider snapshotProvider, - SpentAddressesService spentAddressesService, TipsViewModel tipsViewModel, + SpentAddressesService spentAddressesService, + SpentAddressesProvider spentAddressesProvider, + TipsViewModel tipsViewModel, SnapshotConfig config) { this.tangle = tangle; this.snapshotProvider = snapshotProvider; this.spentAddressesService = spentAddressesService; + this.spentAddressesProvider = spentAddressesProvider; this.tipsViewModel = tipsViewModel; this.config = config; @@ -150,6 +164,7 @@ public AsyncTransactionPruner init(Tangle tangle, SnapshotProvider snapshotProvi public void addJob(TransactionPrunerJob job) throws TransactionPruningException { job.setTransactionPruner(this); job.setSpentAddressesService(spentAddressesService); + job.setSpentAddressesProvider(spentAddressesProvider); job.setTangle(tangle); job.setTipsViewModel(tipsViewModel); job.setSnapshot(snapshotProvider.getInitialSnapshot()); @@ -162,8 +177,10 @@ public void addJob(TransactionPrunerJob job) throws TransactionPruningException /** * {@inheritDoc} * + *

    * It iterates through all available queues and triggers the processing of their jobs. - * + *

    + * * @throws TransactionPruningException if anything goes wrong while processing the cleanup jobs */ @Override @@ -179,9 +196,10 @@ public void processJobs() throws TransactionPruningException { /** + *

    * This method removes all queued jobs and resets the state of the {@link TransactionPruner}. It can for example be * used to cleanup after tests. - * + *

    * It cycles through all registered {@link JobQueue}s and clears them before persisting the state. */ void clear() { @@ -191,11 +209,14 @@ void clear() { } /** + *

    * This method starts the cleanup and persistence {@link Thread}s that asynchronously process the queued jobs in the * background. - * + *

    + *

    * Note: This method is thread safe since we use a {@link ThreadIdentifier} to address the {@link Thread}. The * {@link ThreadUtils} take care of only launching exactly one {@link Thread} that is not terminated. + *

    */ public void start() { ThreadUtils.spawnThread(this::processJobsThread, cleanupThreadIdentifier); @@ -242,9 +263,11 @@ private void addJobQueue(Class jobClass, JobQueu /** * This method allows to register a {@link JobParser} for a given job type. * + *

    * When we serialize the pending jobs to save the current state, we also dump their class names, which allows us to * generically parse their serialized representation using the registered parser function back into the * corresponding job. + *

    * * @param jobClass class of the job that the TransactionPruner shall be able to handle * @param jobParser parser function for the serialized version of jobs of the given type diff --git a/src/main/java/com/iota/iri/service/transactionpruning/jobs/AbstractTransactionPrunerJob.java b/src/main/java/com/iota/iri/service/transactionpruning/jobs/AbstractTransactionPrunerJob.java index ebaf0744df..04795c272b 100644 --- a/src/main/java/com/iota/iri/service/transactionpruning/jobs/AbstractTransactionPrunerJob.java +++ b/src/main/java/com/iota/iri/service/transactionpruning/jobs/AbstractTransactionPrunerJob.java @@ -2,6 +2,7 @@ import com.iota.iri.controllers.TipsViewModel; import com.iota.iri.service.snapshot.Snapshot; +import com.iota.iri.service.spentaddresses.SpentAddressesProvider; import com.iota.iri.service.spentaddresses.SpentAddressesService; import com.iota.iri.service.transactionpruning.TransactionPruner; import com.iota.iri.service.transactionpruning.TransactionPrunerJob; @@ -24,10 +25,15 @@ public abstract class AbstractTransactionPrunerJob implements TransactionPrunerJ private TransactionPruner transactionPruner; /** - * Ascertains that pruned transactions are recorded as spent addresses where necessary + * Ascertains that pruned transactions are recorded as spent addresses where necessary. */ protected SpentAddressesService spentAddressesService; + /** + * Ascertains whether transactions are already added to the underlying persistence layer. + */ + protected SpentAddressesProvider spentAddressesProvider; + /** * Holds a reference to the tangle object which acts as a database interface. 
*/ @@ -96,6 +102,11 @@ public void setSpentAddressesService(SpentAddressesService spentAddressesService this.spentAddressesService = spentAddressesService; } + @Override + public void setSpentAddressesProvider(SpentAddressesProvider spentAddressesProvider) { + this.spentAddressesProvider = spentAddressesProvider; + } + /** * {@inheritDoc} */ diff --git a/src/main/java/com/iota/iri/service/transactionpruning/jobs/MilestonePrunerJob.java b/src/main/java/com/iota/iri/service/transactionpruning/jobs/MilestonePrunerJob.java index b182cc37c9..a32c342286 100644 --- a/src/main/java/com/iota/iri/service/transactionpruning/jobs/MilestonePrunerJob.java +++ b/src/main/java/com/iota/iri/service/transactionpruning/jobs/MilestonePrunerJob.java @@ -269,7 +269,7 @@ private void cleanupMilestoneTransactions() throws TransactionPruningException { approvedTransaction -> approvedTransaction.snapshotIndex() >= milestoneViewModel.index(), approvedTransaction -> { if (approvedTransaction.value() < 0 && - !spentAddressesService.wasAddressSpentFrom(approvedTransaction.getAddressHash())) { + !spentAddressesProvider.containsAddress(approvedTransaction.getAddressHash())) { log.warn("Pruned spend transaction " + approvedTransaction.getHash() + " did not have its spent address recorded. 
Persisting it now"); spentAddressesService diff --git a/src/main/java/com/iota/iri/storage/Tangle.java b/src/main/java/com/iota/iri/storage/Tangle.java index 5e444bb173..b88ab25c0f 100644 --- a/src/main/java/com/iota/iri/storage/Tangle.java +++ b/src/main/java/com/iota/iri/storage/Tangle.java @@ -1,13 +1,21 @@ package com.iota.iri.storage; +import com.iota.iri.model.Hash; import com.iota.iri.model.StateDiff; -import com.iota.iri.model.persistables.*; +import com.iota.iri.model.persistables.Address; +import com.iota.iri.model.persistables.Approvee; +import com.iota.iri.model.persistables.Bundle; +import com.iota.iri.model.persistables.Milestone; +import com.iota.iri.model.persistables.ObsoleteTag; +import com.iota.iri.model.persistables.Tag; +import com.iota.iri.model.persistables.Transaction; import com.iota.iri.utils.Pair; import java.util.*; import java.util.function.Function; import java.util.stream.Collectors; +import com.iota.iri.zmq.MessageQueueProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,23 +41,33 @@ public class Tangle { new AbstractMap.SimpleImmutableEntry<>("transaction-metadata", Transaction.class); private final List persistenceProviders = new ArrayList<>(); - + private final List messageQueueProviders = new ArrayList<>(); public void addPersistenceProvider(PersistenceProvider provider) { this.persistenceProviders.add(provider); } + /** + * Adds {@link com.iota.iri.storage.MessageQueueProvider} that should be notified. + * @param provider that should be notified. + */ + public void addMessageQueueProvider(MessageQueueProvider provider) { + this.messageQueueProviders.add(provider); + } + public void init() throws Exception { for(PersistenceProvider provider: this.persistenceProviders) { provider.init(); } } - public void shutdown() throws Exception { log.info("Shutting down Tangle Persistence Providers... 
"); this.persistenceProviders.forEach(PersistenceProvider::shutdown); this.persistenceProviders.clear(); + log.info("Shutting down Tangle MessageQueue Providers... "); + this.messageQueueProviders.forEach(MessageQueueProvider::shutdown); + this.messageQueueProviders.clear(); } public Persistable load(Class model, Indexable index) throws Exception { @@ -107,16 +125,43 @@ public Pair getLatest(Class model, Class index) th return latest; } - public Boolean update(Persistable model, Indexable index, String item) throws Exception { - boolean success = false; - for(PersistenceProvider provider: this.persistenceProviders) { - if(success) { - provider.update(model, index, item); - } else { - success = provider.update(model, index, item); - } - } - return success; + /** + * Updates all {@link PersistenceProvider} and publishes message to all {@link com.iota.iri.storage.MessageQueueProvider}. + * + * @param model with transaction data + * @param index {@link Hash} identifier of the {@link Transaction} set + * @param item identifying the purpose of the update + * @throws Exception when updating the {@link PersistenceProvider} fails + */ + public void update(Persistable model, Indexable index, String item) throws Exception { + updatePersistenceProvider(model, index, item); + updateMessageQueueProvider(model, index, item); + } + + private void updatePersistenceProvider(Persistable model, Indexable index, String item) throws Exception { + for(PersistenceProvider provider: this.persistenceProviders) { + provider.update(model, index, item); + } + } + + private void updateMessageQueueProvider(Persistable model, Indexable index, String item) { + for(MessageQueueProvider provider: this.messageQueueProviders) { + provider.publishTransaction(model, index, item); + } + } + + /** + * Notifies all registered {@link com.iota.iri.storage.MessageQueueProvider} and publishes message to MessageQueue. 
+ * + * @param message that can be formatted by {@link String#format(String, Object...)} + * @param objects that should replace the placeholder in message. + * @see com.iota.iri.zmq.ZmqMessageQueueProvider#publish(String, Object...) + * @see String#format(String, Object...) + */ + public void publish(String message, Object... objects) { + for(MessageQueueProvider provider: this.messageQueueProviders) { + provider.publish(message, objects); + } } public Set keysWithMissingReferences(Class modelClass, Class referencedClass) throws Exception { @@ -237,18 +282,4 @@ public void clearMetadata(Class column) throws Exception { provider.clearMetadata(column); } } - - /* - public boolean merge(Persistable model, Indexable index) throws Exception { - boolean exists = false; - for(PersistenceProvider provider: persistenceProviders) { - if(exists) { - provider.save(model, index); - } else { - exists = provider.merge(model, index); - } - } - return exists; - } - */ } diff --git a/src/main/java/com/iota/iri/storage/rocksDB/RocksDBPersistenceProvider.java b/src/main/java/com/iota/iri/storage/rocksDB/RocksDBPersistenceProvider.java index af50a58af5..f4d96fdbdb 100644 --- a/src/main/java/com/iota/iri/storage/rocksDB/RocksDBPersistenceProvider.java +++ b/src/main/java/com/iota/iri/storage/rocksDB/RocksDBPersistenceProvider.java @@ -10,13 +10,38 @@ import java.io.File; import java.nio.file.Paths; import java.security.SecureRandom; -import java.util.*; -import java.util.stream.Collectors; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; import org.apache.commons.lang3.SystemUtils; -import org.rocksdb.*; +import org.rocksdb.BackupEngine; +import 
org.rocksdb.BackupableDBOptions; +import org.rocksdb.BlockBasedTableConfig; +import org.rocksdb.BloomFilter; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.ColumnFamilyOptions; +import org.rocksdb.DBOptions; +import org.rocksdb.Env; +import org.rocksdb.MergeOperator; +import org.rocksdb.RestoreOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksEnv; +import org.rocksdb.RocksIterator; +import org.rocksdb.StringAppendOperator; +import org.rocksdb.WriteBatch; +import org.rocksdb.WriteOptions; import org.rocksdb.util.SizeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -296,16 +321,16 @@ public void deleteBatch(Collection { + for (Pair> entry : models) { Indexable indexable = entry.low; byte[] keyBytes = indexable.bytes(); ColumnFamilyHandle handle = classTreeMap.get(entry.hi); - writeBatch.remove(handle, keyBytes); + writeBatch.delete(handle, keyBytes); ColumnFamilyHandle metadataHandle = metadataReference.get(entry.hi); if (metadataHandle != null) { - writeBatch.remove(metadataHandle, keyBytes); + writeBatch.delete(metadataHandle, keyBytes); } - }); + } WriteOptions writeOptions = new WriteOptions() //We are explicit about what happens if the node reboots before a flush to the db @@ -350,7 +375,7 @@ private void flushHandle(ColumnFamilyHandle handle) throws RocksDBException { itemsToDelete.add(iterator.key()); } } - if (itemsToDelete.size() > 0) { + if (!itemsToDelete.isEmpty()) { log.info("Amount to delete: " + itemsToDelete.size()); } int counter = 0; @@ -505,38 +530,4 @@ private void initClassTreeMap(List columnFamilyDescripto classTreeMap = MapUtils.unmodifiableMap(classMap); } - // 2018 March 28 - Unused Code - private void fillMissingColumns(List familyDescriptors, String path) throws Exception { - - List columnFamilies = RocksDB.listColumnFamilies(new Options().setCreateIfMissing(true), path) - .stream() - .map(b -> new 
ColumnFamilyDescriptor(b, new ColumnFamilyOptions())) - .collect(Collectors.toList()); - - columnFamilies.add(0, familyDescriptors.get(0)); - - List missingFromDatabase = familyDescriptors.stream().filter(d -> columnFamilies.stream().filter(desc -> new String(desc.columnFamilyName()).equals(new String(d.columnFamilyName()))).toArray().length == 0).collect(Collectors.toList()); - List missingFromDescription = columnFamilies.stream().filter(d -> familyDescriptors.stream().filter(desc -> new String(desc.columnFamilyName()).equals(new String(d.columnFamilyName()))).toArray().length == 0).collect(Collectors.toList()); - - if (missingFromDatabase.size() != 0) { - missingFromDatabase.remove(familyDescriptors.get(0)); - - try (RocksDB rocksDB = db = RocksDB.open(options, path, columnFamilies, columnFamilyHandles)) { - for (ColumnFamilyDescriptor description : missingFromDatabase) { - addColumnFamily(description.columnFamilyName(), rocksDB); - } - } - } - if (missingFromDescription.size() != 0) { - familyDescriptors.addAll(missingFromDescription); - } - } - - // 2018 March 28 - Unused Code - private void addColumnFamily(byte[] familyName, RocksDB db) throws RocksDBException { - final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily( - new ColumnFamilyDescriptor(familyName, new ColumnFamilyOptions())); - - assert (columnFamilyHandle != null); - } } diff --git a/src/main/java/com/iota/iri/utils/BitSetUtils.java b/src/main/java/com/iota/iri/utils/BitSetUtils.java new file mode 100644 index 0000000000..fb49318c2e --- /dev/null +++ b/src/main/java/com/iota/iri/utils/BitSetUtils.java @@ -0,0 +1,108 @@ +package com.iota.iri.utils; + +import java.util.BitSet; + +/** + * This class offers utility methods to transform BitSets into different data types. 
+ */ +public class BitSetUtils { + /** + * This method converts a byte array to a {@link BitSet} of the given size ({@code sizeOfBitSet}) by copying the + * bits of every byte into the {@link BitSet} in reverse order (starting with the given {@code startOffset}. + * + * It first checks if the byte array is big enough to provide enough bits for the provided parameters and then + * starts the copying process. + * + * @param byteArray byte array that shall be converted + * @param startOffset the amount of bytes to skip at the start + * @param sizeOfBitSet the desired amount of bits in the resulting {@link BitSet} + * @return the {@link BitSet} containing the extracted bytes + */ + public static BitSet convertByteArrayToBitSet(byte[] byteArray, int startOffset, int sizeOfBitSet) { + if((byteArray.length - startOffset) * 8 < sizeOfBitSet) { + throw new IllegalArgumentException("the byte[] is too small to create a BitSet of length " + sizeOfBitSet); + } + + BitSet result = new BitSet(sizeOfBitSet); + + int bitMask = 128; + for(int i = 0; i < sizeOfBitSet; i++) { + // insert the bits in reverse order + result.set(i, (byteArray[i / 8 + startOffset] & bitMask) != 0); + + bitMask = bitMask / 2; + + if(bitMask == 0) { + bitMask = 128; + } + } + + return result; + } + + /** + * Does the same as {@link #convertByteArrayToBitSet(byte[], int, int)} but defaults to copy all remaining bytes + * following the {@code startOffset}. 
+ * + * @param byteArray byte array that shall be converted + * @param startOffset the amount of bytes to skip at the start + * @return the {@link BitSet} containing the extracted bytes + */ + public static BitSet convertByteArrayToBitSet(byte[] byteArray, int startOffset) { + return convertByteArrayToBitSet(byteArray, startOffset, (byteArray.length - startOffset) * 8); + } + + /** + * Does the same as {@link #convertByteArrayToBitSet(byte[], int, int)} but defaults to a {@code startOffset} of 0 + * and the full length for {@code sizeOfBitSet} resulting in converting the full byte array. + * + * @param byteArray byte array that shall be converted + * @return the {@link BitSet} containing the bytes of the byte array + */ + public static BitSet convertByteArrayToBitSet(byte[] byteArray) { + return convertByteArrayToBitSet(byteArray, 0); + } + + /** + * Converts a {@link BitSet} into a byte array by copying the bits in groups of 8 into the resulting bytes of the + * array. + * + * It first calculates the size of the resulting array and then iterates over the bits of the {@link BitSet} to + * write them into the correct index of the byte array. We write the bits in reverse order, shifting them to the + * left before every step. + * + * If the {@link BitSet} is not big enough to fill up the last byte, we fill the remaining bits with zeros by + * shifting the previously written bits to the left accordingly. + * + * @param bitSet the {@link BitSet} that shall be converted. 
+ * @return the byte array containing the bits of the {@link BitSet} in groups of 8 + */ + public static byte[] convertBitSetToByteArray(BitSet bitSet) { + int lengthOfBitSet = bitSet.length(); + int lengthOfArray = (int) Math.ceil(lengthOfBitSet / 8.0); + + byte[] result = new byte[lengthOfArray]; + + for(int i = 0; i < lengthOfBitSet; i++) { + // for every new index -> start with a 1 so the shifting keeps track of the position we are on (gets shifted + // out when we arrive at the last bit of the current byte) + if(i % 8 == 0) { + result[i / 8] = 1; + } + + // shift the existing bits to the left to make space for the bit that gets written now + result[i / 8] <<= 1; + + // write the current bit + result[i / 8] ^= bitSet.get(i) ? 1 : 0; + + // if we are at the last bit of the BitSet -> shift the missing bytes to "fill up" the remaining space (in + // case the BitSet was not long enough to fill up a full byte) + if(i == (lengthOfBitSet - 1)) { + result[i / 8] <<= (8 - (i % 8) - 1); + } + } + + return result; + } +} diff --git a/src/main/java/com/iota/iri/utils/Converter.java b/src/main/java/com/iota/iri/utils/Converter.java index 7befc8f763..e8c2d49359 100644 --- a/src/main/java/com/iota/iri/utils/Converter.java +++ b/src/main/java/com/iota/iri/utils/Converter.java @@ -296,7 +296,7 @@ public static String asciiToTrytes(String input) { * every 2 Trytes are converted to a ASCII character. *

    * - * @param trytes Trytes string + * @param input Trytes string * @return ASCII string. */ public static String trytesToAscii(String input) { diff --git a/src/main/java/com/iota/iri/utils/IotaUtils.java b/src/main/java/com/iota/iri/utils/IotaUtils.java index 7a751e4d65..56b6fcf4c1 100644 --- a/src/main/java/com/iota/iri/utils/IotaUtils.java +++ b/src/main/java/com/iota/iri/utils/IotaUtils.java @@ -1,9 +1,15 @@ package com.iota.iri.utils; import org.apache.commons.lang3.StringUtils; +import org.apache.maven.model.Model; +import org.apache.maven.model.io.xpp3.MavenXpp3Reader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import com.iota.iri.IRI; import com.iota.iri.model.Hash; +import java.io.FileReader; import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -17,6 +23,29 @@ public class IotaUtils { + private static final Logger log = LoggerFactory.getLogger(IotaUtils.class); + + /** + * Returns the current version IRI is running by reading the Jar manifest. 
+ * If we run not from a jar or the manifest is missing we read straight from the pom + * + * @return the implementation version of IRI + */ + public static String getIriVersion() { + String implementationVersion = IRI.class.getPackage().getImplementationVersion(); + //If not in manifest (can happen when running from IDE) + if (implementationVersion == null) { + MavenXpp3Reader reader = new MavenXpp3Reader(); + try { + Model model = reader.read(new FileReader("pom.xml")); + implementationVersion = model.getVersion(); + } catch (Exception e) { + log.error("Failed to parse version from pom", e); + } + } + return implementationVersion; + } + public static List splitStringToImmutableList(String string, String regexSplit) { return Arrays.stream(string.split(regexSplit)) .filter(StringUtils::isNoneBlank) diff --git a/src/main/java/com/iota/iri/utils/datastructure/CuckooFilter.java b/src/main/java/com/iota/iri/utils/datastructure/CuckooFilter.java new file mode 100644 index 0000000000..9807ff2119 --- /dev/null +++ b/src/main/java/com/iota/iri/utils/datastructure/CuckooFilter.java @@ -0,0 +1,86 @@ +package com.iota.iri.utils.datastructure; + +/** + * The Cuckoo Filter is a probabilistic data structure that supports fast set membership testing. + * + * It is very similar to a bloom filter in that they both are very fast and space efficient. Both the bloom filter and + * cuckoo filter also report false positives on set membership. + * + * Cuckoo filters are a relatively new data structure, described in a paper in 2014 by Fan, Andersen, Kaminsky, and + * Mitzenmacher (https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf). They improve upon the design of the bloom + * filter by offering deletion, limited counting, and a bounded false positive probability, while still maintaining a + * similar space complexity. + * + * They use cuckoo hashing to resolve collisions and are essentially a compact cuckoo hash table. 
+ */ +public interface CuckooFilter { + /** + * Adds a new elements to the filter that then can be queried with {@link #contains(String)}. + * + * @param item element that shall be stored in the filter + * @return true if the insertion was successful (if the filter is too full this can return false) + * @throws IndexOutOfBoundsException if we try to add an element to an already too full filter + */ + boolean add(String item) throws IndexOutOfBoundsException; + + /** + * Adds a new elements to the filter that then can be queried with {@link #contains(byte[])}. + * + * @param item element that shall be stored in the filter + * @return true if the insertion was successful (if the filter is too full this can return false) + * @throws IndexOutOfBoundsException if we try to add an element to an already too full filter + */ + boolean add(byte[] item) throws IndexOutOfBoundsException; + + /** + * Queries for the existence of an element in the filter. + * + * @param item element that shall be checked + * @return true if it is "probably" in the filter (~3% false positives) or false if it is "definitely" not in there + */ + boolean contains(String item); + + /** + * Queries for the existence of an element in the filter. + * + * @param item element that shall be checked + * @return true if it is "probably" in the filter (~3% false positives) or false if it is "definitely" not in there + */ + boolean contains(byte[] item); + + /** + * Deletes an element from the filter. + * + * @param item element that shall be deleted from filter + * @return true if something was deleted matching the element or false otherwise + */ + boolean delete(String item); + + /** + * Deletes an element from the filter. + * + * @param item element that shall be deleted from filter + * @return true if something was deleted matching the element or false otherwise + */ + boolean delete(byte[] item); + + /** + * This method returns the actual capacity of the filter. 
+ * + * Since the capacity has to be a power of two and we want to reach a load factor of less than 0.955, the actual + * capacity is bigger than the amount of items we passed into the constructor. + * + * @return the actual capacity of the filter + */ + int getCapacity(); + + /** + * This method returns the amount of elements that are stored in the filter. + * + * Since a cuckoo filter can have collisions the size is not necessarily identical with the amount of items that we + * added. + * + * @return the amount of stored items + */ + int size(); +} diff --git a/src/main/java/com/iota/iri/utils/datastructure/impl/CuckooFilterImpl.java b/src/main/java/com/iota/iri/utils/datastructure/impl/CuckooFilterImpl.java new file mode 100644 index 0000000000..8efa4e2aae --- /dev/null +++ b/src/main/java/com/iota/iri/utils/datastructure/impl/CuckooFilterImpl.java @@ -0,0 +1,589 @@ +package com.iota.iri.utils.datastructure.impl; + +import com.iota.iri.utils.BitSetUtils; +import com.iota.iri.utils.datastructure.CuckooFilter; + +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.BitSet; + +/** + * This class implements the basic contract of the {@link CuckooFilter}. + */ +public class CuckooFilterImpl implements CuckooFilter { + /** + * The amount of times we try to kick elements when inserting before we consider the index to be too full. + */ + private static final int MAX_NUM_KICKS = 500; + + /** + * A reference to the last element that didn't fit into the filter (used for "soft failure" on first attempt). + */ + private CuckooFilterItem lastVictim; + + /** + * The hash function that is used to generate finger prints and indexes (defaults to SHA1). + */ + private MessageDigest hashFunction; + + /** + * the amount of buckets in our table (get's calculated from the itemCount that we want to store) + */ + private int tableSize = 1; + + /** + * The amount of items that can be stored in each bucket. 
+ */ + private int bucketSize; + + /** + * The amount of bits per fingerprint for each entry (the optimum is around 7 bits with a load of ~0.955) + */ + private int fingerPrintSize; + + /** + * Holds the amount if items that are stored in the filter. + */ + private int storedItems = 0; + + /** + * Holds the capacity of the filter. + */ + private int capacity = 1; + + /** + * The actual underlying data structure holding the elements. + */ + private CuckooFilterTable cuckooFilterTable; + + /** + * Simplified constructor that automatically chooses the values with best space complexity and false positive rate. + * + * The optimal values are a bucket size of 4 and a fingerprint size of 7.2 bits (we round up to 8 bits). For more + * info regarding those values see + * cuckoo-conext2014.pdf and + * Cuckoo Hashing. + * + * NOTE: The actual size will be slightly bigger since the size has to be a power of 2 and take the optimal load + * factor of 0.955 into account. + * + * @param itemCount the minimum amount of items that should fit into the filter + */ + public CuckooFilterImpl(int itemCount) { + this(itemCount, 4, 8); + } + + /** + * Advanced constructor that allows for fine tuning of the desired filter. + * + * It first saves a reference to the hash function and then checks the parameters - the finger print size cannot + * be bigger than 128 bits because SHA1 generates 160 bits and we use 128 of that for the fingerprint and the rest + * for the index. + * + * After verifying that the passed in parameters are reasonable, we calculate the required size of the + * {@link CuckooFilterTable} by increasing the table size exponentially until we can fit the desired item count with + * a load factor of <= 0.955. Finally we create the {@link CuckooFilterTable} that will hold our data. + * + * NOTE: The actual size will be slightly bigger since the size has to be a power of 2 and take the optimal load + * factor of 0.955 into account. 
+ * + * @param itemCount the minimum amount of items that should fit into the filter + * @param bucketSize the amount of items that can be stored in each bucket + * @param fingerPrintSize the amount of bits per fingerprint (it has to be bigger than 0 and smaller than 128) + * @throws IllegalArgumentException if the finger print size is too small or too big + * @throws InternalError if the SHA1 hashing function can not be found with this java version [should never happen] + */ + public CuckooFilterImpl(int itemCount, int bucketSize, int fingerPrintSize) throws IllegalArgumentException, + InternalError { + + try { + hashFunction = MessageDigest.getInstance("SHA1"); + } catch(NoSuchAlgorithmException e) { + throw new InternalError("missing SHA1 support - please check your JAVA installation"); + } + + if(fingerPrintSize <= 0 || fingerPrintSize > 128) { + throw new IllegalArgumentException("invalid finger print size \"" + fingerPrintSize + + "\" [expected value between 0 and 129]"); + } + + while((tableSize * bucketSize) < itemCount || itemCount * 1.0 / (tableSize * bucketSize) > 0.955) { + tableSize <<= 1; + } + + this.bucketSize = bucketSize; + this.fingerPrintSize = fingerPrintSize; + this.capacity = tableSize * bucketSize + 1; + + cuckooFilterTable = new CuckooFilterTable(tableSize, bucketSize, fingerPrintSize); + } + + /** + * {@inheritDoc} + * + * It retrieves the necessary details by passing it into the Item class and then executes the internal add logic. + */ + @Override + public boolean add(String item) throws IndexOutOfBoundsException { + return add(new CuckooFilterItem(item)); + } + + /** + * {@inheritDoc} + * + * It retrieves the necessary details by passing it into the Item class and then executes the internal add logic. 
+ */ + @Override + public boolean add(byte[] item) throws IndexOutOfBoundsException { + return add(new CuckooFilterItem(hashFunction.digest(item))); + } + + /** + * {@inheritDoc} + * + * It retrieves the necessary details by passing it into the Item class and then executes the internal contains + * logic. + */ + @Override + public boolean contains(String item) { + return contains(new CuckooFilterItem(item)); + } + + /** + * {@inheritDoc} + * + * It retrieves the necessary details by passing it into the Item class and then executes the internal contains + * logic. + */ + @Override + public boolean contains(byte[] item) { + return contains(new CuckooFilterItem(hashFunction.digest(item))); + } + + /** + * {@inheritDoc} + * + * It retrieves the necessary details by passing it into the Item class and then executes the internal delete logic. + */ + @Override + public boolean delete(String item) { + return delete(new CuckooFilterItem(item)); + } + + /** + * {@inheritDoc} + * + * It retrieves the necessary details by passing it into the Item class and then executes the internal delete logic. + */ + @Override + public boolean delete(byte[] item) { + return delete(new CuckooFilterItem(hashFunction.digest(item))); + } + + /** + * {@inheritDoc} + */ + @Override + public int getCapacity() { + return capacity; + } + + /** + * {@inheritDoc} + */ + @Override + public int size() { + return storedItems; + } + + /** + * Adds a new elements to the filter that then can be queried for existence. + * + * It first checks if the item is already a part of the filter and skips the insertion if that is the case. If the + * element is not part of the filter, we check if the table is too full already by checking if we have a kicked out + * victim. + * + * If the filter is not too full, we insert the element by trying to place it in its associated position (moving + * existing elements if space is needed). 
+ * + * @param item item to be stored in the filter + * @return true if the insertion was successful (if the filter is too full this can return false) + * @throws IndexOutOfBoundsException if we try to add an item to an already too full filter + */ + private boolean add(CuckooFilterItem item) throws IndexOutOfBoundsException { + if(contains(item)) { + return true; + } + + if(lastVictim != null) { + throw new IndexOutOfBoundsException("the filter is too full"); + } + + // try to insert the item into the first free slot of its cuckooFilterTable (trivial) + for(int i = 0; i < bucketSize; i++) { + if(cuckooFilterTable.get(item.index, i) == null) { + cuckooFilterTable.set(item.index, i, item.fingerPrint); + + storedItems++; + + return true; + } + + if(cuckooFilterTable.get(item.altIndex, i) == null) { + cuckooFilterTable.set(item.altIndex, i, item.fingerPrint); + + storedItems++; + + return true; + } + } + + // filter is full -> start moving MAX_NUM_KICKS times (randomly select which bucket to start with) + int indexOfDestinationBucket = Math.random() < 0.5 ? 
item.index : item.altIndex; + for(int i = 0; i < MAX_NUM_KICKS; i++) { + // select a random item to kick + int indexOfItemToKick = (int) (Math.random() * bucketSize); + + // swap the items + BitSet kickedFingerPrint = cuckooFilterTable.get(indexOfDestinationBucket, indexOfItemToKick); + cuckooFilterTable.set(indexOfDestinationBucket, indexOfItemToKick, item.fingerPrint); + item = new CuckooFilterItem(kickedFingerPrint, indexOfDestinationBucket); + indexOfDestinationBucket = item.altIndex; + + // try to insert the items into its alternate location + for(int n = 0; n < bucketSize; n++) { + if(cuckooFilterTable.get(indexOfDestinationBucket, n) == null) { + cuckooFilterTable.set(indexOfDestinationBucket, n, item.fingerPrint); + + storedItems++; + + return true; + } + } + } + + // store the last item that didn't fit, so we can provide a soft failure option + lastVictim = item; + + storedItems++; + + // return false to indicate that the addition failed + return false; + } + + /** + * Queries for the existence of an element in the filter. + * + * It simply checks if the item exists in one of it's associated buckets or if it equals the lastVictim which is set + * in case the filter ever gets too full. + * + * @param item element to be checked + * @return true if it is "probably" in the filter (~3% false positives) or false if it is "definitely" not in there + */ + private boolean contains(CuckooFilterItem item) { + if(lastVictim != null && item.fingerPrint.equals(lastVictim.fingerPrint)) { + return true; + } + + // check existence of our finger print in the first index + for(int i = 0; i < bucketSize; i++) { + if(item.fingerPrint.equals(cuckooFilterTable.get(item.index, i))) { + return true; + } + } + + // check existence of our finger print the alternate index + for(int i = 0; i < bucketSize; i++) { + if(item.fingerPrint.equals(cuckooFilterTable.get(item.altIndex, i))) { + return true; + } + } + + return false; + } + + /** + * Deletes an element from the filter. 
+ * + * It first tries to delete the item from the lastVictim slot if it matches and in case of failure cycles through + * the corresponding buckets, to remove a copy of it's fingerprint if one is found. + * + * @param item element that shall be deleted from filter + * @return true if something was deleted matching the item + */ + public boolean delete(CuckooFilterItem item) { + if(lastVictim != null && item.fingerPrint.equals(lastVictim.fingerPrint)) { + lastVictim = null; + + storedItems--; + + return true; + } + + // check existence of our finger print in the first index + for(int i = 0; i < bucketSize; i++) { + if(item.fingerPrint.equals(cuckooFilterTable.get(item.index, i))) { + cuckooFilterTable.delete(item.index, i); + + storedItems--; + + return true; + } + } + + // check existence of our finger print the alternate index + for(int i = 0; i < bucketSize; i++) { + if(item.fingerPrint.equals(cuckooFilterTable.get(item.altIndex, i))) { + cuckooFilterTable.delete(item.altIndex, i); + + storedItems--; + + return true; + } + } + + return false; + } + + /** + * This method derives the index of an element by the full hash of the item. + * + * It is primarily used to calculate the original position of the item, when the item is freshly inserted into the + * filter. Since we only store the finger print of the item we have to use the 2nd version of this method to later + * retrieve the alternate index through (partial-key cuckoo hashing). 
+     *
+     * @param elementHash hash of the element
+     * @return the primary index of the element
+     */
+    private int getIndex(byte[] elementHash) {
+        // initialize the new address with an empty bit sequence
+        long index = 0;
+
+        // process all address bytes (first 4 bytes)
+        for(int i = 0; i < 4; i++) {
+            // copy the bits from the hash into the index
+            index |= (elementHash[i] & 0xff);
+
+            // shift the bits to make space for the next iteration
+            if(i < 3) {
+                index <<= 8;
+            }
+        }
+
+        // extract the relevant last 32 bits
+        index &= 0x00000000ffffffffL;
+
+        // map the result to the domain of possible table addresses
+        return (int) (index % (long) tableSize);
+    }
+
+    /**
+     * This method allows us to retrieve the "alternate" index of an item based on its current position in the table
+     * and its fingerprint.
+     *
+     * It is used to move items around that are already part of the table and where the original hash value is not known
+     * anymore. The mechanism used to derive the new position is called partial-key cuckoo hashing and while being
+     * relatively bad "in theory", it turns out to be better in practice than the math would suggest when it comes to
+     * distributing the entries equally in the table.
+     *
+     * The operation is bi-directional, allowing us to also get the original position by passing the alternate index
+     * into this function.
+     *
+     * @param fingerPrint finger print of the item
+     * @param oldIndex the old position of the item in the cuckoo hash table
+     * @return the alternate index of the element
+     */
+    private int getIndex(BitSet fingerPrint, long oldIndex) {
+        // calculate the hash of the finger print (partial-key cuckoo hashing)
+        byte[] fingerPrintHash = hashFunction.digest(BitSetUtils.convertBitSetToByteArray(fingerPrint));
+
+        // initialize the new address with an empty bit sequence
+        long index = 0;
+
+        // process all address bytes (first 4 bytes)
+        for (int i=0; i < 4; i++) {
+            // shift the relevant oldIndex byte into position
+            byte oldIndexByte = (byte) (((0xffL << (i*8)) & (long) oldIndex) >> (i * 8));
+
+            // xor the finger print and the oldIndex bytes and insert the result into the new index
+            index |= (((fingerPrintHash[i] ^ oldIndexByte) & 0xff) << (i * 8));
+        }
+
+        // extract the relevant last 32 bits
+        index &= 0x00000000ffffffffL;
+
+        // map the result to the domain of possible table addresses
+        return (int) (index % (long) tableSize);
+    }
+
+    /**
+     * This method allows us to calculate the finger print of an item based on its hash value.
+     *
+     * It is used when inserting an item for the first time into the filter and to check the existence of items in the
+     * filter.
+ * + * @param hash full hash of the item only known when inserting or checking if an item is contained in the filter + * @return a BitSet representing the first n bits of the hash starting from index 4 up to the necessary length + * @throws IllegalArgumentException if the hash value provided to the method is too short + */ + private BitSet generateFingerPrint(byte[] hash) throws IllegalArgumentException { + if(hash.length < 20) { + throw new IllegalArgumentException("invalid hash [expected hash to contain at least 20 bytes]"); + } + + // do a simple conversion of the byte array to a BitSet of the desired length + return BitSetUtils.convertByteArrayToBitSet(hash, 4, fingerPrintSize); + } + + /** + * Internal helper class to represent items that are stored in the filter. + * + * It bundles the logic for generating the correct indexes and eases the access to all the properties that are + * related to managing those items while moving and inserting them. By having this little wrapper we only have to do + * the expensive calculations (like generating the hashes) once and can then pass them around. + */ + private class CuckooFilterItem { + private BitSet fingerPrint; + + private int index; + + private int altIndex; + + public CuckooFilterItem(String item) { + this(hashFunction.digest(item.getBytes())); + } + + public CuckooFilterItem(byte[] hash) { + fingerPrint = generateFingerPrint(hash); + index = getIndex(hash); + altIndex = getIndex(fingerPrint, index); + } + + public CuckooFilterItem(BitSet fingerPrint, int index) { + this.fingerPrint = fingerPrint; + this.index = index; + altIndex = getIndex(fingerPrint, index); + } + } + + /** + * This class implements a 2 dimensional table holding BitSets, whereas the first dimension represents the bucket + * index and the 2nd dimension represents the slot in the bucket. 
+ * + * It maps this 2-dimensional data structure to a 1-dimensional BitSet holding the actual values so even for huge + * first-level dimensions, we only call the constructor once - making it very fast. + */ + private class CuckooFilterTable { + /** + * Holds the actual data in a "flattened" way to improve performance. + */ + private BitSet data; + + /** + * Holds the number of buckets (first dimension). + */ + private int bucketAmount; + + /** + * Holds the size of the buckets (second dimension). + */ + private int bucketSize; + + /** + * Holds the amount of bits stored in each slot of the bucket. + */ + private int bitSetSize; + + /** + * This method initializes our underlying data structure and saves all the relevant parameters. + * + * @param bucketAmount number of buckets + * @param bucketSize size of the buckets + * @param bitSetSize amount of bits stored in each slot of the bucket + */ + public CuckooFilterTable(int bucketAmount, int bucketSize, int bitSetSize) { + this.bucketAmount = bucketAmount; + this.bucketSize = bucketSize; + this.bitSetSize = bitSetSize; + + data = new BitSet(bucketAmount * bucketSize * (bitSetSize + 1)); + } + + /** + * This method allows us to retrieve elements from the table. + * + * It creates a new BitSet with the value that is stored underneath. Every consequent call of this method + * creates a new Object so we don't waste any memory with caching objects. + * + * Note: It is not possible to retrieve a bucket as a whole since it gets mapped to the 1-dimensional structure + * but this is also not necessary for the implementation of the filter. 
+ * + * @param bucketIndex index of the bucket (1st dimension) + * @param slotIndex slot in the bucket (2nd dimension) + * @return stored BitSet or null if the slot is empty + */ + public BitSet get(int bucketIndex, int slotIndex) { + // calculates the mapped indexes + int nullIndex = bucketIndex * bucketSize * (bitSetSize + 1) + slotIndex * (bitSetSize + 1); + if(!data.get(nullIndex)) { + return null; + } + + // creates the result object + BitSet result = new BitSet(bitSetSize); + + // copies the bits from our underlying data structure to the result + for(int i = nullIndex + 1; i <= nullIndex + bitSetSize; i++) { + int relativeIndex = i - (nullIndex + 1); + + result.set(relativeIndex, data.get(i)); + } + + // returns the final result object + return result; + } + + /** + * This method allows us to store a new BitSet at the defined location. + * + * If we pass null as the object to store, the old items gets deleted and the flag representing "if the slot is + * filled" get's set to false. + * + * @param bucketIndex index of the bucket (1st dimension) + * @param slotIndex slot in the bucket (2nd dimension) + * @param bitSet object to store + * @return the table itself so we can chain calls + */ + public CuckooFilterTable set(int bucketIndex, int slotIndex, BitSet bitSet) { + // calculates the mapped indexes + int nullIndex = bucketIndex * bucketSize * (bitSetSize + 1) + slotIndex * (bitSetSize + 1); + + // mark the location as set or unset + data.set(nullIndex, bitSet != null); + + // copy the bits of the source BitSet to the mapped data structure + if(bitSet != null) { + for(int i = nullIndex + 1; i <= nullIndex + bitSetSize; i++) { + int relativeIndex = i - (nullIndex + 1); + + data.set(i, bitSet.get(relativeIndex)); + } + } + + return this; + } + + /** + * This method allows us to remove elements from the table. + * + * It internally calls the set method with null as the item to store. 
+ * + * @param bucketIndex index of the bucket (1st dimension) + * @param slotIndex slot in the bucket (2nd dimension) + * @return the table itself so we can chain calls + */ + public CuckooFilterTable delete(int bucketIndex, int slotIndex) { + return set(bucketIndex, slotIndex, null); + } + } +} diff --git a/src/main/java/com/iota/iri/utils/log/interval/IntervalLogger.java b/src/main/java/com/iota/iri/utils/log/interval/IntervalLogger.java index 50c16b5911..bcb3a8e947 100644 --- a/src/main/java/com/iota/iri/utils/log/interval/IntervalLogger.java +++ b/src/main/java/com/iota/iri/utils/log/interval/IntervalLogger.java @@ -11,11 +11,14 @@ import java.util.concurrent.atomic.AtomicBoolean; /** + *

    * This class represents a wrapper for the {@link org.slf4j.Logger} used by IRI that implements a logic to rate limits * the output on the console. - * + *

    + *

    * Instead of printing all messages immediately and unnecessarily spamming the console, it only prints messages every * few seconds and if the message has changed since the last output. + *

    */ public class IntervalLogger implements Logger { /** @@ -102,7 +105,7 @@ public IntervalLogger(Class clazz, int logInterval) { /** * Creates a {@link Logger} for the given class that prints messages only every {@code logInterval} milliseconds. * - * It simply stores the passed in parameters in its private properties to be able to access them later on. + * It stores the passed in parameters in its private properties to be able to access them later on. * * @param delegate logback logger for issuing the messages * @param logInterval time in milliseconds between log messages @@ -113,11 +116,12 @@ public IntervalLogger(org.slf4j.Logger delegate, int logInterval) { } /** - * This method returns the underlying logback Logger.
    - *
    + * This method returns the underlying logback Logger. + *

    * It can be used to issue log entries directly to the underlying logger without interfering with the logic of this - * class.
    - * + * class. + *

    + * * @return the underlying logback Logger */ public org.slf4j.Logger delegate() { @@ -127,8 +131,10 @@ public org.slf4j.Logger delegate() { /** * {@inheritDoc} * + *

    * It checks if the given message is new and then triggers the output. - * + *

    + * * @param message info message that shall get printed */ @Override @@ -147,7 +153,9 @@ public IntervalLogger info(String message) { /** * {@inheritDoc} * + *

    * It checks if the given message is new and then triggers the output. + *

    */ @Override public IntervalLogger debug(String message) { @@ -165,11 +173,14 @@ public IntervalLogger debug(String message) { /** * {@inheritDoc} * + *

    * It checks if the given message is new and then triggers the output. - * + *

    + *

    * Error messages will always get dumped immediately instead of scheduling their output. Since error messages are * usually a sign of some misbehaviour of the node we want to be able to see them when they appear (to be able to * track down bugs more easily). + *

    */ @Override public IntervalLogger error(String message) { @@ -178,12 +189,15 @@ public IntervalLogger error(String message) { /** * {@inheritDoc} - * + * + *

    * It checks if the given message is new and then triggers the output. - * + *

    + *

    * Error messages will always get dumped immediately instead of scheduling their output. Since error messages are * usually a sign of some misbehaviour of the node we want to be able to see them when they appear (to be able to * track down bugs more easily). + *

    */ @Override public IntervalLogger error(String message, Throwable cause) { @@ -225,11 +239,14 @@ public void triggerOutput() { } /** + *

    * Triggers the output of the last received message. - * + *

    + *

    * It either prints the message immediately (if enough time has passed since the last output or requested by the * caller) or schedules it for the next interval. - * + *

    + * * @param printImmediately flag indicating if the messages should be scheduled or printed immediately */ public void triggerOutput(boolean printImmediately) { @@ -243,11 +260,14 @@ public void triggerOutput(boolean printImmediately) { } /** + *

    * This method schedules the output of the last received message by spawning a {@link Thread} that will print the * new message after the given timeout. - * + *

    + *

    * When creating the {@link Thread} it copies its name so the output is "transparent" to the user and the effect * is the same as dumping the message manually through the {@link org.slf4j.Logger} object itself. + *

    */ private void scheduleOutput() { if (outputScheduled.compareAndSet(false, true)) { @@ -273,7 +293,7 @@ private abstract class Message { /** * Creates a message that gets managed by this logger. * - * It simply stores the provided message in the internal property. + * It stores the provided message in the internal property. * * @param message message that shall get printed */ @@ -284,9 +304,11 @@ public Message(String message) { /** * This method triggers the output of the given message. * + *

    * It first cancels any scheduled job because the latest provided message will be printed by this call already * and then triggers the printing of the message through the instance specific {@link #print()} method. - * + *

    + * * We only print the message if the same method was not printed already, before. */ public void output() { @@ -340,10 +362,11 @@ public boolean equals(Object obj) { /** * This method handles the actual output of messages through the {@link org.slf4j.Logger} instance. - * + *

    * It first checks if the message that shall get printed differs from the last message that was printed and then * issues the output of the message. After printing the message, it updates the internal variables to handle the * next message accordingly. + *

    */ protected abstract void print(); } diff --git a/src/main/java/com/iota/iri/utils/thread/BoundedScheduledExecutorService.java b/src/main/java/com/iota/iri/utils/thread/BoundedScheduledExecutorService.java index d0b19f4cea..7b81a50dfe 100644 --- a/src/main/java/com/iota/iri/utils/thread/BoundedScheduledExecutorService.java +++ b/src/main/java/com/iota/iri/utils/thread/BoundedScheduledExecutorService.java @@ -8,50 +8,58 @@ import java.util.concurrent.atomic.AtomicInteger; /** + *

    * This class represents a {@link SilentScheduledExecutorService} that accepts only a pre-defined amount of tasks that * can be queued or executed at the same time. All tasks exceeding the defined limit will be ignored (instead of being * queued) by either throwing a {@link RejectedExecutionException} or returning {@code null} depending on the method we - * call (non-silent vs silent).
    - *
    + * call (non-silent vs silent). + *

    + *

    * Whenever a non-recurring task finishes (or a recurring one is cancelled through its {@link Future}), it makes space * for a new task. This is useful for classes like the {@link com.iota.iri.utils.log.interval.IntervalLogger} that want - * to delay an action if and only if there is no other delayed action queued already.
    - *
    + * to delay an action if and only if there is no other delayed action queued already. + *

    + *

    * Note: In contrast to other existing implementations like the SizedScheduledExecutorService of the apache package, * this class is thread-safe and will only allow to spawn and queue the exact amount of tasks defined during its - * creation (since it does not rely on approximate numbers like the queue size).
    + * creation (since it does not rely on approximate numbers like the queue size). + *

    */ public class BoundedScheduledExecutorService implements SilentScheduledExecutorService, ReportingExecutorService { /** - * Holds the maximum amount of tasks that can be submitted for execution.
    + * Holds the maximum amount of tasks that can be submitted for execution. */ private final int capacity; /** - * Holds the underlying {@link ScheduledExecutorService} that manages the Threads in the background.
    + * Holds the underlying {@link ScheduledExecutorService} that manages the Threads in the background. */ private final ScheduledExecutorService delegate; /** * Holds a set of scheduled tasks tasks that are going to be executed by this - * {@link ScheduledExecutorService}.
    + * {@link ScheduledExecutorService}. */ private final Set scheduledTasks = ConcurrentHashMap.newKeySet(); /** - * Thread-safe counter that is used to determine how many tasks were exactly scheduled already.
    - *
    - * Note: Whenever a task finishes, we clean up the used resources and make space for new tasks.
    + * Thread-safe counter that is used to determine how many tasks were exactly scheduled already. + *

    + * Note: Whenever a task finishes, we clean up the used resources and make space for new tasks. + *

    */ private AtomicInteger scheduledTasksCounter = new AtomicInteger(0); /** + *

    * Creates an executor service that that accepts only a pre-defined amount of tasks that can be queued and run at - * the same time.
    - *
    + * the same time. + *

    + *

    * All tasks exceeding the defined limit will be ignored (instead of being queued) by either throwing a * {@link RejectedExecutionException} or returning {@code null} depending on the method we call (non-silent vs - * silent).
    + * silent). + *

    * * @param capacity the amount of tasks that can be scheduled simultaneously */ @@ -65,8 +73,10 @@ public BoundedScheduledExecutorService(int capacity) { /** * {@inheritDoc} - *
    - * It simply adds the task to the internal set of scheduled tasks.
    + * + *

    + * It simply adds the task to the internal set of scheduled tasks. + *

    */ @Override public void onScheduleTask(TaskDetails taskDetails) { @@ -90,9 +100,11 @@ public void onCancelTask(TaskDetails taskDetails) { /** * {@inheritDoc} - *
    + * + *

    * It frees the reserved resources by decrementing the {@link #scheduledTasksCounter} and removing the task from the - * {@link #scheduledTasks} set.
    + * {@link #scheduledTasks} set. + *

    */ @Override public void onCompleteTask(TaskDetails taskDetails, Throwable error) { @@ -106,10 +118,12 @@ public void onCompleteTask(TaskDetails taskDetails, Throwable error) { /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding * logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Runnable} to correctly - * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
    + * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called. + *

    */ @Override public ScheduledFuture silentSchedule(Runnable task, long delay, TimeUnit unit) { @@ -124,10 +138,12 @@ public ScheduledFuture silentSchedule(Runnable task, long delay, TimeUnit uni /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding * logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Callable} to correctly - * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
    + * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called. + *

    */ @Override public ScheduledFuture silentSchedule(Callable task, long delay, TimeUnit unit) { @@ -142,10 +158,12 @@ public ScheduledFuture silentSchedule(Callable task, long delay, TimeU /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding * logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Runnable} to correctly - * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
    + * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called. + *

    */ @Override public ScheduledFuture silentScheduleAtFixedRate(Runnable task, long initialDelay, long period, @@ -163,10 +181,12 @@ public ScheduledFuture silentScheduleAtFixedRate(Runnable task, long initialD /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link ScheduledFuture} returned by this method allows to cancel jobs without their unwinding * logic being executed, we wrap the returned {@link ScheduledFuture} AND the {@link Runnable} to correctly - * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called.
    + * free the resources if its {@link ScheduledFuture#cancel(boolean)} method is called. + *

    */ @Override public ScheduledFuture silentScheduleWithFixedDelay(Runnable task, long initialDelay, long delay, @@ -184,10 +204,12 @@ public ScheduledFuture silentScheduleWithFixedDelay(Runnable task, long initi /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link Future} returned by this method allows to cancel jobs without their unwinding logic being * executed, we wrap the returned {@link Future} AND the {@link Callable} to correctly free the resources if - * its {@link Future#cancel(boolean)} method is called.
    + * its {@link Future#cancel(boolean)} method is called. + *

    */ @Override public Future silentSubmit(Callable task) { @@ -200,10 +222,12 @@ public Future silentSubmit(Callable task) { /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link Future} returned by this method allows to cancel jobs without their unwinding logic being * executed, we wrap the returned {@link Future} AND the {@link Runnable} to correctly free the resources if - * its {@link Future#cancel(boolean)} method is called.
    + * its {@link Future#cancel(boolean)} method is called. + *

    */ @Override public Future silentSubmit(Runnable task) { @@ -216,10 +240,12 @@ public Future silentSubmit(Runnable task) { /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link Future} returned by this method allows to cancel jobs without their unwinding logic being * executed, we wrap the returned {@link Future} AND the {@link Runnable} to correctly free the resources if - * its {@link Future#cancel(boolean)} method is called.
    + * its {@link Future#cancel(boolean)} method is called. + *

    */ @Override public Future silentSubmit(Runnable task, T result) { @@ -232,10 +258,12 @@ public Future silentSubmit(Runnable task, T result) { /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link Future}s will all be finished (and cannot be cancelled anymore) when the underlying method * returns, we only wrap the {@link Callable}s to correctly free the resources and omit wrapping the returned - * {@link Future}s.
    + * {@link Future}s. + *

    */ @Override public List> silentInvokeAll(Collection> tasks) throws InterruptedException { @@ -244,10 +272,12 @@ public List> silentInvokeAll(Collection> tas /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link Future}s will all be finished (and cannot be cancelled anymore) when the underlying method * returns, we only wrap the {@link Callable}s to correctly free the resources and omit wrapping the returned - * {@link Future}s.
    + * {@link Future}s. + *

    */ @Override public List> silentInvokeAll(Collection> tasks, long timeout, TimeUnit unit) @@ -267,9 +297,11 @@ public List> silentInvokeAll(Collection> tas /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link Future}s are not passed to the caller, we only wrap the {@link Callable}s to correctly - * free the resources and omit wrapping the {@link Future}s as well.
    + * free the resources and omit wrapping the {@link Future}s as well. + *

    */ @Override public T silentInvokeAny(Collection> tasks) throws InterruptedException, @@ -280,9 +312,11 @@ public T silentInvokeAny(Collection> tasks) throws Int /** * {@inheritDoc} - *
    + * + *

    * Note: Since the {@link Future}s are not passed to the caller, we only wrap the {@link Callable}s to correctly - * free the resources and omit wrapping the related {@link Future}s as well.
    + * free the resources and omit wrapping the related {@link Future}s as well. + *

    */ @Override public T silentInvokeAny(Collection> tasks, long timeout, TimeUnit unit) throws @@ -302,9 +336,11 @@ public T silentInvokeAny(Collection> tasks, long timeo /** * {@inheritDoc} - *
    + * + *

    * Note: Since there is no {@link Future} passed to the caller, we only wrap the {@link Runnable} to correctly free - * the resources.
    + * the resources. + *

    */ @Override public void silentExecute(Runnable task) { @@ -387,12 +423,14 @@ public void execute(Runnable command) { /** * {@inheritDoc} - *
    + * + *

    * In addition to delegating the method call to the internal {@link ScheduledExecutorService}, we call the cancel * logic for recurring tasks because shutdown prevents them from firing again. If these "cancelled" jobs are * scheduled for execution (and not running right now), we also call their * {@link #onCompleteTask(TaskDetails, Throwable)} callback to "report" that hey have finished (otherwise this will - * be fired inside the wrapped task).
    + * be fired inside the wrapped task). + *

    */ @Override public void shutdown() { @@ -411,11 +449,13 @@ public void shutdown() { /** * {@inheritDoc} - *
    + * + *

    * Before delegating the method call to the internal {@link ScheduledExecutorService}, we call the * {@link #onCancelTask(TaskDetails)} callback for all scheduled tasks and fire the * {@link #onCompleteTask(TaskDetails, Throwable)} callback for all tasks that are not being executed right now - * (otherwise this will be fired inside the wrapped task).
    + * (otherwise this will be fired inside the wrapped task). + *

    */ @Override public List shutdownNow() { @@ -453,7 +493,7 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE /** * This interface is used to generically describe the lambda that is used to create the unwrapped future from the - * delegated {@link ScheduledExecutorService} by passing in the wrapped command.
    + * delegated {@link ScheduledExecutorService} by passing in the wrapped command. * * @param the kind of future returned by this factory ({@link ScheduledFuture} vs {@link Future}) * @param type of the wrapped command that is passed in ({@link Runnable} vs {@link Callable}) @@ -473,30 +513,36 @@ private interface FutureFactory { /** * This is a wrapper for the {@link Future}s returned by the {@link ScheduledExecutorService} that allows us to * override the behaviour of the {@link #cancel(boolean)} method (to be able to free the resources of a task that - * gets cancelled without being executed).
    + * gets cancelled without being executed). * * @param the type of the result returned by the task that is the origin of this {@link Future}. */ private class WrappedFuture implements Future { /** - * Holds the metadata of the task that this {@link Future} belongs to.
    + * Holds the metadata of the task that this {@link Future} belongs to. */ protected final TaskDetails taskDetails; /** - * "Original" unwrapped {@link Future} that is returned by the delegated {@link ScheduledExecutorService}.
    - *
    + *

    + * "Original" unwrapped {@link Future} that is returned by the delegated {@link ScheduledExecutorService}. + *

    + *

    * Note: All methods except the {@link #cancel(boolean)} are getting passed through without any - * modifications.
    + * modifications. + *

    */ private Future delegate; /** + *

    * This creates a {@link WrappedFuture} that cleans up the reserved resources when it is cancelled while the - * task is still pending.
    - *
    + * task is still pending. + *

    + *

    * We do not hand in the {@link #delegate} in the constructor because we need to populate this instance to the - * wrapped task before we "launch" the processing of the task (see {@link #delegate(Future)}).
    + * wrapped task before we "launch" the processing of the task (see {@link #delegate(Future)}). + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -505,13 +551,16 @@ public WrappedFuture(TaskDetails taskDetails) { } /** - * This method stores the delegated {@link Future} in its internal property.
    - *
    + *

    + * This method stores the delegated {@link Future} in its internal property. + *

    + *

    * After the delegated {@link Future} is created, the underlying {@link ScheduledExecutorService} starts * processing the task. To be able to "address" this wrapped future before we start processing the task (the * wrapped task needs to access it), we populate this lazy (see * {@link #wrapFuture(FutureFactory, Runnable, TaskDetails)} and - * {@link #wrapFuture(FutureFactory, Callable, TaskDetails)}).
    + * {@link #wrapFuture(FutureFactory, Callable, TaskDetails)}). + *

    * * @param delegatedFuture the "original" future that handles the logic in the background * @return the instance itself (since we want to return the {@link WrappedFuture} after launching the underlying @@ -524,10 +573,13 @@ public Future delegate(Future delegatedFuture) { } /** - * This method returns the delegated future.
    - *
    + *

    + * This method returns the delegated future. + *

    + *

    * We define a getter for this property to be able to override it in the extending class and achieve a - * polymorphic behavior.
    + * polymorphic behavior. + *

    * * @return the original "unwrapped" {@link Future} that is used as a delegate for the methods of this class */ @@ -537,10 +589,12 @@ public Future delegate() { /** * {@inheritDoc} - *
    + * + *

    * This method fires the {@link #onCancelTask(TaskDetails)} if the future has not been cancelled before. * Afterwards it also fires the {@link #onCompleteTask(TaskDetails, Throwable)} callback if the task is not - * running right now (otherwise this will be fired inside the wrapped task).
    + * running right now (otherwise this will be fired inside the wrapped task). + *

    */ @Override public boolean cancel(boolean mayInterruptIfRunning) { @@ -579,26 +633,32 @@ public V get(long timeout, TimeUnit unit) throws InterruptedException, Execution /** * This is a wrapper for the {@link ScheduledFuture}s returned by the {@link ScheduledExecutorService} that allows * us to override the behaviour of the {@link #cancel(boolean)} method (to be able to free the resources of a task - * that gets cancelled without being executed).
    + * that gets cancelled without being executed). * * @param the type of the result returned by the task that is the origin of this {@link ScheduledFuture}. */ private class WrappedScheduledFuture extends WrappedFuture implements ScheduledFuture { /** + *

    * "Original" unwrapped {@link ScheduledFuture} that is returned by the delegated - * {@link ScheduledExecutorService}.
    - *
    + * {@link ScheduledExecutorService}. + *

    + *

    * Note: All methods except the {@link #cancel(boolean)} are getting passed through without any modifications. + *

    */ private ScheduledFuture delegate; /** + *

    * This creates a {@link ScheduledFuture} that cleans up the reserved resources when it is cancelled while the - * task is still pending.
    - *
    + * task is still pending. + *

    + *

    * We do not hand in the {@link #delegate} in the constructor because we need to populate this instance to the * wrapped task before we "launch" the processing of the task (see {@link #delegate(Future)}). - * + *

    + * * @param taskDetails metadata holding the relevant information of the task */ private WrappedScheduledFuture(TaskDetails taskDetails) { @@ -606,12 +666,15 @@ private WrappedScheduledFuture(TaskDetails taskDetails) { } /** - * This method stores the delegated {@link ScheduledFuture} in its internal property.
    - *
    + *

    + * This method stores the delegated {@link ScheduledFuture} in its internal property. + *

    + *

    * After the delegated {@link ScheduledFuture} is created, the underlying {@link ScheduledExecutorService} * starts processing the task. To be able to "address" this wrapped future before we start processing the task * (the wrapped task needs to access it), we populate this lazy (see - * {@link #wrapScheduledFuture(FutureFactory, Runnable, TaskDetails)}).
    + * {@link #wrapScheduledFuture(FutureFactory, Runnable, TaskDetails)}). + *

    * * @param delegatedFuture the "original" future that handles the logic in the background * @return the instance itself (since we want to return the {@link WrappedFuture} immediately after launching @@ -640,10 +703,12 @@ public int compareTo(Delayed o) { /** * {@inheritDoc} - *
    + * + *

    * This method fires the {@link #onCancelTask(TaskDetails)} if the future has not been cancelled before. * Afterwards it also fires the {@link #onCompleteTask(TaskDetails, Throwable)} callback if the task is not - * running right now (otherwise this will be fired inside the wrapped task).
    + * running right now (otherwise this will be fired inside the wrapped task). + *

    */ @Override public boolean cancel(boolean mayInterruptIfRunning) { @@ -665,9 +730,12 @@ public boolean isCancelled() { } /** - * This method wraps the passed in task to automatically call the callbacks for its lifecycle.
    - *
    - * The lifecycle methods are for example used to manage the resources that are reserved for this task.
    + *

    + * This method wraps the passed in task to automatically call the callbacks for its lifecycle. + *

    + *

    + * The lifecycle methods are for example used to manage the resources that are reserved for this task. + *

    * * @param task the raw task that shall be wrapped with the resource freeing logic * @param taskDetails metadata holding the relevant information of the task @@ -709,11 +777,14 @@ private Callable wrapTask(Callable task, TaskDetails taskDetails, Futu } /** + *

    * This method does the same as {@link #wrapTask(Callable, TaskDetails, Future)} but defaults to a task that is not - * associated to a user accessible {@link Future}.
    - *
    + * associated to a user accessible {@link Future}. + *

    + *

    * This method is used whenever the {@link Future} is not returned by the underlying method so we don't need to wrap - * it.
    + * it. + *

    * * @param task the raw task that shall be wrapped with the resource freeing logic * @param taskDetails metadata holding the relevant information of the task @@ -725,9 +796,12 @@ private Callable wrapTask(Callable task, TaskDetails taskDetails) { } /** - * This method wraps the passed in task to automatically call the callbacks for its lifecycle.
    - *
    - * The lifecycle methods are for example used to manage the resources that are reserved for this task.
    + *

    + * This method wraps the passed in task to automatically call the callbacks for its lifecycle. + *

    + *

    + * The lifecycle methods are for example used to manage the resources that are reserved for this task. + *

    * * @param task the raw task that shall be wrapped with the resource freeing logic * @param taskDetails metadata holding the relevant information of the task @@ -767,11 +841,14 @@ private Runnable wrapTask(Runnable task, TaskDetails taskDetails, Future } /** + *

    * This method does the same as {@link #wrapTask(Runnable, TaskDetails, Future)} but defaults to a task that is not - * associated to a user accessible {@link Future}.
    - *
    + * associated to a user accessible {@link Future}. + *

    + *

    * This method is used whenever the {@link Future} is not returned by the underlying method so we don't need to wrap - * it.
    + * it. + *

    * * @param task the raw task that shall be wrapped with the resource freeing logic * @param taskDetails metadata holding the relevant information of the task @@ -782,10 +859,13 @@ private Runnable wrapTask(Runnable task, TaskDetails taskDetails) { } /** - * This is a utility method that wraps the task and the resulting {@link Future} in a single call.
    - *
    + *

    + * This is a utility method that wraps the task and the resulting {@link Future} in a single call. + *

    + *

    * It creates the {@link WrappedFuture} and the wrapped task and starts the execution of the task by delegating - * the launch through the {@link FutureFactory}.
    + * the launch through the {@link FutureFactory}. + *

    * * @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task * @param task the task that shall be wrapped to clean up its reserved resources upon completion @@ -803,10 +883,13 @@ private Future wrapFuture(FutureFactory, Callable> futureFac } /** - * This is a utility method that wraps the task and the resulting {@link Future} in a single call.
    - *
    + *

    + * This is a utility method that wraps the task and the resulting {@link Future} in a single call. + *

    + *

    * It creates the {@link WrappedFuture} and the wrapped task and starts the execution of the task by delegating - * the launch through the {@link FutureFactory}.
    + * the launch through the {@link FutureFactory}. + *

    * * @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task * @param task the task that shall be wrapped to clean up its reserved resources upon completion @@ -824,10 +907,13 @@ private Future wrapFuture(FutureFactory, Runnable> futureFactor } /** - * This is a utility method that wraps the task and the resulting {@link ScheduledFuture} in a single call.
    - *
    + *

    + * This is a utility method that wraps the task and the resulting {@link ScheduledFuture} in a single call. + *

    + *

    * It creates the {@link WrappedScheduledFuture} and the wrapped task and starts the execution of the task by - * delegating the launch through the {@link FutureFactory}.
    + * delegating the launch through the {@link FutureFactory}. + *

    * * @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task * @param task the task that shall be wrapped to clean up its reserved resources upon completion @@ -845,10 +931,13 @@ private ScheduledFuture wrapScheduledFuture(FutureFactory - *
    + *

    + * This is a utility method that wraps the task and the resulting {@link ScheduledFuture} in a single call. + *

    + *

    * It creates the {@link WrappedScheduledFuture} and the wrapped task and starts the execution of the task by - * delegating the launch through the {@link FutureFactory}.
    + * delegating the launch through the {@link FutureFactory}. + *

    * * @param futureFactory the lambda that returns the original "unwrapped" future from the wrapped task * @param task the task that shall be wrapped to clean up its reserved resources upon completion @@ -866,11 +955,14 @@ private ScheduledFuture wrapScheduledFuture(FutureFactory * This is a utility method that wraps a {@link Collection} of {@link Callable}s to make them free their resources - * upon completion.
    - *
    + * upon completion. + *

    + *

    * It simply iterates over the tasks and wraps them one by one by calling - * {@link #wrapTask(Callable, TaskDetails)}.
    + * {@link #wrapTask(Callable, TaskDetails)}. + *

    * * @param tasks list of jobs that shall be wrapped * @param the type of the values returned by the {@link Callable}s @@ -888,11 +980,14 @@ private Collection> wrapTasks(Collection * This method checks if we have enough resources to schedule the given amount of tasks and reserves the space if - * the check is successful.
    - *
    + * the check is successful. + *

    + *

    * The reserved resources will be freed again once the tasks finish their execution or when they are cancelled - * through their corresponding {@link Future}.
    + * through their corresponding {@link Future}. + *

    * * @param requestedJobCount the amount of tasks that shall be scheduled * @return true if we could reserve the given space and false otherwise @@ -908,11 +1003,14 @@ private boolean reserveCapacity(int requestedJobCount) { } /** + *

    * This method is a utility method that simply checks the passed in object for being {@code null} and throws a - * {@link RejectedExecutionException} if it is.
    - *
    + * {@link RejectedExecutionException} if it is. + *

    + *

    * It is used to turn the silent methods into non-silent ones and conform with the {@link ScheduledExecutorService} - * interface.
    + * interface. + *

    * * @param result the object that shall be checked for being null (the result of a "silent" method call) * @param the type of the result diff --git a/src/main/java/com/iota/iri/utils/thread/DedicatedScheduledExecutorService.java b/src/main/java/com/iota/iri/utils/thread/DedicatedScheduledExecutorService.java index d0dd4923ab..93ab1af3d4 100644 --- a/src/main/java/com/iota/iri/utils/thread/DedicatedScheduledExecutorService.java +++ b/src/main/java/com/iota/iri/utils/thread/DedicatedScheduledExecutorService.java @@ -6,49 +6,56 @@ import java.util.concurrent.*; /** + *

    * This class represents a {@link ScheduledExecutorService} that is associated with one specific task for which it - * provides automatic logging capabilities
    - *
    + * provides automatic logging capabilities. + *

    + *

    * It informs the user about its lifecycle using the logback loggers used by IRI. In addition it offers "silent" methods * of the {@link ScheduledExecutorService} that do not throw {@link Exception}s when we try to start the same task * multiple times. This is handy for implementing the "start" and "shutdown" methods of the background workers of IRI * that would otherwise have to take care of not starting the same task more than once (when trying to be robust against - * coding errors or tests that start the same thread multiple times).
    + * coding errors or tests that start the same thread multiple times). + *

    */ public class DedicatedScheduledExecutorService extends BoundedScheduledExecutorService { /** - * Default logger for this class allowing us to dump debug and status messages.
    - *
    + * Default logger for this class allowing us to dump debug and status messages. + *

    * Note: The used logger can be overwritten by providing a different logger in the constructor (to have transparent - * log messages that look like they are coming from a different source).
    + * log messages that look like they are coming from a different source). + *

    */ private static final Logger DEFAULT_LOGGER = LoggerFactory.getLogger(DedicatedScheduledExecutorService.class); /** - * Holds a reference to the logger that is used to emit messages.
    + * Holds a reference to the logger that is used to emit messages. */ private final Logger logger; /** - * Holds the name of the thread that gets started by this class and that gets printed in the log messages.
    + * Holds the name of the thread that gets started by this class and that gets printed in the log messages. */ private final String threadName; /** - * Flag indicating if we want to issue debug messages (for example whenever a task gets started and finished).
    + * Flag indicating if we want to issue debug messages (for example whenever a task gets started and finished). */ private final boolean debug; /** + *

    * Creates a {@link ScheduledExecutorService} that is associated with one specific task for which it provides - * automatic logging capabilities (using the provided thread name).
    - *
    + * automatic logging capabilities (using the provided thread name). + *

    + *

    * It informs the user about its lifecycle using the logback loggers used by IRI. In addition it offers "silent" * methods of the {@link ScheduledExecutorService} that do not throw {@link Exception}s when we try to start the * same task multiple times. This is handy for implementing the "start" and "shutdown" methods of the background * workers of IRI that would otherwise have to take care of not starting the same task more than once (when trying - * to be robust against coding errors or tests that start the same thread multiple times).
    - *
    + * to be robust against coding errors or tests that start the same thread multiple times). + *

    + *

    *

          *     Example:
          *
    @@ -82,7 +89,8 @@ public class DedicatedScheduledExecutorService extends BoundedScheduledExecutorS
          *         [main] INFO  MilestoneSolidifier - [Milestone Solidifier] Stopped (after #4 executions) ...
          *     
    -     *
    +     * 

    + * * @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will * always be logged) * @param logger logback logger that shall be used for the origin of the log messages @@ -98,7 +106,7 @@ public DedicatedScheduledExecutorService(String threadName, Logger logger, boole /** * Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to the - * {@link #DEFAULT_LOGGER} for the log messages.
    + * {@link #DEFAULT_LOGGER} for the log messages. * * @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will * always be logged) @@ -110,7 +118,7 @@ public DedicatedScheduledExecutorService(String threadName, boolean debug) { /** * Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to false - * for the debug flag.
    + * for the debug flag. * * @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will * always be logged) @@ -121,11 +129,14 @@ public DedicatedScheduledExecutorService(String threadName, Logger logger) { } /** + *

    * Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null} - * for the thread name (which causes only error messages to be printed - unless debug is true).
    - *
    + * for the thread name (which causes only error messages to be printed - unless debug is true). + *

    + *

    * Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want - * to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
    + * to inform the user when scheduling a log output, but which still needs the "only run one task" logic. + *

    * * @param logger logback logger that shall be used for the origin of the log messages * @param debug debug flag that indicates if every "run" should be accompanied with a log message @@ -136,7 +147,7 @@ public DedicatedScheduledExecutorService(Logger logger, boolean debug) { /** * Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to the - {@link #DEFAULT_LOGGER} for the log messages and false for the debug flag.
    + {@link #DEFAULT_LOGGER} for the log messages and false for the debug flag. * * @param threadName name of the thread (or null if we want to disable the automatic logging - exceptions will * always be logged) @@ -146,13 +157,16 @@ public DedicatedScheduledExecutorService(String threadName) { } /** + *

    * Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null} * for the thread name (which causes only error messages to be printed - unless debug is true) and and false for the - * debug flag.
    - *
    + * debug flag. + *

    + *

    * Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want - * to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
    - * + * to inform the user when scheduling a log output, but which still needs the "only run one task" logic. + *

    + * * @param logger logback logger that shall be used for the origin of the log messages */ public DedicatedScheduledExecutorService(Logger logger) { @@ -160,12 +174,15 @@ public DedicatedScheduledExecutorService(Logger logger) { } /** + *

    * Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null} * for the thread name (which causes only error messages to be printed - unless debug is true) and the - * {@link #DEFAULT_LOGGER} for the log messages.
    - *
    + * {@link #DEFAULT_LOGGER} for the log messages. + *

    + *

    * Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want - * to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
    + * to inform the user when scheduling a log output, but which still needs the "only run one task" logic. + *

    * * @param debug debug flag that indicates if every "run" should be accompanied with a log message */ @@ -174,19 +191,22 @@ public DedicatedScheduledExecutorService(boolean debug) { } /** + *

    * Does the same as {@link #DedicatedScheduledExecutorService(String, Logger, boolean)} but defaults to {@code null} * for the thread name (which causes only error messages to be printed), the {@link #DEFAULT_LOGGER} for the log - * messages and {@code false} for the debug flag.
    - *
    + * messages and {@code false} for the debug flag. + *

    + *

    * Note: This is for example used by the {@link com.iota.iri.utils.log.interval.IntervalLogger} which does not want - * to inform the user when scheduling a log output, but which still needs the "only run one task" logic.
    + * to inform the user when scheduling a log output, but which still needs the "only run one task" logic. + *

    */ public DedicatedScheduledExecutorService() { this(null, DEFAULT_LOGGER, false); } /** - * This method is the getter for the name of the thread that gets created by this service.
    + * This method is the getter for the name of the thread that gets created by this service. * * @return it simply returns the private property of {@link #threadName}. */ @@ -198,10 +218,14 @@ public String getThreadName() { /** * {@inheritDoc} - * This method shows a message whenever a task gets successfully scheduled.
    - *
    + * + *

    + * This method shows a message whenever a task gets successfully scheduled. + *

    + *

    * We only show the scheduling message if debugging is enabled or a thread name was defined when creating this - * {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless messages).
    + * {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless messages). + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -216,17 +240,23 @@ public void onScheduleTask(TaskDetails taskDetails) { /** * {@inheritDoc} - * This method shows a message whenever a task starts to be processed.
    - *
    + * + *

    + * This method shows a message whenever a task starts to be processed. + *

    + *

    * We only show the starting message if debug is enabled or if it is the first start of the task in a named * {@link DedicatedScheduledExecutorService} (display it like it would be a {@link Thread} with one start message - * and one stop message - to not pollute the CLI with meaningless messages).
    - *
    + * and one stop message - to not pollute the CLI with meaningless messages). + *

    + *

    * To increase the information available for debugging, we change the thread name to the one that initiated the - * start (rather than the randomly assigned one from the executor service) before printing the start message.
    - *
    + * start (rather than the randomly assigned one from the executor service) before printing the start message. + *

    + *

    * After the start message was printed, we set the name of the {@link Thread} that will consequently be used for log - * messages from the task itself.
    + * messages from the task itself. + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -249,15 +279,20 @@ public void onStartTask(TaskDetails taskDetails) { /** * {@inheritDoc} + * + *

    * This method shows a message when the task finishes its execution (can happen multiple times for recurring - * tasks).
    - *
    + * tasks). + *

    + *

    * We only show the finishing message if debug is enabled and if no error occurred (otherwise the - * {@link #onCompleteTask(TaskDetails, Throwable)} callback will give enough information about the crash).
    - *
    + * {@link #onCompleteTask(TaskDetails, Throwable)} callback will give enough information about the crash). + *

    + *

    * To be consistent with the start message, we change the thread name to the one that initiated the task (this also * makes it easier to distinguish log messages that are emitted by the "real" logic of the task from the "automated" - * messages about its lifecycle).
    + * messages about its lifecycle). + *

    * * @param taskDetails metadata holding the relevant information of the task * @param error the exception that caused this task to terminate or {@code null} if it terminated normally @@ -275,10 +310,14 @@ public void onFinishTask(TaskDetails taskDetails, Throwable error) { /** * {@inheritDoc} - * This method shows an information about the intent to cancel the task.
    - *
    + * + *

    + * This method shows an information about the intent to cancel the task. + *

    + *

    * We only show the cancel message if debugging is enabled or a thread name was defined when creating this - * {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless messages).
    + * {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless messages). + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -293,15 +332,21 @@ public void onCancelTask(TaskDetails taskDetails) { /** * {@inheritDoc} + * + *

    * This method shows a stopped message whenever it finally terminates (and doesn't get launched again in case of - * recurring tasks).
    - *
    + * recurring tasks). + *

    + *

    * We only show the stopped message if debug is enabled, an exception occurred (always show unexpected errors) or if * we have a named {@link DedicatedScheduledExecutorService} (to not pollute the CLI with meaningless - * messages).
    + * messages). + *

    + *

    * If the completion of the task was not caused by an outside call to cancel it, we change the thread name to the * one that initiated the task (this makes it easier to distinguish log messages that are emitted by the "real" - * logic of the task from the "automated" messages about its lifecycle).
    + * logic of the task from the "automated" messages about its lifecycle). + *

    * * @param taskDetails metadata holding the relevant information of the task * @param error the exception that caused this task to terminate or {@code null} if it terminated normally @@ -329,10 +374,13 @@ public void onCompleteTask(TaskDetails taskDetails, Throwable error) { //region PRIVATE UTILITY METHODS /////////////////////////////////////////////////////////////////////////////////// /** - * This method is a utility method that prints the schedule message of the task.
    - *
    + *

    + * This method is a utility method that prints the schedule message of the task. + *

    + *

    * It constructs the matching message by passing the task details into the - * {@link #buildScheduledMessage(TaskDetails)} method.
    + * {@link #buildScheduledMessage(TaskDetails)} method. + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -341,10 +389,13 @@ private void printScheduledMessage(TaskDetails taskDetails) { } /** - * This method is a utility method that prints the started message of the task.
    - *
    + *

    + * This method is a utility method that prints the started message of the task. + *

    + *

    * It constructs the message by passing the details into the {@link #buildStartedMessage(TaskDetails)} and printing - * it through the logger.
    + * it through the logger. + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -353,10 +404,13 @@ private void printStartedMessage(TaskDetails taskDetails) { } /** - * This method is a utility method that prints the finished message of the task.
    - *
    + *

    + * This method is a utility method that prints the finished message of the task. + *

    + *

    * It constructs the message by passing the details into the {@link #buildFinishedMessage(TaskDetails)} and printing - * it through the logger.
    + * it through the logger. + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -365,10 +419,13 @@ private void printFinishedMessage(TaskDetails taskDetails) { } /** - * This method is a utility method that prints the stop message of the task.
    - *
    + *

    + * This method is a utility method that prints the stop message of the task. + *

    + *

    * It constructs the message by passing the details into the {@link #buildStopMessage(TaskDetails)} and printing - * it through the logger.
    + * it through the logger. + *

    * * @param taskDetails metadata holding the relevant information of the task */ @@ -377,12 +434,15 @@ private void printStopMessage(TaskDetails taskDetails) { } /** - * This method is a utility method that prints the stopped message of the task.
    - *
    + *

    + * This method is a utility method that prints the stopped message of the task. + *

    + *

    * It constructs the message by passing the details into the {@link #buildStoppedMessage(TaskDetails, Throwable)} * and printing it through the logger. If an error occurred we use the error channel and append the error to - * get a stack trace of what happened.
    - * + * get a stack trace of what happened. + *

    + * * @param taskDetails metadata holding the relevant information of the task * @param error the exception that caused the task to be stopped */ @@ -396,10 +456,13 @@ private void printStoppedMessage(TaskDetails taskDetails, Throwable error) { } /** - * This method is a utility method that generates the thread name that is used in the log messages.
    - *
    + *

    + * This method is a utility method that generates the thread name that is used in the log messages. + *

    + *

    * It simply returns the thread name (if one is set) or generates a name if this - * {@link DedicatedScheduledExecutorService} is "unnamed".
    + * {@link DedicatedScheduledExecutorService} is "unnamed". + *

    * * @return the thread name that is used in the log messages */ @@ -411,7 +474,7 @@ private String getPrintableThreadName(TaskDetails taskDetails) { /** * This method creates the schedule message of the task by first building the temporal parts of the message and - * then appending them to the actual message.
    + * then appending them to the actual message. * * @param taskDetails metadata holding the relevant information of the task * @return the schedule message that can be used with the logger @@ -435,7 +498,7 @@ private String buildScheduledMessage(TaskDetails taskDetails) { /** * This method creates the started message of the task by simply extracting the relevant information from the - * {@code taskDetails} and concatenating them to the actual message.
    + * {@code taskDetails} and concatenating them to the actual message. * * @param taskDetails metadata holding the relevant information of the task * @return the started message that can be used with the logger @@ -449,7 +512,7 @@ private String buildStartedMessage(TaskDetails taskDetails) { /** * This method creates the finished message of the task by simply extracting the relevant information from the - * {@code taskDetails} and concatenating them to the actual message.
    + * {@code taskDetails} and concatenating them to the actual message. * * @param taskDetails metadata holding the relevant information of the task * @return the finished message that can be used with the logger @@ -463,7 +526,7 @@ private String buildFinishedMessage(TaskDetails taskDetails) { /** * This method creates the stop message of the task by simply extracting the relevant information from the - * {@code taskDetails} and concatenating them to the actual message.
    + * {@code taskDetails} and concatenating them to the actual message. * * @param taskDetails metadata holding the relevant information of the task * @return the stop message that can be used with the logger @@ -475,12 +538,15 @@ private String buildStopMessage(TaskDetails taskDetails) { : "Cancelling Start [" + printableThreadName + "] ..."; } - /** + /** + *

    * This method creates the stopped message of the task by simply extracting the relevant information from the - * {@code taskDetails} and concatenating them to the actual message.
    - *
    - * We differentiate between different termination ways by giving different reasons in the message.
    - * + * {@code taskDetails} and concatenating them to the actual message. + *

    + *

    + * We differentiate between different termination ways by giving different reasons in the message. + *

    + * * @param taskDetails metadata holding the relevant information of the task * @return the stop message that can be used with the logger */ @@ -500,7 +566,7 @@ private String buildStoppedMessage(TaskDetails taskDetails, Throwable error) { /** * This method is a utility method that builds the message fragment which expresses the delay of the scheduled - * task.
    + * task. * * @param taskDetails metadata holding the relevant information of the task * @return the message fragment which expresses the delay of the scheduled task @@ -515,7 +581,7 @@ private String buildDelayMessageFragment(TaskDetails taskDetails) { /** * This method is a utility method that builds the message fragment which expresses the interval of the scheduled - * task.
    + * task. * * @param taskDetails metadata holding the relevant information of the task * @return the message fragment which expresses the interval of the scheduled task @@ -530,7 +596,7 @@ private String buildIntervalMessageFragment(TaskDetails taskDetails) { /** * This method is a utility method that creates a human readable abbreviation of the provided - * {@link TimeUnit}.
    + * {@link TimeUnit}. * * @param unit the time unit used for the values in the {@link TaskDetails} * @return a human readable abbreviation of the provided {@link TimeUnit} diff --git a/src/main/java/com/iota/iri/utils/thread/ReportingExecutorService.java b/src/main/java/com/iota/iri/utils/thread/ReportingExecutorService.java index d10c3a71fc..96160efb78 100644 --- a/src/main/java/com/iota/iri/utils/thread/ReportingExecutorService.java +++ b/src/main/java/com/iota/iri/utils/thread/ReportingExecutorService.java @@ -1,36 +1,48 @@ package com.iota.iri.utils.thread; /** + *

    * This interface defines a contract for {@link java.util.concurrent.ExecutorService}s that makes them call a - * "reporting" method whenever an important event occurs.
    - *
    + * "reporting" method whenever an important event occurs. + *

    + *

    * This way a child class extending these kind of {@link java.util.concurrent.ExecutorService}s can "hook" into these - * events by overriding the specific method and add additional logic like logging or debugging capabilities.
    + * events by overriding the specific method and add additional logic like logging or debugging capabilities. + *

    */ public interface ReportingExecutorService { /** - * This method gets called whenever a new task is scheduled to run.
    - *
    + *

    + * This method gets called whenever a new task is scheduled to run. + *

    + *

    * In contrast to {@link #onStartTask(TaskDetails)} this method gets called only once for "recurring" tasks that are - * scheduled to run in pre-defined intervals.
    + * scheduled to run in pre-defined intervals. + *

    * * @param taskDetails object containing details about this task */ void onScheduleTask(TaskDetails taskDetails); /** - * This method gets called whenever a task is started.
    - *
    - * For recurring tasks it is called multiple times.
    + *

    + * This method gets called whenever a task is started. + *

    + *

    + * For recurring tasks it is called multiple times. + *

    * * @param taskDetails object containing details about this task */ void onStartTask(TaskDetails taskDetails); /** - * This method gets called whenever a task is finished.
    - *
    - * For recurring tasks it is called multiple times.
    + *

    + * This method gets called whenever a task is finished. + *

    + *

    + * For recurring tasks it is called multiple times. + *

    * * @param taskDetails object containing details about this task * @param error {@link Exception} that caused the task to complete @@ -38,20 +50,26 @@ public interface ReportingExecutorService { void onFinishTask(TaskDetails taskDetails, Throwable error); /** + *

    * This method gets called whenever a task is cancelled through its {@link java.util.concurrent.Future} or through - * the shutdown methods of the {@link java.util.concurrent.ExecutorService}.
    - *
    - * It only gets called once for every task.
    + * the shutdown methods of the {@link java.util.concurrent.ExecutorService}. + *

    + *

    + * It only gets called once for every task. + *

    * * @param taskDetails object containing details about this task */ void onCancelTask(TaskDetails taskDetails); /** - * This method gets called whenever a task completes.
    - *
    + *

    + * This method gets called whenever a task completes. + *

    + *

    * This can be through either raising an exception, cancelling from the outside or by simply terminating in a normal - * manner. For recurring tasks this only gets called once.
    + * manner. For recurring tasks this only gets called once. + *

    * * @param taskDetails object containing details about this task * @param error {@link Exception} that caused the task to complete diff --git a/src/main/java/com/iota/iri/utils/thread/SilentScheduledExecutorService.java b/src/main/java/com/iota/iri/utils/thread/SilentScheduledExecutorService.java index 8d29e044f1..0ad4dc8308 100644 --- a/src/main/java/com/iota/iri/utils/thread/SilentScheduledExecutorService.java +++ b/src/main/java/com/iota/iri/utils/thread/SilentScheduledExecutorService.java @@ -5,17 +5,20 @@ import java.util.concurrent.*; /** + *

    * This interface extends the {@link ScheduledExecutorService} by providing additional methods to enqueue tasks without - * throwing a {@link RejectedExecutionException} exception.
    - *
    + * throwing a {@link RejectedExecutionException} exception. + *

    + *

    * This can be useful when preventing additional tasks to run is no error but an intended design decision in the * implementing class. In these cases raising an exception and catching it would cause too much unnecessary overhead - * (using {@link Exception}s for control flow is an anti pattern).
    + * (using {@link Exception}s for control flow is an anti pattern). + *

    */ public interface SilentScheduledExecutorService extends ScheduledExecutorService { /** * Does the same as {@link ScheduledExecutorService#schedule(Runnable, long, TimeUnit)} but returns {@code null} - * instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution.
    + * instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution. * * @param command the task to execute * @param delay the time from now to delay execution @@ -29,7 +32,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as {@link ScheduledExecutorService#schedule(Callable, long, TimeUnit)} but returns {@code null} * instead of throwing a {@link java.util.concurrent.RejectedExecutionException} if the task cannot be scheduled for - * execution.
    + * execution. * * @param callable the function to execute * @param delay the time from now to delay execution @@ -44,7 +47,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as {@link ScheduledExecutorService#scheduleAtFixedRate(Runnable, long, long, TimeUnit)} but returns * {@code null} instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for - * execution.
    + * execution. * * @param command the task to execute * @param initialDelay the time to delay first execution @@ -60,7 +63,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as * {@link ScheduledExecutorService#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} but returns {@code null} - * instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution.
    + * instead of throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution. * * @param command the task to execute * @param initialDelay the time to delay first execution @@ -75,7 +78,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as {@link ScheduledExecutorService#submit(Callable)} but returns {@code null} instead of throwing a - * {@link RejectedExecutionException} if the task cannot be scheduled for execution.
    + * {@link RejectedExecutionException} if the task cannot be scheduled for execution. * * @param task the task to submit * @param the type of the task's result @@ -86,7 +89,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as {@link ScheduledExecutorService#submit(Runnable)} but returns {@code null} instead of throwing a - * {@link RejectedExecutionException} if the task cannot be scheduled for execution.
    + * {@link RejectedExecutionException} if the task cannot be scheduled for execution. * * @param task the task to submit * @return a Future representing pending completion of the task / {@code null} if the task cannot be scheduled for @@ -97,7 +100,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as {@link ScheduledExecutorService#submit(Runnable, Object)} but returns {@code null} instead of - * throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution.
    + * throwing a {@link RejectedExecutionException} if the task cannot be scheduled for execution. * * @param task the task to submit * @param result the result to return @@ -110,7 +113,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as {@link ScheduledExecutorService#invokeAll(Collection)} but returns {@code null} instead of - * throwing a {@link RejectedExecutionException} if any task cannot be scheduled for execution.
    + * throwing a {@link RejectedExecutionException} if any task cannot be scheduled for execution. * * @param tasks the collection of tasks * @param the type of the values returned from the tasks @@ -124,7 +127,7 @@ public interface SilentScheduledExecutorService extends ScheduledExecutorService /** * Does the same as {@link ScheduledExecutorService#invokeAll(Collection, long, TimeUnit)} but returns {@code null} - * instead of throwing a {@link RejectedExecutionException} if any task cannot be scheduled for execution.
    + * instead of throwing a {@link RejectedExecutionException} if any task cannot be scheduled for execution. * * @param tasks the collection of tasks * @param timeout the maximum time to wait @@ -142,7 +145,7 @@ List> silentInvokeAll(Collection> tasks, lon /** * Does the same as {@link ScheduledExecutorService#invokeAny(Collection)} but returns {@code null} instead of - * throwing a {@link RejectedExecutionException} if tasks cannot be scheduled for execution.
    + * throwing a {@link RejectedExecutionException} if tasks cannot be scheduled for execution. * * @param tasks the collection of tasks * @param the type of the values returned from the tasks @@ -156,7 +159,7 @@ List> silentInvokeAll(Collection> tasks, lon /** * Does the same as {@link ScheduledExecutorService#invokeAny(Collection, long, TimeUnit)} but returns {@code null} - * instead of throwing a {@link RejectedExecutionException} if tasks cannot be scheduled for execution.
    + * instead of throwing a {@link RejectedExecutionException} if tasks cannot be scheduled for execution. * * @param tasks the collection of tasks * @param timeout the maximum time to wait @@ -172,7 +175,7 @@ List> silentInvokeAll(Collection> tasks, lon /** * Does the same as {@link ScheduledExecutorService#execute(Runnable)} but doesn't throw a - * {@link RejectedExecutionException} if this task cannot be accepted for execution.
    + * {@link RejectedExecutionException} if this task cannot be accepted for execution. * * @param command the runnable task * @throws NullPointerException if command is null diff --git a/src/main/java/com/iota/iri/utils/thread/TaskDetails.java b/src/main/java/com/iota/iri/utils/thread/TaskDetails.java index 3fe30de012..e6c72f9ca7 100644 --- a/src/main/java/com/iota/iri/utils/thread/TaskDetails.java +++ b/src/main/java/com/iota/iri/utils/thread/TaskDetails.java @@ -5,58 +5,64 @@ import java.util.concurrent.atomic.AtomicInteger; /** + *

    * This class represents a container for the metadata of a task that was scheduled through an - * {@link java.util.concurrent.ExecutorService} that implements the {@link ReportingExecutorService} interface.
    - *
    + * {@link java.util.concurrent.ExecutorService} that implements the {@link ReportingExecutorService} interface. + *

    + *

    * It can for example be used to show detailed log messages or even implement more sophisticated features like the - * {@link BoundedScheduledExecutorService}.
    + * {@link BoundedScheduledExecutorService}. + *

    */ public class TaskDetails { /** - * Holds the name of the {@link Thread} that created the task.
    + * Holds the name of the {@link Thread} that created the task. */ private final String threadName; /** * Holds a thread-safe flag that indicates if the task is currently scheduled for execution (false if running or - * done already).
    + * done already). */ private final AtomicBoolean scheduledForExecution; /** - * Holds a thread-safe counter for the amount of times this task was executed.
    + * Holds a thread-safe counter for the amount of times this task was executed. */ private final AtomicInteger executionCount; /** * Holds the initial delay that was provided when scheduling the task (or {@code null} if the task was set to run - * immediately).
    + * immediately). */ private Long delay = null; /** - * Holds the interval in which the task is repeated (or {@code null} for non-recurring tasks).
    + * Holds the interval in which the task is repeated (or {@code null} for non-recurring tasks). */ private Long interval = null; /** * Holds the timeout that a task can take to terminate before it gets interrupted (or {@code null} if none was - * provided).
    + * provided). */ private Long timeout = null; /** * Holds the time unit that the other values are denominated in (or {@code null} if no time based values are - * provided).
    + * provided). */ private TimeUnit timeUnit = null; /** + *

    * Creates a container for the metadata of a task that was scheduled through an - * {@link java.util.concurrent.ExecutorService}.
    - *
    + * {@link java.util.concurrent.ExecutorService}. + *

    + *

    * It is automatically initiated with the calling {@link Thread} name, the {@link #scheduledForExecution} flag being - * set to {@code true} and the {@link #executionCount} being set to 0.
    + * set to {@code true} and the {@link #executionCount} being set to 0. + *

    */ public TaskDetails() { this.threadName = Thread.currentThread().getName(); @@ -65,7 +71,7 @@ public TaskDetails() { } /** - * Getter for the internal {@link #threadName} property.
    + * Getter for the internal {@link #threadName} property. * * @return name of the {@link Thread} that scheduled the task. */ @@ -74,10 +80,11 @@ public String getThreadName() { } /** - * Getter for the internal {@link #scheduledForExecution} property.
    - *
    + * Getter for the internal {@link #scheduledForExecution} property. + *

    * Note: There is no setter for this property because it returns a mutable object that is not supposed be - * overwritten.
    + * overwritten. + *

    * * @return a thread-safe flag that indicates if the task is currently scheduled for execution */ @@ -86,10 +93,11 @@ public AtomicBoolean getScheduledForExecution() { } /** - * Getter for the internal {@link #executionCount} property.
    - *
    + * Getter for the internal {@link #executionCount} property. + *

    * Note: There is no setter for this property because it returns a mutable object that is not supposed be - * overwritten.
    + * overwritten. + *

    * * @return a thread-safe counter for the amount of times this task was executed */ @@ -98,7 +106,7 @@ public AtomicInteger getExecutionCount() { } /** - * Setter for the internal {@link #delay} property.
    + * Setter for the internal {@link #delay} property. * * @param delay the initial delay that was provided when scheduling the task * @return the instance of TaskDetails itself to allow the chaining of calls @@ -110,7 +118,7 @@ public TaskDetails setDelay(Long delay) { } /** - * Getter for the internal {@link #delay} property.
    + * Getter for the internal {@link #delay} property. * * @return the initial delay that was provided when scheduling the task (or {@code null} if the task was set to run * immediately) @@ -120,7 +128,7 @@ public Long getDelay() { } /** - * Setter for the internal {@link #interval} property.
    + * Setter for the internal {@link #interval} property. * * @param interval the interval in which the task is repeated (or {@code null} for non-recurring tasks) * @return the instance of TaskDetails itself to allow the chaining of calls @@ -132,7 +140,7 @@ public TaskDetails setInterval(Long interval) { } /** - * Getter for the internal {@link #interval} property.
    + * Getter for the internal {@link #interval} property. * * @return the interval in which the task is repeated (or {@code null} for non-recurring tasks) */ @@ -141,7 +149,7 @@ public Long getInterval() { } /** - * Setter for the internal {@link #timeout} property.
    + * Setter for the internal {@link #timeout} property. * * @param timeout the timeout that a task can take to terminate before it gets interrupted (or {@code null} if none * was provided) @@ -154,7 +162,7 @@ public TaskDetails setTimeout(Long timeout) { } /** - * Getter for the internal {@link #timeout} property.
    + * Getter for the internal {@link #timeout} property. * * @return the timeout that a task can take to terminate before it gets interrupted (or {@code null} if none was * provided) @@ -164,7 +172,7 @@ public Long getTimeout() { } /** - * Setter for the internal {@link #timeUnit} property.
    + * Setter for the internal {@link #timeUnit} property. * * @param timeUnit the time unit that the other values are denominated in (or {@code null} if no time based values * are provided) @@ -177,7 +185,7 @@ public TaskDetails setTimeUnit(TimeUnit timeUnit) { } /** - * Getter for the internal {@link #timeUnit} property.
    + * Getter for the internal {@link #timeUnit} property. * * @return the time unit that the other values are denominated in (or {@code null} if no time based values are * provided) diff --git a/src/main/java/com/iota/iri/zmq/MessageQ.java b/src/main/java/com/iota/iri/zmq/MessageQ.java index 7f5d4e35c9..eacbd391d0 100644 --- a/src/main/java/com/iota/iri/zmq/MessageQ.java +++ b/src/main/java/com/iota/iri/zmq/MessageQ.java @@ -31,39 +31,31 @@ * For a complete list and detailed topic specification please refer to the README.md. *

    */ -public class MessageQ { +class MessageQ { private final static Logger LOG = LoggerFactory.getLogger(MessageQ.class); private final ZMQ.Context context; private final ZMQ.Socket publisher; - private boolean enabled = false; private final ExecutorService publisherService = Executors.newSingleThreadExecutor(); public static MessageQ createWith(ZMQConfig config) { - return new MessageQ(config.getZmqPort(), config.getZmqIpc(), config.getZmqThreads(), config.isZmqEnabled()); + return new MessageQ(config); } /** * Creates and starts a ZMQ publisher. * - * @param port port the publisher will be bound to - * @param ipc IPC socket the publisher will be bound to - * @param nthreads number of threads used by the ZMQ publisher - * @param enabled boolean enable flag; by default the publisher will not be started + * @param config {@link ZMQConfig} that should be used. */ - private MessageQ(int port, String ipc, int nthreads, boolean enabled) { - if (enabled) { - context = ZMQ.context(nthreads); - publisher = context.socket(ZMQ.PUB); - publisher.bind(String.format("tcp://*:%d", port)); - if (ipc != null) { - publisher.bind(ipc); - } - this.enabled = true; - } else { - context = null; - publisher = null; + private MessageQ(ZMQConfig config) { + context = ZMQ.context(config.getZmqThreads()); + publisher = context.socket(ZMQ.PUB); + if (config.isZmqEnableTcp()) { + publisher.bind(String.format("tcp://*:%d", config.getZmqPort())); + } + if (config.isZmqEnableIpc()) { + publisher.bind(config.getZmqIpc()); } } @@ -74,10 +66,8 @@ private MessageQ(int port, String ipc, int nthreads, boolean enabled) { * @param objects arguments referenced by the message body, similar to a format string */ public void publish(String message, Object... 
objects) { - if (enabled) { - String toSend = String.format(message, objects); - publisherService.submit(() -> publisher.send(toSend)); - } + String toSend = String.format(message, objects); + publisherService.submit(() -> publisher.send(toSend)); } /** diff --git a/src/main/java/com/iota/iri/zmq/MessageQueueProvider.java b/src/main/java/com/iota/iri/zmq/MessageQueueProvider.java new file mode 100644 index 0000000000..065b92a7c7 --- /dev/null +++ b/src/main/java/com/iota/iri/zmq/MessageQueueProvider.java @@ -0,0 +1,35 @@ +package com.iota.iri.zmq; + +import com.iota.iri.model.Hash; +import com.iota.iri.model.persistables.Transaction; +import com.iota.iri.storage.Indexable; +import com.iota.iri.storage.Persistable; + +/** + * Publish messages to the MessageQueue. + */ +public interface MessageQueueProvider { + /** + * Publishes the message to the MessageQueue. + * + * @param message that can be formatted by {@link String#format(String, Object...)} + * @param objects that should replace the placeholder in message. + * @see String#format(String, Object...) + */ + void publish(String message, Object... objects); + + /** + * Publishes the transaction details to the MessageQueue. + * + * @param model with Transaction details send to the MessageQueue. + * @param index {@link Hash} identifier of the {@link Transaction} set + * @param item identifying the purpose of the update + * @return true when message was send to the MessageQueue + */ + boolean publishTransaction(Persistable model, Indexable index, String item); + + /** + * Shutdown the MessageQueue. 
+ */ + void shutdown(); +} diff --git a/src/main/java/com/iota/iri/storage/ZmqPublishProvider.java b/src/main/java/com/iota/iri/zmq/ZmqMessageQueueProvider.java similarity index 52% rename from src/main/java/com/iota/iri/storage/ZmqPublishProvider.java rename to src/main/java/com/iota/iri/zmq/ZmqMessageQueueProvider.java index 9425805933..289bce9a6a 100644 --- a/src/main/java/com/iota/iri/storage/ZmqPublishProvider.java +++ b/src/main/java/com/iota/iri/zmq/ZmqMessageQueueProvider.java @@ -1,55 +1,34 @@ -package com.iota.iri.storage; +package com.iota.iri.zmq; +import com.iota.iri.conf.ZMQConfig; import com.iota.iri.controllers.TransactionViewModel; import com.iota.iri.model.Hash; import com.iota.iri.model.persistables.Transaction; +import com.iota.iri.storage.Indexable; +import com.iota.iri.storage.Persistable; import com.iota.iri.utils.Converter; -import com.iota.iri.utils.Pair; -import com.iota.iri.zmq.MessageQ; - -import java.util.Collection; -import java.util.List; -import java.util.Set; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class ZmqPublishProvider implements PersistenceProvider { +/** + * Use zeromq to create a MessageQueue that publishes messages. 
+ */ +public class ZmqMessageQueueProvider implements MessageQueueProvider { - private static final Logger log = LoggerFactory.getLogger(ZmqPublishProvider.class); + private static final Logger log = LoggerFactory.getLogger(ZmqMessageQueueProvider.class); private final MessageQ messageQ; - public ZmqPublishProvider( MessageQ messageQ ) { - this.messageQ = messageQ; - } - - @Override - public void init() throws Exception { - - } - - @Override - public boolean isAvailable() { - return false; - } - - @Override - public void shutdown() { - - } - - @Override - public boolean save(Persistable model, Indexable index) throws Exception { - return false; - } - - @Override - public void delete(Class model, Indexable index) throws Exception { - + /** + * Factory method to create a new ZmqMessageQueue with the given configuration. + * + * @param configuration with the zmq properties used to create MessageQueue + */ + public ZmqMessageQueueProvider(ZMQConfig configuration) { + this.messageQ = MessageQ.createWith(configuration); } @Override - public boolean update(Persistable model, Indexable index, String item) throws Exception { + public boolean publishTransaction(Persistable model, Indexable index, String item) { if(!(model instanceof Transaction)) { return false; } @@ -74,7 +53,7 @@ private void publishTx(TransactionViewModel transactionViewModel) { txStringBuilder.append(transactionViewModel.getHash()); txStringBuilder.append(" "); txStringBuilder.append(transactionViewModel.getAddressHash()); txStringBuilder.append(" "); txStringBuilder.append(String.valueOf(transactionViewModel.value())); txStringBuilder.append(" "); - txStringBuilder.append(transactionViewModel.getObsoleteTagValue().toString().substring(0,27)); txStringBuilder.append(" "); + txStringBuilder.append(transactionViewModel.getObsoleteTagValue().toString(), 0, 27); txStringBuilder.append(" "); txStringBuilder.append(String.valueOf(transactionViewModel.getTimestamp())); txStringBuilder.append(" "); 
txStringBuilder.append(String.valueOf(transactionViewModel.getCurrentIndex())); txStringBuilder.append(" "); txStringBuilder.append(String.valueOf(transactionViewModel.lastIndex())); txStringBuilder.append(" "); @@ -82,7 +61,7 @@ private void publishTx(TransactionViewModel transactionViewModel) { txStringBuilder.append(transactionViewModel.getTrunkTransactionHash()); txStringBuilder.append(" "); txStringBuilder.append(transactionViewModel.getBranchTransactionHash()); txStringBuilder.append(" "); txStringBuilder.append(String.valueOf(transactionViewModel.getArrivalTime())); txStringBuilder.append(" "); - txStringBuilder.append(transactionViewModel.getTagValue().toString().substring(0,27)); + txStringBuilder.append(transactionViewModel.getTagValue().toString(), 0, 27); messageQ.publish(txStringBuilder.toString()); } catch (Exception e) { @@ -106,83 +85,20 @@ private void publishTxTrytes(TransactionViewModel transactionViewModel) { } } + /** + * Publishes the message to the MessageQueue. + * + * @param message that can be formatted by {@link String#format(String, Object...)} + * @param objects that should replace the placeholder in message. + * @see String#format(String, Object...) + */ @Override - public boolean exists(Class model, Indexable key) throws Exception { - return false; + public void publish(String message, Object... 
objects) { + this.messageQ.publish(message, objects); } @Override - public Pair latest(Class model, Class indexModel) throws Exception { - return null; - } - - @Override - public Set keysWithMissingReferences(Class modelClass, Class otherClass) throws Exception { - return null; - } - - @Override - public Persistable get(Class model, Indexable index) throws Exception { - return null; - } - - @Override - public boolean mayExist(Class model, Indexable index) throws Exception { - return false; - } - - @Override - public long count(Class model) throws Exception { - return 0; - } - - @Override - public Set keysStartingWith(Class modelClass, byte[] value) { - return null; - } - - @Override - public Persistable seek(Class model, byte[] key) throws Exception { - return null; - } - - @Override - public Pair next(Class model, Indexable index) throws Exception { - return null; - } - - @Override - public Pair previous(Class model, Indexable index) throws Exception { - return null; - } - - @Override - public Pair first(Class model, Class indexModel) throws Exception { - return null; - } - - @Override - public boolean saveBatch(List> models) throws Exception { - return false; - } - - @Override - public void deleteBatch(Collection>> models) throws Exception { - - } - - @Override - public void clear(Class column) throws Exception { - - } - - @Override - public void clearMetadata(Class column) throws Exception { - - } - - @Override - public List loadAllKeysFromTable(Class model) { - return null; + public void shutdown() { + this.messageQ.shutdown(); } } \ No newline at end of file diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml index 0ce840ac49..85a08b9c40 100644 --- a/src/main/resources/logback.xml +++ b/src/main/resources/logback.xml @@ -24,7 +24,7 @@ DENY - %d{MM/dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + %d{MM/dd HH:mm:ss.SSS} [%thread] %-5level %logger{0}:%L - %msg%n @@ -37,7 +37,7 @@ DENY - %d{MM/dd HH:mm:ss.SSS} [%thread] %-5level 
%logger{36} - %msg%n + %d{MM/dd HH:mm:ss.SSS} [%thread] %-5level %logger{0}:%L - %msg%n @@ -46,4 +46,4 @@ - \ No newline at end of file + diff --git a/src/test/java/com/iota/iri/IXITest.java b/src/test/java/com/iota/iri/IXITest.java index 0efa3070a4..b8a4de59fc 100644 --- a/src/test/java/com/iota/iri/IXITest.java +++ b/src/test/java/com/iota/iri/IXITest.java @@ -1,5 +1,6 @@ package com.iota.iri; +import com.iota.iri.service.CallableRequest; import com.iota.iri.service.dto.AbstractResponse; import com.iota.iri.service.dto.ErrorResponse; import org.hamcrest.CoreMatchers; @@ -8,6 +9,10 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Map; + import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -27,6 +32,12 @@ public static void setUp() throws Exception { ixiDir.create(); ixi = new IXI(); ixi.init(ixiDir.getRoot().getAbsolutePath()); + + Field ixiApiField = ixi.getClass().getDeclaredField("ixiAPI"); + ixiApiField.setAccessible(true); + Map>> ixiAPI = + (Map>>) ixiApiField.get(ixi); + ixiAPI.put("IXI", new HashMap<>()); } /** @@ -70,7 +81,7 @@ public void processCommandEmpty() { } /** - * If an does not match the command pattern, expect an unknown command error message. + * If the given command does not exist, expect an unknown command error message. */ @Test public void processCommandUnknown() { @@ -79,4 +90,13 @@ public void processCommandUnknown() { assertTrue("Wrong error message returned in response", response.toString().contains("Command [unknown] is unknown")); } -} \ No newline at end of file + /** + * If an IXI module does not have the given command, expect an unknown command error message. 
+ */ + @Test + public void processIXICommandUnknown() { + AbstractResponse response = ixi.processCommand("IXI.unknown", null); + assertThat("Wrong type of response", response, CoreMatchers.instanceOf(ErrorResponse.class)); + assertTrue("Wrong error message returned in response", response.toString().contains("Command [IXI.unknown] is unknown")); + } +} diff --git a/src/test/java/com/iota/iri/TangleMockUtils.java b/src/test/java/com/iota/iri/TangleMockUtils.java index 1bee23d7fb..49637ab492 100644 --- a/src/test/java/com/iota/iri/TangleMockUtils.java +++ b/src/test/java/com/iota/iri/TangleMockUtils.java @@ -17,25 +17,31 @@ import java.util.*; /** - * Contains utilities that help to mock the retrieval of database entries from the tangle.
    - *
    + * Contains utilities that help to mock the retrieval of database entries from the tangle. + * + *

    * Mocking the tangle allows us to write unit tests that perform much faster than spinning up a new database for every - * test.
    + * test. + *

    */ public class TangleMockUtils { //region [mockMilestone] /////////////////////////////////////////////////////////////////////////////////////////// /** - * Registers a {@link Milestone} in the mocked tangle that can consequently be accessed by the tested classes.
    - *
    + *

    + * Registers a {@link Milestone} in the mocked tangle that can consequently be accessed by the tested classes. + *

    + *

    * It first creates the {@link Milestone} with the given details and then mocks the retrieval methods of the tangle * to return this object. In addition to mocking the specific retrieval method for the given hash, we also mock the * retrieval method for the "latest" entity so the mocked tangle returns the elements in the order that they were - * mocked / created (which allows the mocked tangle to behave just like a normal one).
    - *
    + * mocked / created (which allows the mocked tangle to behave just like a normal one). + *

    + *

    * Note: We return the mocked object which allows us to set additional fields or modify it after "injecting" it into - * the mocked tangle.
    - * + * the mocked tangle. + *

    + * * @param tangle mocked tangle object that shall retrieve a milestone object when being queried for it * @param hash transaction hash of the milestone * @param index milestone index of the milestone @@ -72,7 +78,7 @@ public static List mockValidBundle(Tangle tangle, address = TransactionTestUtils.nextWord(address); Converter.trits(address, trits, TransactionViewModel.ADDRESS_TRINARY_OFFSET); if (tx != null) { - TransactionTestUtils.getTransactionTritsWithTrunkAndBranch(trits, tx.getHash(), Hash.NULL_HASH); + TransactionTestUtils.getTransactionTritsWithTrunkAndBranchTrits(trits, tx.getHash(), Hash.NULL_HASH); } TransactionTestUtils.setLastIndex(trits, lastIndex); TransactionTestUtils.setCurrentIndex(trits, currentIndex--); @@ -111,7 +117,7 @@ public static List mockValidBundle(Tangle tangle, * This transaction is returned when the hash is asked to load in the tangle object * * @param tangle mocked tangle object that shall retrieve a milestone object when being queried for it - * @param hash + * @param hash transaction hash * @return The newly created (empty) transaction */ public static Transaction mockTransaction(Tangle tangle, Hash hash) { diff --git a/src/test/java/com/iota/iri/TransactionTestUtils.java b/src/test/java/com/iota/iri/TransactionTestUtils.java index c9200f4531..fc5b148f08 100644 --- a/src/test/java/com/iota/iri/TransactionTestUtils.java +++ b/src/test/java/com/iota/iri/TransactionTestUtils.java @@ -8,7 +8,6 @@ import com.iota.iri.model.persistables.Transaction; import com.iota.iri.utils.Converter; -import java.util.Arrays; import java.util.Random; import org.apache.commons.lang3.StringUtils; @@ -26,7 +25,13 @@ public class TransactionTestUtils { public static void setCurrentIndex(TransactionViewModel tx, long currentIndex) { setCurrentIndex(tx.trits(), currentIndex); } - + + /** + * Updates the transaction index trits. 
+ * + * @param trits The trits to update + * @param currentIndex The new index to set the transaction to + */ public static void setCurrentIndex(byte[] trits, long currentIndex) { Converter.copyTrits(currentIndex, trits, TransactionViewModel.CURRENT_INDEX_TRINARY_OFFSET, TransactionViewModel.CURRENT_INDEX_TRINARY_SIZE); @@ -42,21 +47,27 @@ public static void setLastIndex(TransactionViewModel tx, long lastIndex) { setLastIndex(tx.trits(), lastIndex); } + /** + * Updates the last transaction index trits. + * + * @param trits The trits to update + * @param currentIndex The new last index to set the transaction to + */ public static void setLastIndex(byte[] trits, long lastIndex) { Converter.copyTrits(lastIndex, trits, TransactionViewModel.LAST_INDEX_TRINARY_OFFSET, TransactionViewModel.LAST_INDEX_TRINARY_SIZE); } /** - * Generates a random transaction with a random hash. + * Generates a transaction with a hash. * Transaction last and current index are set to the index provided. * * @param index The index to set the transaction to - * @return A random transaction which is located on the end of its (nonexistent) bundle + * @return A transaction which is located on the end of its (nonexistent) bundle */ public static TransactionViewModel createBundleHead(int index) { - TransactionViewModel tx = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); + TransactionViewModel tx = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); setLastIndex(tx, index); setCurrentIndex(tx, index); return tx; @@ -71,14 +82,14 @@ public static TransactionViewModel createBundleHead(int index) { * @return A transaction in the same bundle as trunk, with its index 1 below trunk index */ public static TransactionViewModel createTransactionWithTrunkBundleHash(TransactionViewModel trunkTx, Hash branchHash) { - byte[] txTrits = getTransactionWithTrunkAndBranch(trunkTx.getHash(), branchHash); + byte[] txTrits = 
getTransactionTritsWithTrunkAndBranch(trunkTx.getHash(), branchHash); setCurrentIndex(txTrits, trunkTx.getCurrentIndex() - 1); setLastIndex(txTrits, trunkTx.lastIndex()); System.arraycopy(trunkTx.trits(), TransactionViewModel.BUNDLE_TRINARY_OFFSET, txTrits, TransactionViewModel.BUNDLE_TRINARY_OFFSET, TransactionViewModel.BUNDLE_TRINARY_SIZE); TransactionViewModel tx = new TransactionViewModel( txTrits, - getRandomTransactionHash()); + getTransactionHash()); return tx; } @@ -97,6 +108,13 @@ public static TransactionViewModel createTransactionWithTrytes(String trytes) { return createTransactionFromTrits(trits); } + /** + * Creates a {@link TransactionViewModel} from the supplied trits. + * Trits are not checked for size and content. + * + * @param trits The transaction trits + * @return The transaction + */ public static TransactionViewModel createTransactionFromTrits(byte[] trits) { return new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); } @@ -113,7 +131,7 @@ public static TransactionViewModel createTransactionFromTrits(byte[] trits) { */ public static TransactionViewModel createTransactionWithTrunkAndBranch(String trytes, Hash trunk, Hash branch) { byte[] trits = createTransactionWithTrunkAndBranchTrits(trytes, trunk, branch); - return new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); + return createTransactionFromTrits(trits); } /** @@ -128,19 +146,20 @@ public static TransactionViewModel createTransactionWithTrunkAndBranch(String tr public static byte[] createTransactionWithTrunkAndBranchTrits(String trytes, Hash trunk, Hash branch) { String expandedTrytes = expandTrytes(trytes); byte[] trits = Converter.allocatingTritsFromTrytes(expandedTrytes); - return getTransactionTritsWithTrunkAndBranch(trits, trunk, branch); + return getTransactionTritsWithTrunkAndBranchTrits(trits, trunk, branch); } /** - * Generates random transaction trits with the provided trytes, trunk and hash. 
+ * Generates transaction trits with the provided trytes, trunk and hash. + * No validation is done on the resulting trits, so fields are not valid except trunk and branch. * * @param trunk The trunk transaction hash * @param branch The branch transaction hash * @return The transaction trits */ - public static byte[] getTransactionWithTrunkAndBranch(Hash trunk, Hash branch) { - byte[] trits = new byte[TransactionViewModel.TRINARY_SIZE]; - return getTransactionTritsWithTrunkAndBranch(trits, trunk, branch); + public static byte[] getTransactionTritsWithTrunkAndBranch(Hash trunk, Hash branch) { + byte[] trits = getTransactionTrits(); + return getTransactionTritsWithTrunkAndBranchTrits(trits, trunk, branch); } /** @@ -150,7 +169,7 @@ public static byte[] getTransactionWithTrunkAndBranch(Hash trunk, Hash branch) { * @param branch The branch transaction hash * @return trits The transaction trits */ - public static byte[] getTransactionTritsWithTrunkAndBranch(byte[] trits, Hash trunk, Hash branch) { + public static byte[] getTransactionTritsWithTrunkAndBranchTrits(byte[] trits, Hash trunk, Hash branch) { System.arraycopy(trunk.trits(), 0, trits, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_OFFSET, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_SIZE); System.arraycopy(branch.trits(), 0, trits, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_OFFSET, @@ -184,22 +203,22 @@ else if (chars[i] != 'Z') { } /** - * Generates a random transaction. + * Generates a transaction. * * @return The transaction */ - public static Transaction getRandomTransaction() { - byte[] trits = getRandomTransactionTrits(); + public static Transaction getTransaction() { + byte[] trits = getTransactionTrits(); return buildTransaction(trits); } /** - * Generates random trits for a transaction. + * Generates trits for a transaction. 
* * @return The transaction trits */ - public static byte[] getRandomTransactionTrits() { - return getRandomTrits(TransactionViewModel.TRINARY_SIZE); + public static byte[] getTransactionTrits() { + return getTrits(TransactionViewModel.TRINARY_SIZE); } /** @@ -209,34 +228,58 @@ public static byte[] getRandomTransactionTrits() { */ public static Transaction get9Transaction() { byte[] trits = new byte[TransactionViewModel.TRINARY_SIZE]; - Arrays.fill(trits, (byte) 0); + for (int i = 0; i < trits.length; i++) { + trits[i] = 0; + } return buildTransaction(trits); } + + + /** + * Generates a transaction with trunk and hash. + * + * @param trunk The trunk transaction hash + * @param branch The branch transaction hash + * @return The transaction + */ + public static Transaction createTransactionWithTrunkAndBranch(Hash trunk, Hash branch) { + byte[] trits = getTrits(TransactionViewModel.TRINARY_SIZE); + getTransactionTritsWithTrunkAndBranchTrits(trits, trunk, branch); + return buildTransaction(trits); + } /** - * Generates random trits for a transaction. + * Generates trits for a hash. * * @return The transaction hash */ - public static Hash getRandomTransactionHash() { - byte[] out = getRandomTrits(Hash.SIZE_IN_TRITS); + public static Hash getTransactionHash() { + byte[] out = getTrits(Hash.SIZE_IN_TRITS); return HashFactory.TRANSACTION.create(out); } /** * Builds a transaction by transforming trits to bytes. 
+ * Make sure the trits are in the correct order (TVM.trits()) * * @param trits The trits to build the transaction * @return The created transaction */ - public static Transaction buildTransaction(byte[] trits) { - Transaction transaction = new Transaction(); + public static Transaction buildTransaction(byte[] trits) { + TransactionViewModel TVM = new TransactionViewModel(trits, Hash.NULL_HASH); + + //Getters obtain and load values from TVM trits ("lazy loading") + TVM.getAddressHash(); + TVM.getTrunkTransactionHash(); + TVM.getBranchTransactionHash(); + TVM.getBundleHash(); + TVM.getTagValue(); + TVM.getObsoleteTagValue(); + TVM.setAttachmentData(); + TVM.setMetadata(); - transaction.bytes = Converter.allocateBytesForTrits(trits.length); - Converter.bytes(trits, 0, transaction.bytes, 0, trits.length); - transaction.readMetadata( transaction.bytes); - return transaction; + return TVM.getTransaction(); } /** @@ -250,12 +293,13 @@ private static String expandTrytes(String trytes) { } /** - * Generates random trits of specified size. + * Generates 'random' trits of specified size. + * Not truly random as we always use the same seed. 
* * @param size the amount of trits to generate * @return The trits */ - private static byte[] getRandomTrits(int size) { + private static byte[] getTrits(int size) { byte[] out = new byte[size]; for(int i = 0; i < out.length; i++) { diff --git a/src/test/java/com/iota/iri/TransactionValidatorTest.java b/src/test/java/com/iota/iri/TransactionValidatorTest.java index f70c29d516..1ee6f751f6 100644 --- a/src/test/java/com/iota/iri/TransactionValidatorTest.java +++ b/src/test/java/com/iota/iri/TransactionValidatorTest.java @@ -11,12 +11,10 @@ import com.iota.iri.storage.Tangle; import com.iota.iri.storage.rocksDB.RocksDBPersistenceProvider; import com.iota.iri.utils.Converter; -import com.iota.iri.zmq.MessageQ; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; import static com.iota.iri.TransactionTestUtils.*; import static org.junit.Assert.assertFalse; @@ -42,8 +40,7 @@ public static void setUp() throws Exception { dbFolder.getRoot().getAbsolutePath(), logFolder.getRoot().getAbsolutePath(),1000, Tangle.COLUMN_FAMILIES, Tangle.METADATA_COLUMN_FAMILY)); tangle.init(); TipsViewModel tipsViewModel = new TipsViewModel(); - MessageQ messageQ = Mockito.mock(MessageQ.class); - TransactionRequester txRequester = new TransactionRequester(tangle, snapshotProvider, messageQ); + TransactionRequester txRequester = new TransactionRequester(tangle, snapshotProvider); txValidator = new TransactionValidator(tangle, snapshotProvider, tipsViewModel, txRequester); txValidator.setMwm(false, MAINNET_MWM); } @@ -66,20 +63,20 @@ public void testMinMwm() throws InterruptedException { @Test public void validateTrits() { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Converter.copyTrits(0, trits, 0, trits.length); txValidator.validateTrits(trits, MAINNET_MWM); } @Test(expected = RuntimeException.class) public void validateTritsWithInvalidMetadata() { - 
byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); txValidator.validateTrits(trits, MAINNET_MWM); } @Test public void validateBytesWithNewCurl() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Converter.copyTrits(0, trits, 0, trits.length); byte[] bytes = Converter.allocateBytesForTrits(trits.length); Converter.bytes(trits, 0, bytes, 0, trits.length); @@ -102,7 +99,7 @@ public void verifyTxIsNotSolid() throws Exception { @Test public void addSolidTransactionWithoutErrors() { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Converter.copyTrits(0, trits, 0, trits.length); txValidator.addSolidTransaction(TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); } @@ -116,7 +113,7 @@ private TransactionViewModel getTxWithBranchAndTrunk() throws Exception { trunkTx = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); branchTx = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); - byte[] childTx = getRandomTransactionTrits(); + byte[] childTx = getTransactionTrits(); System.arraycopy(trunkTx.getHash().trits(), 0, childTx, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_OFFSET, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_SIZE); System.arraycopy(branchTx.getHash().trits(), 0, childTx, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_OFFSET, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_SIZE); tx = new TransactionViewModel(childTx, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, childTx)); @@ -165,25 +162,25 @@ public void testTransactionPropagation() throws Exception { @Test public void testTransactionPropagationFailure() throws Exception { - TransactionViewModel leftChildLeaf = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); + TransactionViewModel leftChildLeaf = new TransactionViewModel(getTransactionTrits(), 
getTransactionHash()); leftChildLeaf.updateSolid(true); leftChildLeaf.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel rightChildLeaf = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); + TransactionViewModel rightChildLeaf = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); rightChildLeaf.updateSolid(true); rightChildLeaf.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel parent = new TransactionViewModel(getTransactionWithTrunkAndBranch(leftChildLeaf.getHash(), - rightChildLeaf.getHash()), getRandomTransactionHash()); + TransactionViewModel parent = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(leftChildLeaf.getHash(), + rightChildLeaf.getHash()), getTransactionHash()); parent.updateSolid(false); parent.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel parentSibling = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); + TransactionViewModel parentSibling = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); parentSibling.updateSolid(false); parentSibling.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel grandParent = new TransactionViewModel(getTransactionWithTrunkAndBranch(parent.getHash(), - parentSibling.getHash()), getRandomTransactionHash()); + TransactionViewModel grandParent = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(parent.getHash(), + parentSibling.getHash()), getTransactionHash()); grandParent.updateSolid(false); grandParent.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -199,7 +196,7 @@ public void testTransactionPropagationFailure() throws Exception { } private TransactionViewModel getTxWithoutBranchAndTrunk() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); TransactionViewModel tx = new TransactionViewModel(trits, 
TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); tx.store(tangle, snapshotProvider.getInitialSnapshot()); diff --git a/src/test/java/com/iota/iri/benchmarks/BenchmarkRunner.java b/src/test/java/com/iota/iri/benchmarks/BenchmarkRunner.java index a5954a2ad6..ef4a22af9c 100644 --- a/src/test/java/com/iota/iri/benchmarks/BenchmarkRunner.java +++ b/src/test/java/com/iota/iri/benchmarks/BenchmarkRunner.java @@ -28,4 +28,19 @@ public void launchDbBenchmarks() throws RunnerException { //possible to do assertions over run results new Runner(opts).run(); } + + @Test + public void launchCryptoBenchmark() throws RunnerException { + Options opts = new OptionsBuilder() + .include(this.getClass().getPackage().getName() + ".crypto") + .mode(Mode.Throughput) + .timeUnit(TimeUnit.SECONDS) + .warmupIterations(5) + .forks(1) + .measurementIterations(10) + .shouldFailOnError(true) + .shouldDoGC(false) + .build(); + new Runner(opts).run(); + } } diff --git a/src/test/java/com/iota/iri/benchmarks/crypto/CurlBenchmark.java b/src/test/java/com/iota/iri/benchmarks/crypto/CurlBenchmark.java new file mode 100644 index 0000000000..341ab3d23a --- /dev/null +++ b/src/test/java/com/iota/iri/benchmarks/crypto/CurlBenchmark.java @@ -0,0 +1,62 @@ +package com.iota.iri.benchmarks.crypto; + +import com.iota.iri.crypto.Curl; +import com.iota.iri.crypto.SpongeFactory; +import com.iota.iri.utils.Converter; +import com.iota.iri.utils.Pair; +import org.junit.Assert; +import org.openjdk.jmh.annotations.Benchmark; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +public class CurlBenchmark { + private final static String TRYTES = 
"RSWWSFXPQJUBJROQBRQZWZXZJWMUBVIVMHPPTYSNW9YQIQQF9RCSJJCVZG9ZWITXNCSBBDHEEKDRBHVTWCZ9SZOOZHVBPCQNPKTWFNZAWGCZ9QDIMKRVINMIRZBPKRKQAIPGOHBTHTGYXTBJLSURDSPEOJ9UKJECUKCCPVIQQHDUYKVKISCEIEGVOQWRBAYXWGSJUTEVG9RPQLPTKYCRAJ9YNCUMDVDYDQCKRJOAPXCSUDAJGETALJINHEVNAARIPONBWXUOQUFGNOCUSSLYWKOZMZUKLNITZIFXFWQAYVJCVMDTRSHORGNSTKX9Z9DLWNHZSMNOYTU9AUCGYBVIITEPEKIXBCOFCMQPBGXYJKSHPXNUKFTXIJVYRFILAVXEWTUICZCYYPCEHNTK9SLGVL9RLAMYTAEPONCBHDXSEQZOXO9XCFUCPPMKEBR9IEJGQOPPILHFXHMIULJYXZJASQEGCQDVYFOM9ETXAGVMSCHHQLFPATWOSMZIDL9AHMSDCE9UENACG9OVFAEIPPQYBCLXDMXXA9UBJFQQBCYKETPNKHNOUKCSSYLWZDLKUARXNVKKKHNRBVSTVKQCZL9RY9BDTDTPUTFUBGRMSTOTXLWUHDMSGYRDSZLIPGQXIDMNCNBOAOI9WFUCXSRLJFIVTIPIAZUK9EDUJJ9B9YCJEZQQELLHVCWDNRH9FUXDGZRGOVXGOKORTCQQA9JXNROLETYCNLRMBGXBL9DQKMOAZCBJGWLNJLGRSTYBKLGFVRUF9QOPZVQFGMDJA9TBVGFJDBAHEVOLW9GNU9NICLCQJBOAJBAHHBZJGOFUCQMBGYQLCWNKSZPPBQMSJTJLM9GXOZHTNDLGIRCSIJAZTENQVQDHFSOQM9WVNWQQJNOPZMEISSCLOADMRNWALBBSLSWNCTOSNHNLWZBVCFIOGFPCPRKQSRGKFXGTWUSCPZSKQNLQJGKDLOXSBJMEHQPDZGSENUKWAHRNONDTBLHNAKGLOMCFYRCGMDOVANPFHMQRFCZIQHCGVORJJNYMTORDKPJPLA9LWAKAWXLIFEVLKHRKCDG9QPQCPGVKIVBENQJTJGZKFTNZHIMQISVBNLHAYSSVJKTIELGTETKPVRQXNAPWOBGQGFRMMK9UQDWJHSQMYQQTCBMVQKUVGJEAGTEQDN9TCRRAZHDPSPIYVNKPGJSJZASZQBM9WXEDWGAOQPPZFLAMZLEZGXPYSOJRWL9ZH9NOJTUKXNTCRRDO9GKULXBAVDRIZBOKJYVJUSHIX9F9O9ACYCAHUKBIEPVZWVJAJGSDQNZNWLIWVSKFJUMOYDMVUFLUXT9CEQEVRFBJVPCTJQCORM9JHLYFSMUVMFDXZFNCUFZZIKREIUIHUSHRPPOUKGFKWX9COXBAZMQBBFRFIBGEAVKBWKNTBMLPHLOUYOXPIQIZQWGOVUWQABTJT9ZZPNBABQFYRCQLXDHDEX9PULVTCQLWPTJLRSVZQEEYVBVY9KCNEZXQLEGADSTJBYOXEVGVTUFKNCNWMEDKDUMTKCMRPGKDCCBDHDVVSMPOPUBZOMZTXJSQNVVGXNPPBVSBL9WWXWQNMHRMQFEQYKWNCSW9URI9FYPT9UZMAFMMGUKFYTWPCQKVJ9DIHRJFMXRZUGI9TMTFUQHGXNBITDSORZORQIAMKY9VRYKLEHNRNFSEFBHF9KXIQAEZEJNQOENJVMWLMHI9GNZPXYUIFAJIVCLAGKUZIKTJKGNQVTXJORWIQDHUPBBPPYOUPFAABBVMMYATXERQHPECDVYGWDGXFJKOMOBXKRZD9MCQ9LGDGGGMYGUAFGMQTUHZOAPLKPNPCIKUNEMQIZOCM9COAOMZSJ9GVWZBZYXMCNALENZ9PRYMHENPWGKX9ULUIGJUJRKFJPBTTHCRZQKEAHT9DC9GSWQEGDTZFHACZMLFYDVOWZADBNMEM9XXEOMHCNJMDSUAJRQTBUWKJF9RZHK9ACGUNI9URFIHLXBXCEODONP
XBSCWP9WNAEYNALKQHGULUQGAFL9LB9NBLLCACLQFGQMXRHGBTMI9YKAJKVELRWWKJAPKMSYMJTDYMZ9PJEEYIRXRMMFLRSFSHIXUL9NEJABLRUGHJFL9RASMSKOI9VCFRZ9GWTMODUUESIJBHWWHZYCLDENBFSJQPIOYC9MBGOOXSWEMLVU9L9WJXKZKVDBDMFSVHHISSSNILUMWULMVMESQUIHDGBDXROXGH9MTNFSLWJZRAPOKKRGXAAQBFPYPAAXLSTMNSNDTTJQSDQORNJS9BBGQ9KQJZYPAQ9JYQZJ9B9KQDAXUACZWRUNGMBOQLQZUHFNCKVQGORRZGAHES9PWJUKZWUJSBMNZFILBNBQQKLXITCTQDDBV9UDAOQOUPWMXTXWFWVMCXIXLRMRWMAYYQJPCEAAOFEOGZQMEDAGYGCTKUJBS9AGEXJAFHWWDZRYEN9DN9HVCMLFURISLYSWKXHJKXMHUWZXUQARMYPGKRKQMHVR9JEYXJRPNZINYNCGZHHUNHBAIJHLYZIZGGIDFWVNXZQADLEDJFTIUTQWCQSX9QNGUZXGXJYUUTFSZPQKXBA9DFRQRLTLUJENKESDGTZRGRSLTNYTITXRXRGVLWBTEWPJXZYLGHLQBAVYVOSABIVTQYQM9FIQKCBRRUEMVVTMERLWOK"; + private final static String HASH = "TIXEPIEYMGURTQ9ABVYVQSWMNGCVQFASMFAEQWUZCLIWLCDIGYVXOEJBBEMZOIHAYSUQMEFOGZBXUMHQW"; + + /** + * Benchmark absorb and squeeze methods of Curl 81 hash function. + */ + @Benchmark + public void curl() { + int size = 8019; + byte[] in_trits = new byte[size]; + byte[] hash_trits = new byte[Curl.HASH_LENGTH]; + Converter.trits(TRYTES, in_trits, 0); + Curl curl = (Curl) SpongeFactory.create(SpongeFactory.Mode.CURLP81); + curl.absorb(in_trits, 0, in_trits.length); + curl.squeeze(hash_trits, 0, Curl.HASH_LENGTH); + String out_trytes = Converter.trytes(hash_trits); + Assert.assertEquals(HASH, out_trytes); + } + + /** + * Benchmark absorb and squeeze methods of pair Curl 81 hash function. + */ + @Benchmark + public void pairCurl() throws NoSuchMethodException, IllegalAccessException, InvocationTargetException, InstantiationException { + int size = 8019; + byte[] in_trits = new byte[size]; + Pair hashPair = new Pair<>(new long[Curl.HASH_LENGTH], new long[Curl.HASH_LENGTH]); + Converter.trits(TRYTES, in_trits, 0); + // Using reflection to benchmark private, non-production code. 
+ // Reflection doesn't have impact on benchmark result (this has been tested) + // Please remove this code when method are public + Class curlClass = Curl.class; + Constructor curlConstructor = curlClass.getDeclaredConstructor(boolean.class, SpongeFactory.Mode.class); + curlConstructor.setAccessible(true); + Curl curl = curlConstructor.newInstance(true, SpongeFactory.Mode.CURLP81); + Method pairAbsorb = curlClass.getDeclaredMethod("absorb", Pair.class, int.class, int.class); + Method pairSqueeze = curlClass.getDeclaredMethod("squeeze", Pair.class, int.class, int.class); + pairAbsorb.setAccessible(true); + pairSqueeze.setAccessible(true); + + pairAbsorb.invoke(curl, Converter.longPair(in_trits), 0, in_trits.length); + pairSqueeze.invoke(curl, hashPair, 0, Curl.HASH_LENGTH); + byte[] hash_trits = Converter.trits(hashPair.low, hashPair.hi); + String out_trytes = Converter.trytes(hash_trits); + Assert.assertEquals(HASH, out_trytes); + } + +} diff --git a/src/test/java/com/iota/iri/benchmarks/crypto/PearlDiverBenchmark.java b/src/test/java/com/iota/iri/benchmarks/crypto/PearlDiverBenchmark.java new file mode 100644 index 0000000000..9d36eb1ed3 --- /dev/null +++ b/src/test/java/com/iota/iri/benchmarks/crypto/PearlDiverBenchmark.java @@ -0,0 +1,23 @@ +package com.iota.iri.benchmarks.crypto; + +import com.iota.iri.crypto.PearlDiver; +import com.iota.iri.utils.Converter; +import org.openjdk.jmh.annotations.Benchmark; + +public class PearlDiverBenchmark { + private static final int MIN_WEIGHT_MAGNITUDE = 9; + private static final int NUM_CORES = -1; // use n-1 cores + private static final String TRYTES = 
"ZMGYJPAAYHGRDGKCUEAIZDERGWUJH9QIHEESTUAZIQDQGMREKOVZCQRKHUZPXQ9PPIBGARFTZBZCYGAWIEUGPMIEEMKVSLCCJPITK9A9VLHCWLRZTRYDPGPFGEEFMVMVQGGM9NFPEKLVQHMOPDTJHDIEHBFGBBFOHPLHX9RJVEIQC9WAOFPHP9GGFRYCJEX9UVUGVJLBCBNQCLKSLULGZUHCVUJZMILSROKHYAA9RLF9XQXKXCULHJIUGXDWBKJLGLHXXR9BNKDKG9JMJCTAGMKWGXPPKVBVNROUADXHUJWYDWZDSIUIVUTKKBTVJ9CMKWVFMCYMCVFMDPLQ9YFIHUMRTBZLIT9LIQRFXF9AKDMTXMHKCSFQDJIFQRFBHSIKVKQBUKFDPOLWQXBUZKNGGHJW9IVLG9GTIW9IRWGCM9A9YT99JUDKQQLWBFXGYVDYSRYSAWVRBFXIKMQRGCIHLWOUTKFWWDFRQZMCAUXSSK9LWQRCNNCXBJOIKVMLTMRENT9YLUOHACKHYYUERSBKWSCJTCJBCDCARABPVNNKIXXSVLELPILCTRQCEGYITPVODUMUEYFRJBOKAVVIFURHZO9SYVHZGZZIRYGYJDCOWIEEZNBDBHMVDFCGDCYSJFEGCYDIGTPTAZTM9MJXKCCRGJQTXTQYOGLGBAWAUAPVMJAAUB9QOOO9FPQVCQVZQLAICIWUXZAVVRZJFKMXSJEL9KGDSGPKEIZHDXQWZFYWPLVOSSOAETKPMSDDQEZDOCNCOJCEWLSA9SSQODHAEUEERDCOTSHNEOVGCFGXQRRIVYJQUUMRVCAEMMPFYZAEWRFVBNWSGTGEWZUCOWKMEIEAGXCHPVTABBXUEWCLHTEOJZ9JPRQP9CCJQMUNQYMMHCVPSABZ9XVRLWFOMYOGVVYSYMYWCXQFUBHUVPONPSZUCYAWHWOEGAFCUXUWBRTBTYWVXNSSIZ9LHLX9UNN9FUJDUVBNLJSD9OG9FACSRNELL9DMTYITWDZPOHZSUSNUWJ9CEKQOXOXVZJVZZPTWJIFXXXRAVTDLLETTKPGNLWQCWERXFRZOPBZTDWNURWXDFKKP9SIG9IPGUSEHAGPQTCRAMMMPUWPRQKJDAKFUTR9YVVCXJQC9ZWZBOEAQISNTDHUQRKCXASYNZLQIJMZMMO9TWTHPMJIJKPTU9DMMCPYXOSPNSDQPID9YSIOFDNIJBQUEB9JTSRTWLMXUP9WIQFSZNINQLNS9NJWTLDLZYABHFKCZOBHQQQVWWVTWCKMFSYVPZHRHNJZWWFUHMCHYKLMCBXGAVRJSYLXOSYUHHTB9VMQT9NYPRDVAWWYREFVGFEJZKGPVEOQXWZD9LQNUFTREMBT9NETURCGYBWLGNMUPDO9ZSSLZJR9AEUPYLGEEIKUTHTCIXHSBCEKATIEQYBTYACTFWUJQCCTYHNTMFXHZKEFIEOXFQTKYDY9BDSLS9HXC9ANNQONFKIBZHREPYLSZWCR99HBYTLLISHGDXNLSRKNBSFCPHAFEUYLQWISLWOZRKKP9GSYFMXUBXJLMARNGJHORAVNZVDBKAZESOBFVBFSOZQGZ9IOJGCDUPYRDGEJVOPNLEFXTNXUBDSEDIYLUQBTDJONKRMBUFWYIJINZGIZOVPRKCTYFILHJPGAUSS9QPANDZQFXWTSLLEMDXMKZYWTIRUZIXWMJRRIUINDORRFTNSHASDZRGADSIRZYQAGJUVDCCRWOCMKRL9IPEEUJKLZMGLBSLXFVEHT9LWIKZYMNZUIQMJBRJETVQVWIENTSF9HLAOMKRNHHTRTPFHGWYVODZMNFPJRHLJXTKXWSSWXSTILWWJHQFFZLIZZGVAMZ9AGTM9RXQRZENESFBYZDJAQQFEHIYWYNKNYIXIRFGMMMXUXQ9TMCHIMATYHNBAVGKOXLIIVPOEFLUJCYURXQLEXSBKCVGCPOZGTVFZVSMOE9DEHTYVT9BDXHEIZFAQFZGSEDQCJAMZTWTLA
CGTTQKFDWFH9DLTOSCPGFJVKEFOCDLNDOKJK9MNUEFXMOLXMXBMAYO9QZOJEYWPPBOFZMJQTLULEGEPECTHHCAOHSHLIIBNIELLLXIGMUPFLCEBHUSQ9ZEYGXVDOSZTECCJHATQODTZNLPGUVOXYZTHAQELTYYTTJTICZYRGLRROICEKMWXDEUCKFRKOQFCMPITOMWVPYGPMLVWNQAV99SYHJYZJTKXQICGYHKQJ9QAOFACDNHRYYHDMCSHPLMVLWEXUEZDDTQJUBMBVJCO9RNASPTYVHVLHAYYGYUCRBWZZESBHEUEZIHJDNOVOFCEAZTKDXACSUPBFYCIRDVPPJTSWHM9BAVSPPGMBVZKIJBAGWZEIIUNUYOHEVEO9FYMJZXJFLXXIYFBIZYKTHNQWBJYHJPTYJPPUWDBQWJAWEYSGSFVLQYIB9TNYUSNWVVJPFVDQUONBBPQJINWXAKXHFSUQKYJBFGXCQWT9TYHMAFIWQPQI9IBEMDCRIFOZN9KFGBGH99ZSKFTLOWMNYJDBREEYQBVSMZPDVGRYNDRPERXXISDEYTLJBNTJSVTSSTMHG9HCC9PIAHWZAMDGRMZFNQKEJCW9NBFRTNRRXTOTUAJS9DKRAUZWCIUYXTUHYT9SJDSFRWGCPFOBUHHNXMWNLJJRLGMVMBISRI"; + + /** + * Benchmark Pearl Diver search function. + */ + @Benchmark + public void search() { + PearlDiver pearlDiver = new PearlDiver(); + byte[] myTrits = Converter.allocateTritsForTrytes(TRYTES.length()); + Converter.trits(TRYTES, myTrits, 0); + pearlDiver.search(myTrits, MIN_WEIGHT_MAGNITUDE, NUM_CORES); + } + +} diff --git a/src/test/java/com/iota/iri/conf/ConfigTest.java b/src/test/java/com/iota/iri/conf/ConfigTest.java index f865147f8e..c341615db0 100644 --- a/src/test/java/com/iota/iri/conf/ConfigTest.java +++ b/src/test/java/com/iota/iri/conf/ConfigTest.java @@ -19,8 +19,11 @@ import java.io.IOException; import java.io.Writer; import java.lang.reflect.Method; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collection; +import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -51,7 +54,7 @@ public static void tearDownAfterClass() throws IOException { Test that iterates over common configs. 
It also attempts to check different types of types (double, boolean, string) */ @Test - public void testArgsParsingMainnet() { + public void testArgsParsingMainnet() throws UnknownHostException { String[] args = { "-p", "14000", "-u", "13000", @@ -59,6 +62,7 @@ public void testArgsParsingMainnet() { "-n", "udp://neighbor1 neighbor, tcp://neighbor2", "--api-host", "1.1.1.1", "--remote-limit-api", "call1 call2, call3", + "--remote-trusted-api-hosts", "192.168.0.55, 10.0.0.10", "--max-find-transactions", "500", "--max-requests-list", "1000", "--max-get-trytes", "4000", @@ -72,11 +76,11 @@ public void testArgsParsingMainnet() { "--ixi-dir", "/ixi", "--db-path", "/db", "--db-log-path", "/dblog", - "--zmq-enabled", + "--zmq-enabled", "true", //we ignore this on mainnet "--mwm", "4", "--testnet-coordinator", "TTTTTTTTT", - "--test-no-coo-validation", + "--test-no-coo-validation", "true", //this should be ignored everywhere "--fake-config" }; @@ -92,6 +96,13 @@ public void testArgsParsingMainnet() { Assert.assertEquals("api host", "1.1.1.1", iotaConfig.getApiHost()); Assert.assertEquals("remote limit api", Arrays.asList("call1", "call2", "call3"), iotaConfig.getRemoteLimitApi()); + + List expectedTrustedApiHosts = Arrays.asList( + InetAddress.getByName("192.168.0.55"), + InetAddress.getByName("10.0.0.10"), + InetAddress.getByName("127.0.0.1")); + Assert.assertEquals("remote trusted api hosts", expectedTrustedApiHosts, iotaConfig.getRemoteTrustedApiHosts()); + Assert.assertEquals("max find transactions", 500, iotaConfig.getMaxFindTransactions()); Assert.assertEquals("max requests list", 1000, iotaConfig.getMaxRequestsList()); Assert.assertEquals("max get trytes", 4000, iotaConfig.getMaxGetTrytes()); @@ -102,7 +113,7 @@ public void testArgsParsingMainnet() { Assert.assertEquals("max peers", 10, iotaConfig.getMaxPeers()); Assert.assertEquals("dns refresher", false, iotaConfig.isDnsRefresherEnabled()); Assert.assertEquals("dns resolution", false, 
iotaConfig.isDnsResolutionEnabled()); - Assert.assertEquals("tip solidification", true, iotaConfig.isTipSolidifierEnabled()); + Assert.assertEquals("tip solidification", false, iotaConfig.isTipSolidifierEnabled()); Assert.assertEquals("ixi-dir", "/ixi", iotaConfig.getIxiDir()); Assert.assertEquals("db path", "/db", iotaConfig.getDbPath()); Assert.assertEquals("zmq enabled", true, iotaConfig.isZmqEnabled()); @@ -115,7 +126,7 @@ public void testArgsParsingMainnet() { @Test public void testRemoteFlag() { - String[] args = {"--remote"}; + String[] args = {"--remote", "true"}; IotaConfig iotaConfig = ConfigFactory.createIotaConfig(false); iotaConfig.parseConfigFromArgs(args); Assert.assertEquals("The api interface should be open to the public", "0.0.0.0", iotaConfig.getApiHost()); @@ -144,11 +155,11 @@ public void testArgsParsingTestnet() { "--ixi-dir", "/ixi", "--db-path", "/db", "--db-log-path", "/dblog", - "--zmq-enabled", + "--zmq-enabled", "true", //we ignore this on mainnet "--mwm", "4", "--testnet-coordinator", "TTTTTTTTT", - "--testnet-no-coo-validation", + "--testnet-no-coo-validation", "true", //this should be ignored everywhere "--fake-config" }; @@ -190,6 +201,7 @@ public void testIniParsingMainnet() throws Exception { .append("[IRI]").append(System.lineSeparator()) .append("PORT = 17000").append(System.lineSeparator()) .append("NEIGHBORS = udp://neighbor1 neighbor, tcp://neighbor2").append(System.lineSeparator()) + .append("REMOTE_TRUSTED_API_HOSTS = 192.168.0.55, 10.0.0.10").append(System.lineSeparator()) .append("ZMQ_ENABLED = true").append(System.lineSeparator()) .append("P_REMOVE_REQUEST = 0.4").append(System.lineSeparator()) .append("MWM = 4").append(System.lineSeparator()) @@ -206,6 +218,13 @@ public void testIniParsingMainnet() throws Exception { Assert.assertEquals("PORT", 17000, iotaConfig.getPort()); Assert.assertEquals("NEIGHBORS", Arrays.asList("udp://neighbor1", "neighbor", "tcp://neighbor2"), iotaConfig.getNeighbors()); + + List 
expectedTrustedApiHosts = Arrays.asList( + InetAddress.getByName("192.168.0.55"), + InetAddress.getByName("10.0.0.10"), + BaseIotaConfig.Defaults.REMOTE_LIMIT_API_DEFAULT_HOST); + Assert.assertEquals("REMOTE_TRUSTED_API_HOSTS", expectedTrustedApiHosts, iotaConfig.getRemoteTrustedApiHosts()); + Assert.assertEquals("ZMQ_ENABLED", true, iotaConfig.isZmqEnabled()); Assert.assertEquals("P_REMOVE_REQUEST", 0.4d, iotaConfig.getpRemoveRequest(), 0); Assert.assertNotEquals("MWM", 4, iotaConfig.getMwm()); diff --git a/src/test/java/com/iota/iri/conf/ZMQConfigTest.java b/src/test/java/com/iota/iri/conf/ZMQConfigTest.java new file mode 100644 index 0000000000..e2caee1feb --- /dev/null +++ b/src/test/java/com/iota/iri/conf/ZMQConfigTest.java @@ -0,0 +1,88 @@ +package com.iota.iri.conf; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class ZMQConfigTest { + + @Test + public void isZmqEnabledLegacy() { + String[] args = { + "--zmq-enabled", "true", + }; + IotaConfig config = ConfigFactory.createIotaConfig(false); + config.parseConfigFromArgs(args); + assertTrue("ZMQ must be globally enabled", config.isZmqEnabled()); + assertTrue("ZMQ TCP must be enabled", config.isZmqEnableTcp()); + assertTrue("ZMQ IPC must be enabled", config.isZmqEnableIpc()); + } + + @Test + public void isZmqEnabled() { + String[] args = { + "--zmq-enable-tcp", "true", + "--zmq-enable-ipc", "true", + }; + IotaConfig config = ConfigFactory.createIotaConfig(false); + config.parseConfigFromArgs(args); + assertTrue("ZMQ must be globally enabled", config.isZmqEnabled()); + assertTrue("ZMQ TCP must be enabled", config.isZmqEnableTcp()); + assertTrue("ZMQ IPC must be enabled", config.isZmqEnableIpc()); + } + + @Test + public void isZmqEnableTcp() { + String[] args = { + "--zmq-enable-tcp", "true" + }; + IotaConfig config = ConfigFactory.createIotaConfig(false); + config.parseConfigFromArgs(args); + assertEquals("ZMQ port must be the 
default port", 5556, config.getZmqPort()); + assertTrue("ZMQ TCP must be enabled", config.isZmqEnableTcp()); + } + + @Test + public void isZmqEnableIpc() { + String[] args = { + "--zmq-enable-ipc", "true" + }; + IotaConfig config = ConfigFactory.createIotaConfig(false); + config.parseConfigFromArgs(args); + assertEquals("ZMQ ipc must be the default ipc", "ipc://iri", config.getZmqIpc()); + assertTrue("ZMQ IPC must be enabled", config.isZmqEnableIpc()); + } + + @Test + public void getZmqPort() { + String[] args = { + "--zmq-port", "8899" + }; + IotaConfig config = ConfigFactory.createIotaConfig(false); + config.parseConfigFromArgs(args); + assertTrue("ZMQ TCP must be enabled", config.isZmqEnableTcp()); + assertEquals("ZMQ port must be overridden", 8899, config.getZmqPort()); + } + + @Test + public void getZmqThreads() { + String[] args = { + "--zmq-threads", "5" + }; + IotaConfig config = ConfigFactory.createIotaConfig(false); + config.parseConfigFromArgs(args); + assertEquals("ZMQ threads must be overridden", 5, config.getZmqThreads()); + } + + @Test + public void getZmqIpc() { + String[] args = { + "--zmq-ipc", "ipc://test" + }; + IotaConfig config = ConfigFactory.createIotaConfig(false); + config.parseConfigFromArgs(args); + assertTrue("ZMQ IPC must be enabled", config.isZmqEnableIpc()); + assertEquals("ZMQ ipc must be overridden", "ipc://test", config.getZmqIpc()); + } +} \ No newline at end of file diff --git a/src/test/java/com/iota/iri/controllers/BundleViewModelTest.java b/src/test/java/com/iota/iri/controllers/BundleViewModelTest.java index f50f09583b..eeb99aa519 100644 --- a/src/test/java/com/iota/iri/controllers/BundleViewModelTest.java +++ b/src/test/java/com/iota/iri/controllers/BundleViewModelTest.java @@ -13,7 +13,7 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionTrits; +import static com.iota.iri.TransactionTestUtils.getTransactionTrits; public class 
BundleViewModelTest { private static final TemporaryFolder dbFolder = new TemporaryFolder(); @@ -71,7 +71,7 @@ public void getTail() throws Exception { @Test public void firstShouldFindTx() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); TransactionViewModel transactionViewModel = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); diff --git a/src/test/java/com/iota/iri/controllers/TipsViewModelTest.java b/src/test/java/com/iota/iri/controllers/TipsViewModelTest.java index 0be38f4b5d..b5420df844 100644 --- a/src/test/java/com/iota/iri/controllers/TipsViewModelTest.java +++ b/src/test/java/com/iota/iri/controllers/TipsViewModelTest.java @@ -8,7 +8,7 @@ import java.util.concurrent.ExecutionException; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; import static org.junit.Assert.*; @@ -79,7 +79,7 @@ public void nonsolidCapacityLimited() throws ExecutionException, InterruptedExce int capacity = TipsViewModel.MAX_TIPS; //fill tips list for (int i = 0; i < capacity * 2 ; i++) { - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); tipsVM.addTipHash(hash); } //check that limit wasn't breached @@ -92,7 +92,7 @@ public void solidCapacityLimited() throws ExecutionException, InterruptedExcepti int capacity = TipsViewModel.MAX_TIPS; //fill tips list for (int i = 0; i < capacity * 2 ; i++) { - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); tipsVM.addTipHash(hash); tipsVM.setSolid(hash); } @@ -106,7 +106,7 @@ public void totalCapacityLimited() throws ExecutionException, InterruptedExcepti int capacity = TipsViewModel.MAX_TIPS; //fill tips list for (int i = 0; i <= capacity * 4; i++) { - Hash hash = getRandomTransactionHash(); + Hash hash = 
getTransactionHash(); tipsVM.addTipHash(hash); if (i % 2 == 1) { tipsVM.setSolid(hash); diff --git a/src/test/java/com/iota/iri/controllers/TransactionViewModelTest.java b/src/test/java/com/iota/iri/controllers/TransactionViewModelTest.java index 4d059f1201..720316e842 100644 --- a/src/test/java/com/iota/iri/controllers/TransactionViewModelTest.java +++ b/src/test/java/com/iota/iri/controllers/TransactionViewModelTest.java @@ -23,9 +23,9 @@ import java.util.Random; import java.util.Set; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionTrits; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; -import static com.iota.iri.TransactionTestUtils.getTransactionWithTrunkAndBranch; +import static com.iota.iri.TransactionTestUtils.getTransactionTrits; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionTritsWithTrunkAndBranch; import static org.junit.Assert.*; @@ -77,17 +77,17 @@ public void getApprovers() throws Exception { TransactionViewModel transactionViewModel, otherTxVM, trunkTx, branchTx; - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); trunkTx = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); branchTx = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); - byte[] childTx = getRandomTransactionTrits(); + byte[] childTx = getTransactionTrits(); System.arraycopy(trunkTx.getHash().trits(), 0, childTx, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_OFFSET, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_SIZE); System.arraycopy(branchTx.getHash().trits(), 0, childTx, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_OFFSET, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_SIZE); transactionViewModel = new TransactionViewModel(childTx, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, childTx)); - childTx = 
getRandomTransactionTrits(); + childTx = getTransactionTrits(); System.arraycopy(trunkTx.getHash().trits(), 0, childTx, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_OFFSET, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_SIZE); System.arraycopy(branchTx.getHash().trits(), 0, childTx, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_OFFSET, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_SIZE); otherTxVM = new TransactionViewModel(childTx, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, childTx)); @@ -120,13 +120,13 @@ public void update() throws Exception { public void trits() throws Exception { byte[] blanks = new byte[13]; for(int i=0; i++ < 1000;) { - byte[] trits = getRandomTransactionTrits(), searchTrits; + byte[] trits = getTransactionTrits(), searchTrits; System.arraycopy(new byte[TransactionViewModel.VALUE_TRINARY_SIZE], 0, trits, TransactionViewModel.VALUE_TRINARY_OFFSET, TransactionViewModel.VALUE_TRINARY_SIZE); Converter.copyTrits(seed.nextLong(), trits, TransactionViewModel.VALUE_TRINARY_OFFSET, TransactionViewModel.VALUE_USABLE_TRINARY_SIZE); System.arraycopy(blanks, 0, trits, TransactionViewModel.TRUNK_TRANSACTION_TRINARY_OFFSET-blanks.length, blanks.length); System.arraycopy(blanks, 0, trits, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_OFFSET-blanks.length, blanks.length); System.arraycopy(blanks, 0, trits, TransactionViewModel.BRANCH_TRANSACTION_TRINARY_OFFSET + TransactionViewModel.BRANCH_TRANSACTION_TRINARY_SIZE-blanks.length, blanks.length); - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); TransactionViewModel transactionViewModel = new TransactionViewModel(trits, hash); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); assertArrayEquals(transactionViewModel.trits(), TransactionViewModel.fromHash(tangle, transactionViewModel.getHash()).trits()); @@ -136,10 +136,10 @@ public void trits() throws Exception { @Test public void getBytes() throws Exception { for(int i=0; i++ < 1000;) { - 
byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); System.arraycopy(new byte[TransactionViewModel.VALUE_TRINARY_SIZE], 0, trits, TransactionViewModel.VALUE_TRINARY_OFFSET, TransactionViewModel.VALUE_TRINARY_SIZE); Converter.copyTrits(seed.nextLong(), trits, TransactionViewModel.VALUE_TRINARY_OFFSET, TransactionViewModel.VALUE_USABLE_TRINARY_SIZE); - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); TransactionViewModel transactionViewModel = new TransactionViewModel(trits, hash); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); assertArrayEquals(transactionViewModel.getBytes(), TransactionViewModel.fromHash(tangle, transactionViewModel.getHash()).getBytes()); @@ -295,13 +295,13 @@ public void getArrivalTime() throws Exception { public void updateHeightShouldWork() throws Exception { int count = 4; TransactionViewModel[] transactionViewModels = new TransactionViewModel[count]; - Hash hash = getRandomTransactionHash(); - transactionViewModels[0] = new TransactionViewModel(getTransactionWithTrunkAndBranch(Hash.NULL_HASH, + Hash hash = getTransactionHash(); + transactionViewModels[0] = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(Hash.NULL_HASH, Hash.NULL_HASH), hash); transactionViewModels[0].store(tangle, snapshotProvider.getInitialSnapshot()); for(int i = 0; ++i < count; ) { - transactionViewModels[i] = new TransactionViewModel(getTransactionWithTrunkAndBranch(hash, - Hash.NULL_HASH), hash = getRandomTransactionHash()); + transactionViewModels[i] = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hash, + Hash.NULL_HASH), hash = getTransactionHash()); transactionViewModels[i].store(tangle, snapshotProvider.getInitialSnapshot()); } @@ -316,10 +316,10 @@ public void updateHeightShouldWork() throws Exception { public void updateHeightPrefilledSlotShouldFail() throws Exception { int count = 4; TransactionViewModel[] transactionViewModels = new 
TransactionViewModel[count]; - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); for(int i = 0; ++i < count; ) { - transactionViewModels[i] = new TransactionViewModel(getTransactionWithTrunkAndBranch(hash, - Hash.NULL_HASH), hash = getRandomTransactionHash()); + transactionViewModels[i] = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hash, + Hash.NULL_HASH), hash = getTransactionHash()); transactionViewModels[i].store(tangle, snapshotProvider.getInitialSnapshot()); } @@ -332,7 +332,7 @@ public void updateHeightPrefilledSlotShouldFail() throws Exception { @Test public void findShouldBeSuccessful() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); TransactionViewModel transactionViewModel = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); Hash hash = transactionViewModel.getHash(); @@ -343,9 +343,9 @@ public void findShouldBeSuccessful() throws Exception { @Test public void findShouldReturnNull() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); TransactionViewModel transactionViewModel = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); - trits = getRandomTransactionTrits(); + trits = getTransactionTrits(); TransactionViewModel transactionViewModelNoSave = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); Hash hash = transactionViewModelNoSave.getHash(); @@ -358,7 +358,7 @@ public void testManyTXInDB() throws Exception { int i, j; LinkedList hashes = new LinkedList<>(); Hash hash; - hash = getRandomTransactionHash(); + hash = getTransactionHash(); hashes.add(hash); long start, diff, diffget; long subSumDiff=0,maxdiff=0, sumdiff = 0; @@ 
-366,13 +366,13 @@ public void testManyTXInDB() throws Exception { int interval1 = 50; int interval = interval1*10; log.info("Starting Test. #TX: {}", TransactionViewModel.getNumberOfStoredTransactions(tangle)); - new TransactionViewModel(getTransactionWithTrunkAndBranch(Hash.NULL_HASH, Hash.NULL_HASH), hash).store(tangle, snapshotProvider.getInitialSnapshot()); + new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(Hash.NULL_HASH, Hash.NULL_HASH), hash).store(tangle, snapshotProvider.getInitialSnapshot()); TransactionViewModel transactionViewModel; boolean pop = false; for (i = 0; i++ < max;) { - hash = getRandomTransactionHash(); + hash = getTransactionHash(); j = hashes.size(); - transactionViewModel = new TransactionViewModel(getTransactionWithTrunkAndBranch(hashes.get(seed.nextInt(j)), hashes.get(seed.nextInt(j))), hash); + transactionViewModel = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hashes.get(seed.nextInt(j)), hashes.get(seed.nextInt(j))), hash); start = System.nanoTime(); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); diff = System.nanoTime() - start; @@ -408,7 +408,7 @@ public void testManyTXInDB() throws Exception { @Test public void firstShouldFindTx() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); TransactionViewModel transactionViewModel = new TransactionViewModel(trits, TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits)); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); diff --git a/src/test/java/com/iota/iri/crypto/PearlDiverTest.java b/src/test/java/com/iota/iri/crypto/PearlDiverTest.java index 68b2655b63..f68fabdfbb 100644 --- a/src/test/java/com/iota/iri/crypto/PearlDiverTest.java +++ b/src/test/java/com/iota/iri/crypto/PearlDiverTest.java @@ -55,7 +55,7 @@ public void testInvalidTritsLength() { @Ignore("to test pearlDiver iteratively") public void testNoRandomFail() { for (int i = 0; i < 
10000; i++) { - byte[] trits = TransactionTestUtils.getRandomTransactionTrits(); + byte[] trits = TransactionTestUtils.getTransactionTrits(); pearlDiver.search(trits, MIN_WEIGHT_MAGNITUDE, NUM_CORES); Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits); for (int j = Hash.SIZE_IN_TRITS - 1; j > Hash.SIZE_IN_TRITS - MIN_WEIGHT_MAGNITUDE; j--) { diff --git a/src/test/java/com/iota/iri/model/HashTest.java b/src/test/java/com/iota/iri/model/HashTest.java index bdbb561916..17e854d64c 100644 --- a/src/test/java/com/iota/iri/model/HashTest.java +++ b/src/test/java/com/iota/iri/model/HashTest.java @@ -4,7 +4,7 @@ import com.iota.iri.crypto.SpongeFactory; import com.iota.iri.utils.Converter; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionTrits; +import static com.iota.iri.TransactionTestUtils.getTransactionTrits; import org.junit.Assert; import org.junit.Test; @@ -14,7 +14,7 @@ public class HashTest { @Test public void calculate() throws Exception { - Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, getRandomTransactionTrits()); + Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, getTransactionTrits()); Assert.assertNotEquals(0, hash.hashCode()); Assert.assertNotEquals(null, hash.bytes()); Assert.assertNotEquals(null, hash.trits()); @@ -22,7 +22,7 @@ public void calculate() throws Exception { @Test public void calculate1() throws Exception { - Hash hash = TransactionHash.calculate(getRandomTransactionTrits(), 0, 729, SpongeFactory.create(SpongeFactory.Mode.CURLP81)); + Hash hash = TransactionHash.calculate(getTransactionTrits(), 0, 729, SpongeFactory.create(SpongeFactory.Mode.CURLP81)); Assert.assertNotEquals(null, hash.bytes()); Assert.assertNotEquals(0, hash.hashCode()); Assert.assertNotEquals(null, hash.trits()); @@ -30,7 +30,7 @@ public void calculate1() throws Exception { @Test public void calculate2() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = 
getTransactionTrits(); byte[] bytes = Converter.allocateBytesForTrits(trits.length); Converter.bytes(trits, bytes); Hash hash = TransactionHash.calculate(bytes, TransactionViewModel.TRINARY_SIZE, SpongeFactory.create(SpongeFactory.Mode.CURLP81)); @@ -47,23 +47,23 @@ public void trailingZeros() throws Exception { @Test public void trits() throws Exception { - Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, getRandomTransactionTrits()); + Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, getTransactionTrits()); Assert.assertFalse(Arrays.equals(new byte[Hash.SIZE_IN_TRITS], hash.trits())); } @Test public void equals() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits); Hash hash1 = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits); Assert.assertTrue(hash.equals(hash1)); Assert.assertFalse(hash.equals(Hash.NULL_HASH)); - Assert.assertFalse(hash.equals(TransactionHash.calculate(SpongeFactory.Mode.CURLP81, getRandomTransactionTrits()))); + Assert.assertFalse(hash.equals(TransactionHash.calculate(SpongeFactory.Mode.CURLP81, getTransactionTrits()))); } @Test public void hashCodeTest() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits); Assert.assertNotEquals(hash.hashCode(), 0); Assert.assertEquals(Hash.NULL_HASH.hashCode(), -240540129); @@ -71,7 +71,7 @@ public void hashCodeTest() throws Exception { @Test public void toStringTest() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits); Assert.assertEquals(Hash.NULL_HASH.toString(), "999999999999999999999999999999999999999999999999999999999999999999999999999999999"); Assert.assertNotEquals(hash.toString(), 
"999999999999999999999999999999999999999999999999999999999999999999999999999999999"); @@ -81,7 +81,7 @@ public void toStringTest() throws Exception { @Test public void bytes() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits); Assert.assertTrue(Arrays.equals(new byte[Hash.SIZE_IN_BYTES], Hash.NULL_HASH.bytes())); Assert.assertFalse(Arrays.equals(new byte[Hash.SIZE_IN_BYTES], hash.bytes())); @@ -90,7 +90,7 @@ public void bytes() throws Exception { @Test public void compareTo() throws Exception { - byte[] trits = getRandomTransactionTrits(); + byte[] trits = getTransactionTrits(); Hash hash = TransactionHash.calculate(SpongeFactory.Mode.CURLP81, trits); Assert.assertEquals(hash.compareTo(Hash.NULL_HASH), -Hash.NULL_HASH.compareTo(hash)); } diff --git a/src/test/java/com/iota/iri/network/TransactionRequesterTest.java b/src/test/java/com/iota/iri/network/TransactionRequesterTest.java index b4f02e88d3..72b909a9e6 100644 --- a/src/test/java/com/iota/iri/network/TransactionRequesterTest.java +++ b/src/test/java/com/iota/iri/network/TransactionRequesterTest.java @@ -5,12 +5,11 @@ import com.iota.iri.service.snapshot.SnapshotProvider; import com.iota.iri.service.snapshot.impl.SnapshotProviderImpl; import com.iota.iri.storage.Tangle; -import com.iota.iri.zmq.MessageQ; import org.junit.After; import org.junit.Before; import org.junit.Test; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; import java.util.ArrayList; import java.util.Arrays; @@ -22,7 +21,6 @@ public class TransactionRequesterTest { private static Tangle tangle = new Tangle(); private static SnapshotProvider snapshotProvider; - private MessageQ mq; @Before public void setUp() throws Exception { @@ -81,13 +79,13 @@ public void instance() throws Exception { @Test public void 
popEldestTransactionToRequest() throws Exception { - TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider, mq); + TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider); // Add some Txs to the pool and see if the method pops the eldest one - Hash eldest = getRandomTransactionHash(); + Hash eldest = getTransactionHash(); txReq.requestTransaction(eldest, false); - txReq.requestTransaction(getRandomTransactionHash(), false); - txReq.requestTransaction(getRandomTransactionHash(), false); - txReq.requestTransaction(getRandomTransactionHash(), false); + txReq.requestTransaction(getTransactionHash(), false); + txReq.requestTransaction(getTransactionHash(), false); + txReq.requestTransaction(getTransactionHash(), false); txReq.popEldestTransactionToRequest(); // Check that the transaction is there no more @@ -98,18 +96,18 @@ public void popEldestTransactionToRequest() throws Exception { public void transactionRequestedFreshness() throws Exception { // Add some Txs to the pool and see if the method pops the eldest one List eldest = new ArrayList(Arrays.asList( - getRandomTransactionHash(), - getRandomTransactionHash(), - getRandomTransactionHash() + getTransactionHash(), + getTransactionHash(), + getTransactionHash() )); - TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider, mq); + TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider); int capacity = TransactionRequester.MAX_TX_REQ_QUEUE_SIZE; //fill tips list for (int i = 0; i < 3; i++) { txReq.requestTransaction(eldest.get(i), false); } for (int i = 0; i < capacity; i++) { - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); txReq.requestTransaction(hash,false); } @@ -123,11 +121,11 @@ public void transactionRequestedFreshness() throws Exception { @Test public void nonMilestoneCapacityLimited() throws Exception { - TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider, 
mq); + TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider); int capacity = TransactionRequester.MAX_TX_REQ_QUEUE_SIZE; //fill tips list for (int i = 0; i < capacity * 2 ; i++) { - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); txReq.requestTransaction(hash,false); } //check that limit wasn't breached @@ -136,11 +134,11 @@ public void nonMilestoneCapacityLimited() throws Exception { @Test public void milestoneCapacityNotLimited() throws Exception { - TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider, mq); + TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider); int capacity = TransactionRequester.MAX_TX_REQ_QUEUE_SIZE; //fill tips list for (int i = 0; i < capacity * 2 ; i++) { - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); txReq.requestTransaction(hash,true); } //check that limit was surpassed @@ -149,11 +147,11 @@ public void milestoneCapacityNotLimited() throws Exception { @Test public void mixedCapacityLimited() throws Exception { - TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider, mq); + TransactionRequester txReq = new TransactionRequester(tangle, snapshotProvider); int capacity = TransactionRequester.MAX_TX_REQ_QUEUE_SIZE; //fill tips list for (int i = 0; i < capacity * 4 ; i++) { - Hash hash = getRandomTransactionHash(); + Hash hash = getTransactionHash(); txReq.requestTransaction(hash, (i % 2 == 1)); } diff --git a/src/test/java/com/iota/iri/network/impl/TransactionRequesterWorkerImplTest.java b/src/test/java/com/iota/iri/network/impl/TransactionRequesterWorkerImplTest.java index 91060e854b..5bde327817 100644 --- a/src/test/java/com/iota/iri/network/impl/TransactionRequesterWorkerImplTest.java +++ b/src/test/java/com/iota/iri/network/impl/TransactionRequesterWorkerImplTest.java @@ -20,15 +20,13 @@ import com.iota.iri.model.persistables.Transaction; import com.iota.iri.network.Node; import 
com.iota.iri.network.TransactionRequester; -import com.iota.iri.network.impl.TransactionRequesterWorkerImpl; import com.iota.iri.service.snapshot.SnapshotProvider; import com.iota.iri.storage.Tangle; -import com.iota.iri.zmq.MessageQ; -import static com.iota.iri.TransactionTestUtils.getRandomTransaction; +import static com.iota.iri.TransactionTestUtils.getTransaction; import static com.iota.iri.TransactionTestUtils.get9Transaction; import static com.iota.iri.TransactionTestUtils.buildTransaction; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; import static org.mockito.Mockito.when; @@ -36,13 +34,13 @@ public class TransactionRequesterWorkerImplTest { //Good private static final TransactionViewModel TVMRandomNull = new TransactionViewModel( - getRandomTransaction(), Hash.NULL_HASH); + getTransaction(), Hash.NULL_HASH); private static final TransactionViewModel TVMRandomNotNull = new TransactionViewModel( - getRandomTransaction(), getRandomTransactionHash()); + getTransaction(), getTransactionHash()); private static final TransactionViewModel TVMAll9Null = new TransactionViewModel( get9Transaction(), Hash.NULL_HASH); private static final TransactionViewModel TVMAll9NotNull = new TransactionViewModel( - get9Transaction(), getRandomTransactionHash()); + get9Transaction(), getTransactionHash()); //Bad private static final TransactionViewModel TVMNullNull = new TransactionViewModel((Transaction)null, Hash.NULL_HASH); @@ -53,9 +51,6 @@ public class TransactionRequesterWorkerImplTest { @Mock(answer = Answers.RETURNS_DEEP_STUBS) private static SnapshotProvider snapshotProvider; - @Mock - private static MessageQ messageQ; - private static TransactionRequester requester; private static TransactionRequesterWorkerImpl worker; @@ -70,7 +65,7 @@ public class TransactionRequesterWorkerImplTest { @Before public void before() { - requester = new TransactionRequester(tangle, 
snapshotProvider, messageQ); + requester = new TransactionRequester(tangle, snapshotProvider); worker = new TransactionRequesterWorkerImpl(); worker.init(tangle, requester, tipsVM, node); @@ -146,7 +141,7 @@ private void fillRequester() throws Exception { } private void addRequest() throws Exception { - Hash randomHash = getRandomTransactionHash(); + Hash randomHash = getTransactionHash(); TangleMockUtils.mockTransaction(tangle, randomHash); requester.requestTransaction(randomHash, false); } diff --git a/src/test/java/com/iota/iri/service/APIIntegrationTests.java b/src/test/java/com/iota/iri/service/APIIntegrationTests.java index 73152876a8..a3b90e061b 100644 --- a/src/test/java/com/iota/iri/service/APIIntegrationTests.java +++ b/src/test/java/com/iota/iri/service/APIIntegrationTests.java @@ -10,6 +10,7 @@ import com.iota.iri.controllers.TransactionViewModel; import com.iota.iri.crypto.SpongeFactory; import com.iota.iri.model.TransactionHash; +import com.iota.iri.service.restserver.resteasy.RestEasy; import com.iota.iri.utils.Converter; import com.jayway.restassured.RestAssured; import com.jayway.restassured.builder.ResponseSpecBuilder; @@ -83,20 +84,24 @@ public static void setUp() throws Exception { logFolder.create(); configuration = ConfigFactory.createIotaConfig(true); - String[] args = {"-p", portStr, "--testnet", "--db-path", dbFolder.getRoot().getAbsolutePath(), "--db-log-path", + String[] args = {"-p", portStr, "--testnet", "true", "--db-path", dbFolder.getRoot().getAbsolutePath(), "--db-log-path", logFolder.getRoot().getAbsolutePath(), "--mwm", "1"}; configuration.parseConfigFromArgs(args); //create node iota = new Iota(configuration); ixi = new IXI(iota); - api = new API(iota, ixi); + api = new API(iota.configuration, ixi, iota.transactionRequester, + iota.spentAddressesService, iota.tangle, iota.bundleValidator, + iota.snapshotProvider, iota.ledgerService, iota.node, iota.tipsSelector, + iota.tipsViewModel, iota.transactionValidator, + 
iota.latestMilestoneTracker); //init try { iota.init(); iota.snapshotProvider.getInitialSnapshot().setTimestamp(0); - api.init(); + api.init(new RestEasy(configuration)); ixi.init(IXIConfig.IXI_DIR); } catch (final Exception e) { log.error("Exception during IOTA node initialisation: ", e); diff --git a/src/test/java/com/iota/iri/service/ApiCallTest.java b/src/test/java/com/iota/iri/service/ApiCallTest.java new file mode 100644 index 0000000000..3d00328542 --- /dev/null +++ b/src/test/java/com/iota/iri/service/ApiCallTest.java @@ -0,0 +1,28 @@ +package com.iota.iri.service; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import com.iota.iri.conf.IotaConfig; + +public class ApiCallTest { + + private API api; + + @Before + public void setUp() { + IotaConfig configuration = Mockito.mock(IotaConfig.class); + api = new API(configuration, null, null, null, null, null, null, null, null, null, null, null, null); + } + + @Test + public void apiHasAllEnums() { + for (ApiCommand c : ApiCommand.values()) { + if (!api.commandRoute.containsKey(c)) { + Assert.fail("Api should contain all enum values"); + } + } + } +} diff --git a/src/test/java/com/iota/iri/service/NodeIntegrationTests.java b/src/test/java/com/iota/iri/service/NodeIntegrationTests.java index 47c93ad2c2..04b0d48051 100644 --- a/src/test/java/com/iota/iri/service/NodeIntegrationTests.java +++ b/src/test/java/com/iota/iri/service/NodeIntegrationTests.java @@ -12,6 +12,7 @@ import com.iota.iri.model.Hash; import com.iota.iri.model.HashFactory; import com.iota.iri.network.Node; +import com.iota.iri.service.restserver.resteasy.RestEasy; import com.iota.iri.utils.Converter; import org.apache.commons.lang3.ArrayUtils; import org.junit.After; @@ -52,8 +53,14 @@ public void testGetsSolid() throws Exception { iotaNodes[i] = newNode(i, folders[i*2], folders[i*2+1]); ixi[i] = new IXI(iotaNodes[i]); ixi[i].init(IXIConfig.IXI_DIR); - api[i] = new API(iotaNodes[i], 
ixi[i]); - api[i].init(); + + api[i] = new API(iotaNodes[i].configuration, ixi[i], iotaNodes[i].transactionRequester, + iotaNodes[i].spentAddressesService, iotaNodes[i].tangle, iotaNodes[i].bundleValidator, + iotaNodes[i].snapshotProvider, iotaNodes[i].ledgerService, iotaNodes[i].node, + iotaNodes[i].tipsSelector, iotaNodes[i].tipsViewModel, iotaNodes[i].transactionValidator, + iotaNodes[i].latestMilestoneTracker); + + api[i].init(new RestEasy(iotaNodes[i].configuration)); } Node.uri("udp://localhost:14701").ifPresent(uri -> iotaNodes[0].node.addNeighbor(iotaNodes[0].node.newNeighbor(uri, true))); //Node.uri("udp://localhost:14700").ifPresent(uri -> iotaNodes[1].node.addNeighbor(iotaNodes[1].node.newNeighbor(uri, true))); diff --git a/src/test/java/com/iota/iri/service/restserver/RestEasyTest.java b/src/test/java/com/iota/iri/service/restserver/RestEasyTest.java new file mode 100644 index 0000000000..f62f9f05d7 --- /dev/null +++ b/src/test/java/com/iota/iri/service/restserver/RestEasyTest.java @@ -0,0 +1,135 @@ +package com.iota.iri.service.restserver; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.net.InetAddress; +import java.util.Base64; + +import javax.ws.rs.client.Client; +import javax.ws.rs.client.ClientBuilder; +import javax.ws.rs.client.Entity; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import org.jboss.resteasy.test.TestPortProvider; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +import com.iota.iri.conf.APIConfig; +import com.iota.iri.service.dto.ErrorResponse; +import com.iota.iri.service.dto.GetNodeInfoResponse; +import com.iota.iri.service.restserver.resteasy.RestEasy; + +public class RestEasyTest { + + private static final String USER_PASS = "user:pass"; + + @Rule + public 
MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock + private APIConfig apiconfig; + + + private RestEasy server; + + @Before + public void setUp() { + Mockito.when(apiconfig.getPort()).thenReturn(TestPortProvider.getPort()); + Mockito.when(apiconfig.getApiHost()).thenReturn(TestPortProvider.getHost()); + Mockito.when(apiconfig.getMaxBodyLength()).thenReturn(Integer.MAX_VALUE); + } + + @After + public void tearDown() { + this.server.stop(); + } + + @Test + public void nodeInfoMissingApiVersion() { + this.server = new RestEasy(apiconfig); + this.server.init((String param, InetAddress address) -> { + return GetNodeInfoResponse.createEmptyResponse(); + }); + this.server.start(); + + Client client = ClientBuilder.newClient(); + String jsonString = "{\"command\": \"getNodeInfo\"}"; + Response val = client.target(TestPortProvider.generateURL("/")) + .request() + .post(Entity.entity(jsonString, MediaType.APPLICATION_JSON)); + ErrorResponse response = val.readEntity(ErrorResponse.class); + assertEquals("API version should be required in the header", "Invalid API Version", response.getError()); + } + + @Test + public void nodeInfoValid() { + this.server = new RestEasy(apiconfig); + this.server.init((String param, InetAddress address) -> { + return GetNodeInfoResponse.createEmptyResponse(); + }); + this.server.start(); + + Client client = ClientBuilder.newClient(); + String jsonString = "{\"command\": \"getNodeInfo\"}"; + Response val = client.target(TestPortProvider.generateURL("/")) + .request() + .header("X-IOTA-API-Version", "1") + .post(Entity.entity(jsonString, MediaType.APPLICATION_JSON)); + + GetNodeInfoResponse response = val.readEntity(GetNodeInfoResponse.class); + assertNotNull("Response should not be parseable as a GetNodeInfoResponse", response); + } + + @Test + public void notAllowed() { + Mockito.when(apiconfig.getRemoteAuth()).thenReturn(USER_PASS); + + this.server = new RestEasy(apiconfig); + this.server.init((String param, InetAddress address) -> { + 
return GetNodeInfoResponse.createEmptyResponse(); + }); + this.server.start(); + + Client client = ClientBuilder.newClient(); + String jsonString = "{\"command\": \"getNodeInfo\"}"; + Response val = client.target(TestPortProvider.generateURL("/")) + .request() + .header("X-IOTA-API-Version", "1") + .post(Entity.entity(jsonString, MediaType.APPLICATION_JSON)); + + assertEquals("Request should be denied due to lack of authentication", + Response.Status.UNAUTHORIZED, val.getStatusInfo()); + } + + @Test + public void allowed() { + Mockito.when(apiconfig.getRemoteAuth()).thenReturn(USER_PASS); + + this.server = new RestEasy(apiconfig); + this.server.init((String param, InetAddress address) -> { + return GetNodeInfoResponse.createEmptyResponse(); + }); + this.server.start(); + + Client client = ClientBuilder.newClient(); + String jsonString = "{\"command\": \"getNodeInfo\"}"; + + String encoded = Base64.getEncoder().encodeToString(USER_PASS.getBytes()); + + Response val = client.target(TestPortProvider.generateURL("/")) + .request() + .header("X-IOTA-API-Version", "1") + .header("Authorization", "Basic " + encoded) + .post(Entity.entity(jsonString, MediaType.APPLICATION_JSON)); + + assertEquals("Request should be accepted as we authenticated", Response.Status.OK, val.getStatusInfo()); + } +} diff --git a/src/test/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImplTest.java b/src/test/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImplTest.java new file mode 100644 index 0000000000..b1dd52bbb5 --- /dev/null +++ b/src/test/java/com/iota/iri/service/snapshot/impl/LocalSnapshotManagerImplTest.java @@ -0,0 +1,149 @@ +package com.iota.iri.service.snapshot.impl; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.any; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import 
org.junit.Test; +import org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.exceptions.base.MockitoAssertionError; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +import com.iota.iri.conf.SnapshotConfig; +import com.iota.iri.service.milestone.LatestMilestoneTracker; +import com.iota.iri.service.snapshot.SnapshotException; +import com.iota.iri.service.snapshot.SnapshotProvider; +import com.iota.iri.service.snapshot.SnapshotService; +import com.iota.iri.service.transactionpruning.TransactionPruner; +import com.iota.iri.utils.thread.ThreadUtils; + +public class LocalSnapshotManagerImplTest { + + private static final int BUFFER = LocalSnapshotManagerImpl.LOCAL_SNAPSHOT_SYNC_BUFFER; + + private static final int DELAY_SYNC = 5; + private static final int DELAY_UNSYNC = 1; + private static final int SNAPSHOT_DEPTH = 5; + + @Rule + public MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock + private static SnapshotConfig config; + + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + SnapshotProvider snapshotProvider; + + @Mock + SnapshotService snapshotService; + + @Mock + TransactionPruner transactionPruner; + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + LatestMilestoneTracker milestoneTracker; + + private LocalSnapshotManagerImpl lsManager; + + @Before + public void setUp() throws Exception { + this.lsManager = new LocalSnapshotManagerImpl(); + + lsManager.init(snapshotProvider, snapshotService, transactionPruner, config); + when(snapshotProvider.getLatestSnapshot().getIndex()).thenReturn(-5, -1, 10, 998, 999, 1999, 2000); + + when(config.getLocalSnapshotsIntervalSynced()).thenReturn(DELAY_SYNC); + when(config.getLocalSnapshotsIntervalUnsynced()).thenReturn(DELAY_UNSYNC); + when(config.getLocalSnapshotsDepth()).thenReturn(SNAPSHOT_DEPTH); + } + + @After + public void tearDown() { + lsManager.shutdown(); + } + + @Test + public synchronized void takeLocalSnapshot() throws SnapshotException { + // Always return true + 
when(milestoneTracker.isInitialScanComplete()).thenReturn(true); + + // When we call it, we are in sync + when(milestoneTracker.getLatestMilestoneIndex()).thenReturn(-5); + + // We are more then the depth ahead + when(snapshotProvider.getLatestSnapshot().getIndex()).thenReturn(100); + when(snapshotProvider.getInitialSnapshot().getIndex()).thenReturn(100 - SNAPSHOT_DEPTH - DELAY_SYNC - 1); + + // Run in separate thread to allow us to time-out + Thread t = new Thread(() -> lsManager.monitorThread(milestoneTracker)); + + t.start(); + // We should finish directly, margin for slower computers + ThreadUtils.sleep(100); + + // Cancel the thread + t.interrupt(); + + // Verify we took a snapshot + try { + verify(snapshotService, times(1)).takeLocalSnapshot(any(), any()); + } catch (MockitoAssertionError e) { + throw new MockitoAssertionError("A snapshot should have been taken when we are below SNAPSHOT_DEPTH"); + } + } + + @Test + public void isInSyncTestScanIncomplete() { + when(milestoneTracker.isInitialScanComplete()).thenReturn(false); + + assertFalse("We should be out of sync when he havent finished initial scans", lsManager.isInSync(milestoneTracker)); + } + + @Test + public void isInSyncTestScanComplete() { + // Always return true + when(milestoneTracker.isInitialScanComplete()).thenReturn(true); + + // We don't really support -1 indexes, but if this breaks, it is a good indication to be careful going further + when(milestoneTracker.getLatestMilestoneIndex()).thenReturn(-1, 5, 10, 998 + BUFFER - 1, 2000); + + // snapshotProvider & milestoneTracker + // -5 & -1 -> not in sync + assertFalse("Previous out of sync and not equal index should not be in sync", lsManager.isInSync(milestoneTracker)); + + // -1 and 5 -> not in sync + assertFalse("Previous out of sync and not equal index should not be in sync", lsManager.isInSync(milestoneTracker)); + + // 10 and 10 -> in sync + assertTrue("Equal index should be in sync", lsManager.isInSync(milestoneTracker)); + + // 998 and 
1002 -> in sync since sync gap = 5 + assertTrue("Index difference less than the buffer still should be in sync", lsManager.isInSync(milestoneTracker)); + + // 999 and 2000 -> out of sync again, bigger gap than 5 + assertFalse("Index difference more than the buffer should be out of sync again ", lsManager.isInSync(milestoneTracker)); + + // 1999 and 2000 -> out of sync still + assertFalse("Previous out of sync and not equal index should not be in sync", lsManager.isInSync(milestoneTracker)); + + // 2000 and 2000 -> in sync again + assertTrue("Equal index should be in sync", lsManager.isInSync(milestoneTracker)); + } + + @Test + public void getDelayTest() { + assertEquals("Out of sync should return the config value at getLocalSnapshotsIntervalUnsynced", + DELAY_UNSYNC, lsManager.getSnapshotInterval(false)); + + assertEquals("In sync should return the config value at getLocalSnapshotsIntervalSynced", + DELAY_SYNC, lsManager.getSnapshotInterval(true)); + } +} diff --git a/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotImplTest.java b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotImplTest.java new file mode 100644 index 0000000000..2ac294fa06 --- /dev/null +++ b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotImplTest.java @@ -0,0 +1,68 @@ +package com.iota.iri.service.snapshot.impl; + +import static org.junit.Assert.*; + +import java.util.HashMap; + +import org.junit.Before; +import org.junit.Test; + +import com.iota.iri.TransactionTestUtils; +import com.iota.iri.model.Hash; +import com.iota.iri.service.snapshot.Snapshot; +import com.iota.iri.service.snapshot.SnapshotMetaData; +import com.iota.iri.service.snapshot.SnapshotState; + +public class SnapshotImplTest { + + private static SnapshotState state; + + private static SnapshotMetaData metaData; + + @Before + public void setUp() throws Exception { + state = new SnapshotStateImpl(new HashMap<>()); + metaData = new SnapshotMetaDataImpl(Hash.NULL_HASH, 1, 1l, new HashMap<>(), new 
HashMap<>()); + } + + @Test + public void skippedMilestoneTest() { + Snapshot snapshot = new SnapshotImpl(state, metaData); + assertTrue("Not previously seen milestone should be accepted", snapshot.addSkippedMilestone(1)); + + assertFalse("Previously seen milestone should not be accepted", snapshot.addSkippedMilestone(1)); + assertTrue("Skipped milestone should be removed correctly", snapshot.removeSkippedMilestone(1)); + assertFalse("Not skipped milestone should fail to get removed", snapshot.removeSkippedMilestone(1)); + } + + @Test + public void updateTest() { + Snapshot snapshot = new SnapshotImpl(state, metaData); + snapshot.setIndex(0); + snapshot.setHash(Hash.NULL_HASH); + snapshot.setInitialTimestamp(1l); + + Snapshot newSnapshot = snapshot.clone(); + newSnapshot.setIndex(1); + snapshot.setHash(TransactionTestUtils.getTransactionHash()); + snapshot.setInitialTimestamp(5l); + + assertNotEquals("Modified snapshot clone should not be equal to its original", snapshot, newSnapshot); + snapshot.update(newSnapshot); + assertEquals("Updating a snapshot with another snapshot should make them equal", snapshot, newSnapshot); + } + + @Test + public void cloneTest() { + Snapshot oldSnapshot = new SnapshotImpl(state, metaData); + Snapshot newSnapshot = oldSnapshot.clone(); + + assertEquals("A clone of a snapshot is equal to its original", oldSnapshot, newSnapshot); + + oldSnapshot.addSkippedMilestone(1); + + // Clone shouldnt have the skipped milestone + assertFalse("Adding a value to a clone should be reflected on the original", newSnapshot.removeSkippedMilestone(1)); + assertNotEquals("A clone should not be equal to its original after modification", oldSnapshot, newSnapshot); + } +} diff --git a/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotMetaDataImplTest.java b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotMetaDataImplTest.java new file mode 100644 index 0000000000..7e41c4696b --- /dev/null +++ 
b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotMetaDataImplTest.java @@ -0,0 +1,124 @@ +package com.iota.iri.service.snapshot.impl; + +import static org.junit.Assert.*; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.junit.Before; +import org.junit.Test; + +import com.iota.iri.TransactionTestUtils; +import com.iota.iri.conf.BaseIotaConfig; +import com.iota.iri.model.Hash; +import com.iota.iri.service.snapshot.SnapshotMetaData; + +public class SnapshotMetaDataImplTest { + + private static final Hash A = TransactionTestUtils.getTransactionHash(); + private static final Hash B = TransactionTestUtils.getTransactionHash(); + private static final Hash C = TransactionTestUtils.getTransactionHash(); + private static final Hash D = TransactionTestUtils.getTransactionHash(); + + private static Map solidEntryPoints = new HashMap(){{ + put(A, 1); + put(B, 2); + put(C, -1); + }}; + + private static Map seenMilestones = new HashMap(){{ + put(A, 10); + put(B, 11); + put(C, 12); + put(D, 13); + }}; + + private SnapshotMetaDataImpl meta; + + @Before + public void setUp() { + meta = new SnapshotMetaDataImpl(A, + BaseIotaConfig.Defaults.MILESTONE_START_INDEX, + BaseIotaConfig.Defaults.GLOBAL_SNAPSHOT_TIME, + solidEntryPoints, + seenMilestones); + } + + @Test + public void initialIndexTest(){ + assertEquals("Initial index should be equal to the one provided", + meta.getInitialIndex(), BaseIotaConfig.Defaults.MILESTONE_START_INDEX); + assertEquals("Current index should be equal to the initial index", + meta.getIndex(), BaseIotaConfig.Defaults.MILESTONE_START_INDEX); + + meta.setIndex(BaseIotaConfig.Defaults.MILESTONE_START_INDEX + 1); + assertNotEquals("Initial index should not be the same as current index after setting", + meta.getInitialIndex(), meta.getIndex()); + } + + @Test + public void initialTimestampTest(){ + assertEquals("Initial timestamp should be equal to the one provided", + meta.getInitialTimestamp(), 
BaseIotaConfig.Defaults.GLOBAL_SNAPSHOT_TIME); + assertEquals("Current timestamp should be equal to the initial timestamp", + meta.getTimestamp(), BaseIotaConfig.Defaults.GLOBAL_SNAPSHOT_TIME); + + meta.setTimestamp(BaseIotaConfig.Defaults.GLOBAL_SNAPSHOT_TIME + 1); + assertNotEquals("Initial timestamp should not be the same as current timestamp after setting", + meta.getInitialTimestamp(), meta.getTimestamp()); + } + + @Test + public void hashTest(){ + assertEquals("Initial hash should be equal to the one provided", meta.getInitialHash(), A); + + assertEquals("Current hash should be equal to the initial hash", meta.getHash(), A); + + meta.setHash(B); + assertNotEquals("Initial hash should not be the same as current hash after setting", meta.getInitialHash(), meta.getHash()); + } + + @Test + public void solidEntryPointsTest(){ + assertTrue("We should have the entry point provided on start", meta.hasSolidEntryPoint(A)); + assertTrue("We should have the entry point provided on start", meta.hasSolidEntryPoint(B)); + assertTrue("We should have the entry point provided on start", meta.hasSolidEntryPoint(C)); + + assertEquals("Index from entry should be to the one set to the hash", 1, meta.getSolidEntryPointIndex(A)); + assertEquals("Index from entry should be to the one set to the hash", 2, meta.getSolidEntryPointIndex(B)); + + // Test -1 to ensure, if we ever enforce this positive, something could break + // We don't really support -1 indexes, but if this breaks, it is a good indication to be careful going further + assertEquals("Index from entry should be to the one set to the hash", -1, meta.getSolidEntryPointIndex(C)); + + assertEquals("Solid entries amount should be the same as the ones provided", meta.getSolidEntryPoints().size(), solidEntryPoints.size()); + assertEquals("Solid entries should be the same as the ones provided", meta.getSolidEntryPoints(), new HashMap<>(solidEntryPoints)); + + meta.setSolidEntryPoints(seenMilestones); + // Right now, the map is 
replaced, so none are 'new' or 'existing'. + assertEquals("Existing entrypoints should have a new index", 10, meta.getSolidEntryPointIndex(A)); + assertEquals("Existing entrypoints should have a new index", 11, meta.getSolidEntryPointIndex(B)); + assertEquals("Existing entrypoints should have a new index", 12, meta.getSolidEntryPointIndex(C)); + assertEquals("New entry point added should exist and equal to the index provided", 13, meta.getSolidEntryPointIndex(D)); + } + + @Test + public void seenMilestonesTest(){ + assertEquals("Seen milestones amount should be the same as the ones provided", + meta.getSeenMilestones().size(), seenMilestones.size()); + assertEquals("Seen milestones should be the same as the ones provided", + meta.getSeenMilestones(), new HashMap<>(seenMilestones)); + } + + @Test + public void createTest(){ + SnapshotMetaData newMetaData = new SnapshotMetaDataImpl(meta); + assertEquals("new SnapshotMetaData made from another should be equal", newMetaData, meta); + + newMetaData = new SnapshotMetaDataImpl(Hash.NULL_HASH, 0, 0l, Collections.EMPTY_MAP, Collections.EMPTY_MAP); + newMetaData.update(meta); + assertEquals("Updating a SnapshotMetaData with another should make them equal", newMetaData, meta); + } + +} diff --git a/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotMockUtils.java b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotMockUtils.java index 8d17c1b902..ff7e642ba7 100644 --- a/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotMockUtils.java +++ b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotMockUtils.java @@ -20,20 +20,60 @@ public class SnapshotMockUtils { //region [mockSnapshotProvider] //////////////////////////////////////////////////////////////////////////////////// + /** + * Properly imitates a snapshot provider by making a real initial and latest snapshot. + * The balance of this provider is made to let the DEFAULT_GENESIS_ADDRESS (Null hash) have the entire IOTA supply. 
+ * Genesis timestamp set to {@value #DEFAULT_GENESIS_TIMESTAMP}. + * Initial snapshot hash set to DEFAULT_GENESIS_ADDRESS. + * Starting index is {@value #DEFAULT_MILESTONE_START_INDEX} + * + * @param snapshotProvider The provider we are mocking. Must be a Mockito Mocked object + * @return The supplied snapshotProvider object + */ public static SnapshotProvider mockSnapshotProvider(SnapshotProvider snapshotProvider) { return mockSnapshotProvider(snapshotProvider, DEFAULT_MILESTONE_START_INDEX); } + /** + * Properly imitates a snapshot provider by making a real initial and latest snapshot. + * The balance of this provider is made to let the DEFAULT_GENESIS_ADDRESS (Null hash) have the entire IOTA supply. + * Genesis timestamp set to {@value #DEFAULT_GENESIS_TIMESTAMP}. + * Initial snapshot hash set to DEFAULT_GENESIS_ADDRESS. + * + * @param snapshotProvider The provider we are mocking. Must be a Mockito Mocked object + * @param milestoneStartIndex The index we use for the genesis/initial snapshot + * @return The supplied snapshotProvider object + */ public static SnapshotProvider mockSnapshotProvider(SnapshotProvider snapshotProvider, int milestoneStartIndex) { return mockSnapshotProvider(snapshotProvider, milestoneStartIndex, DEFAULT_GENESIS_HASH); } + /** + * Properly imitates a snapshot provider by making a real initial and latest snapshot. + * The balance of this provider is made to let the DEFAULT_GENESIS_ADDRESS (Null hash) have the entire IOTA supply. + * Genesis timestamp set to {@value #DEFAULT_GENESIS_TIMESTAMP} + * + * @param snapshotProvider The provider we are mocking. 
Must be a Mockito Mocked object + * @param milestoneStartIndex The index we use for the genesis/initial snapshot + * @param genesisHash The Genesis hash + * @return The supplied snapshotProvider object + */ public static SnapshotProvider mockSnapshotProvider(SnapshotProvider snapshotProvider, int milestoneStartIndex, Hash genesisHash) { return mockSnapshotProvider(snapshotProvider, milestoneStartIndex, genesisHash, DEFAULT_GENESIS_TIMESTAMP); } + /** + * Properly imitates a snapshot provider by making a real initial and latest snapshot. + * The balance of this provider is made to let the DEFAULT_GENESIS_ADDRESS (Null hash) have the entire IOTA supply. + * + * @param snapshotProvider The provider we are mocking. Must be a Mockito Mocked object + * @param milestoneStartIndex The index we use for the genesis/initial snapshot + * @param genesisHash The Genesis hash + * @param genesisTimestamp The timestamp of the initial snapshot creation + * @return The supplied snapshotProvider object + */ public static SnapshotProvider mockSnapshotProvider(SnapshotProvider snapshotProvider, int milestoneStartIndex, Hash genesisHash, long genesisTimestamp) { @@ -43,6 +83,16 @@ public static SnapshotProvider mockSnapshotProvider(SnapshotProvider snapshotPro return mockSnapshotProvider(snapshotProvider, milestoneStartIndex, genesisHash, genesisTimestamp, balances); } + /** + * Properly imitates a snapshot provider by making a real initial and latest snapshot + * + * @param snapshotProvider The provider we are mocking. 
Must be a Mockito Mocked object + * @param milestoneStartIndex The index we use for the genesis/initial snapshot + * @param genesisHash The Genesis hash + * @param genesisTimestamp The timestamp of the initial snapshot creation + * @param balances The balances to add to the provider + * @return The supplied snapshotProvider object + */ public static SnapshotProvider mockSnapshotProvider(SnapshotProvider snapshotProvider, int milestoneStartIndex, Hash genesisHash, long genesisTimestamp, Map balances) { diff --git a/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImplTest.java b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImplTest.java new file mode 100644 index 0000000000..8e51f88648 --- /dev/null +++ b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotProviderImplTest.java @@ -0,0 +1,59 @@ +package com.iota.iri.service.snapshot.impl; + +import static org.junit.Assert.*; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import com.iota.iri.conf.ConfigFactory; +import com.iota.iri.conf.IotaConfig; +import com.iota.iri.model.Hash; +import com.iota.iri.service.snapshot.SnapshotException; +import com.iota.iri.service.spentaddresses.SpentAddressesException; + +public class SnapshotProviderImplTest { + + private SnapshotProviderImpl provider; + + private SnapshotImpl cachedBuildinSnapshot; + + @Before + public void setUp(){ + provider = new SnapshotProviderImpl(); + + // When running multiple tests, the static cached snapshot breaks this test + cachedBuildinSnapshot = SnapshotProviderImpl.builtinSnapshot; + SnapshotProviderImpl.builtinSnapshot = null; + } + + @After + public void tearDown(){ + provider.shutdown(); + + // Set back the cached snapshot for tests after us who might use it + SnapshotProviderImpl.builtinSnapshot = cachedBuildinSnapshot; + } + + @Test + public void testGetLatestSnapshot() throws SnapshotException, SpentAddressesException { + IotaConfig iotaConfig = 
ConfigFactory.createIotaConfig(true); + provider.init(iotaConfig); + + // If we run this on its own, it correctly takes the testnet milestone + // However, running it with all tests makes it load the last global snapshot contained in the jar + assertEquals("Initial snapshot index should be the same as the milestone start index", + iotaConfig.getMilestoneStartIndex(), provider.getInitialSnapshot().getIndex()); + + assertEquals("Initial snapshot timestamp should be the same as last snapshot time", + iotaConfig.getSnapshotTime(), provider.getInitialSnapshot().getInitialTimestamp()); + + assertEquals("Initial snapshot hash should be the genesis transaction", + Hash.NULL_HASH, provider.getInitialSnapshot().getHash()); + + assertEquals("Initial provider snapshot should be equal to the latest snapshot", + provider.getInitialSnapshot(), provider.getLatestSnapshot()); + + assertTrue("Initial snapshot should have a filled map of addresses", provider.getInitialSnapshot().getBalances().size() > 0); + assertTrue("Initial snapshot supply should be equal to all supply", provider.getInitialSnapshot().hasCorrectSupply()); + } +} diff --git a/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotStateDiffImplTest.java b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotStateDiffImplTest.java new file mode 100644 index 0000000000..6b2726b99e --- /dev/null +++ b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotStateDiffImplTest.java @@ -0,0 +1,49 @@ +package com.iota.iri.service.snapshot.impl; + +import static org.junit.Assert.*; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.junit.Test; + +import com.iota.iri.TransactionTestUtils; +import com.iota.iri.model.Hash; + +public class SnapshotStateDiffImplTest { + + private static final Hash A = TransactionTestUtils.getTransactionHash(); + private static final Hash B = TransactionTestUtils.getTransactionHash(); + private static final Hash C =
TransactionTestUtils.getTransactionHash(); + + @Test + public void getBalanceChanges() { + SnapshotStateDiffImpl stateDiff = new SnapshotStateDiffImpl(Collections.EMPTY_MAP); + Map change = stateDiff.getBalanceChanges(); + change.put(A, 1l); + + assertNotEquals("Changes to the statediff balance changes should not reflect on the original state", + stateDiff.getBalanceChanges().size(), change.size()); + } + + @Test + public void isConsistent() { + SnapshotStateDiffImpl stateDiff = new SnapshotStateDiffImpl(new HashMap(){{ + put(A, 1l); + put(B, 5l); + put(C, -6l); + }}); + assertTrue("Sum of diffs should be 0", stateDiff.isConsistent()); + + stateDiff = new SnapshotStateDiffImpl(Collections.EMPTY_MAP); + assertTrue("Empty diff should be consistent as sum is 0", stateDiff.isConsistent()); + + stateDiff = new SnapshotStateDiffImpl(new HashMap(){{ + put(A, 1l); + put(B, 5l); + }}); + + assertFalse("Diff sum not 0 should not be consistent", stateDiff.isConsistent()); + } +} diff --git a/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotStateImplTest.java b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotStateImplTest.java new file mode 100644 index 0000000000..83f5bef544 --- /dev/null +++ b/src/test/java/com/iota/iri/service/snapshot/impl/SnapshotStateImplTest.java @@ -0,0 +1,135 @@ +package com.iota.iri.service.snapshot.impl; + +import static org.junit.Assert.*; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.Before; +import org.junit.Test; + +import com.iota.iri.TransactionTestUtils; +import com.iota.iri.controllers.TransactionViewModel; +import com.iota.iri.model.Hash; +import com.iota.iri.service.snapshot.SnapshotException; +import com.iota.iri.service.snapshot.SnapshotState; +import com.iota.iri.service.snapshot.SnapshotStateDiff; + +public class SnapshotStateImplTest { + + private static final Hash A = TransactionTestUtils.getTransactionHash(); + private static final Hash B = TransactionTestUtils.getTransactionHash(); + + 
private static Map map = new HashMap(){{ + put(Hash.NULL_HASH, TransactionViewModel.SUPPLY - 10); + put(A, 10l); + }}; + + private static Map inconsistentMap = new HashMap(){{ + put(Hash.NULL_HASH, 5l); + put(A, -10l); + }}; + + private SnapshotStateImpl state; + private SnapshotStateImpl balanceState; + + @Before + public void setUp() throws Exception { + state = new SnapshotStateImpl(new HashMap<>()); + balanceState = new SnapshotStateImpl(map); + } + + @Test + public void testGetBalance() { + assertNull("Unknown address should return null", balanceState.getBalance(null)); + + long balance = balanceState.getBalance(Hash.NULL_HASH); + assertEquals("Balance should be total - 10", TransactionViewModel.SUPPLY - 10l, balance); + + balance = balanceState.getBalance(A); + assertEquals("Balance should be 10", 10l, balance); + } + + @Test + public void testGetBalances() { + assertEquals("State should not have balances", new HashMap<>(), state.getBalances()); + assertEquals("State should have the balances it was created with", map, balanceState.getBalances()); + } + + @Test + public void testIsConsistent() { + assertTrue("Empty balance should be consistent", state.isConsistent()); + assertTrue("No negative balances should be consistent", balanceState.isConsistent()); + + SnapshotStateImpl inconsistentState = new SnapshotStateImpl(inconsistentMap); + assertFalse("Negative balances should not be consistent", inconsistentState.isConsistent()); + } + + @Test + public void testHasCorrectSupply() { + assertFalse("Empty state should not have correct supply", state.hasCorrectSupply()); + assertTrue("State with total supply should have correct supply", balanceState.hasCorrectSupply()); + + SnapshotStateImpl inconsistentState = new SnapshotStateImpl(inconsistentMap); + assertFalse("Inconsistent state without full supply should be incorrect", inconsistentState.hasCorrectSupply()); + + Map map = new HashMap<>(); + map.put(Hash.NULL_HASH, TransactionViewModel.SUPPLY - 10); + map.put(A, 
-10l); + map.put(B, 20l); + assertFalse("Inconsistent state with full supply should not be correct", inconsistentState.hasCorrectSupply()); + } + + @Test + public void testUpdate() { + assertNotEquals("States with different balances should not be equal", state, balanceState); + state.update(balanceState); + + assertEquals("Updating a state with another state should make them equal", state, balanceState); + } + + @Test + public void testApplyStateDiff() throws SnapshotException { + Map map = new HashMap<>(); + map.put(Hash.NULL_HASH, 5l); + map.put(A, -5l); + + SnapshotStateDiff diff = new SnapshotStateDiffImpl(map); + state.applyStateDiff(diff); + + long balance = state.getBalance(Hash.NULL_HASH); + assertEquals("Applying state to an empty state should have 5 for genesis", 5l, balance); + + balance = state.getBalance(A); + assertEquals("Applying state to an empty state should have -5 for A", -5l, balance); + } + + @Test(expected = SnapshotException.class) + public void testApplyStateDiffThrowsException() throws SnapshotException { + SnapshotStateDiff diff = new SnapshotStateDiffImpl(inconsistentMap); + state.applyStateDiff(diff); + + fail("Applying an inconsistent state should throw an exception"); + } + + @Test + public void testPatchedState() { + SnapshotStateDiff diff = new SnapshotStateDiffImpl(map); + SnapshotState patchedState = state.patchedState(diff); + + assertEquals("Patching an empty state with a map should equal to creation with that map", patchedState, balanceState); + + Map map = new HashMap<>(); + map.put(Hash.NULL_HASH, 5l); + map.put(A, -5l); + + diff = new SnapshotStateDiffImpl(map); + patchedState = balanceState.patchedState(diff); + + long balance = patchedState.getBalance(Hash.NULL_HASH); + assertEquals("5 should have been added to genesis", TransactionViewModel.SUPPLY - 5l, balance); + + balance = patchedState.getBalance(A); + assertEquals("5 should have been removed from A", 5, balance); + } +} diff --git 
a/src/test/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculatorTest.java b/src/test/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculatorTest.java index b0462179ae..4f0a1caad8 100644 --- a/src/test/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculatorTest.java +++ b/src/test/java/com/iota/iri/service/tipselection/impl/CumulativeWeightCalculatorTest.java @@ -57,15 +57,15 @@ public static void setUp() throws Exception { @Test public void testCalculateCumulativeWeight() throws Exception { TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction1.getHash(), - transaction1.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction2.getHash(), - transaction1.getHash()), getRandomTransactionHash()); - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction2.getHash(), - transaction3.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction1.getHash(), + transaction1.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction2.getHash(), + transaction1.getHash()), getTransactionHash()); + transaction4 = new 
TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction2.getHash(), + transaction3.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -88,13 +88,13 @@ public void testCalculateCumulativeWeight() throws Exception { @Test public void testCalculateCumulativeWeightDiamond() throws Exception { TransactionViewModel transaction, transaction1, transaction2, transaction3; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction1.getHash(), - transaction2.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction1.getHash(), + transaction2.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -120,15 +120,15 @@ public void testCalculateCumulativeWeightDiamond() throws Exception { @Test public void 
testCalculateCumulativeWeightLinear() throws Exception { TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction1.getHash(), transaction1.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction2.getHash(), transaction2.getHash()), getRandomTransactionHash()); - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction3.getHash(), transaction3.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction1.getHash(), transaction1.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction2.getHash(), transaction2.getHash()), getTransactionHash()); + transaction4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction3.getHash(), transaction3.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -157,19 +157,19 @@ public void testCalculateCumulativeWeightLinear() throws Exception { public void testCalculateCumulativeWeight2() throws Exception { TransactionViewModel transaction, transaction1, transaction2, 
transaction3, transaction4, transaction5, transaction6; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); - transaction5 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction3.getHash(), transaction2.getHash()), getRandomTransactionHash()); - transaction6 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction4.getHash(), transaction5.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); + transaction4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); + transaction5 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction3.getHash(), transaction2.getHash()), getTransactionHash()); + transaction6 = new 
TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction4.getHash(), transaction5.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -204,15 +204,15 @@ public void testCalculateCumulativeWeight2() throws Exception { @Test public void cwCalculationSameAsLegacy() throws Exception { Hash[] hashes = new Hash[100]; - hashes[0] = getRandomTransactionHash(); - TransactionViewModel transactionViewModel1 = new TransactionViewModel(getRandomTransactionTrits(), hashes[0]); + hashes[0] = getTransactionHash(); + TransactionViewModel transactionViewModel1 = new TransactionViewModel(getTransactionTrits(), hashes[0]); transactionViewModel1.store(tangle, snapshotProvider.getInitialSnapshot()); //constant seed for consistent results Random random = new Random(181783497276652981L); for (int i = 1; i < hashes.length; i++) { - hashes[i] = getRandomTransactionHash(); + hashes[i] = getTransactionHash(); TransactionViewModel transactionViewModel = new TransactionViewModel( - getTransactionWithTrunkAndBranch(hashes[i - random.nextInt(i) - 1], + getTransactionTritsWithTrunkAndBranch(hashes[i - random.nextInt(i) - 1], hashes[i - random.nextInt(i) - 1]), hashes[i]); transactionViewModel.store(tangle, snapshotProvider.getInitialSnapshot()); log.debug(String.format("current transaction %.4s \n with trunk %.4s \n and branch %.4s", hashes[i], @@ -236,8 +236,8 @@ public void cwCalculationSameAsLegacy() throws Exception { @Test public void testTangleWithCircle() throws Exception { TransactionViewModel transaction; - Hash randomTransactionHash = getRandomTransactionHash(); - transaction = new TransactionViewModel(getTransactionWithTrunkAndBranch(randomTransactionHash, randomTransactionHash), randomTransactionHash); + Hash randomTransactionHash = getTransactionHash(); + transaction = new 
TransactionViewModel(getTransactionTritsWithTrunkAndBranch(randomTransactionHash, randomTransactionHash), randomTransactionHash); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -249,15 +249,15 @@ public void testTangleWithCircle() throws Exception { @Test public void testTangleWithCircle2() throws Exception { TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - Hash randomTransactionHash2 = getRandomTransactionHash(); - transaction = new TransactionViewModel(getTransactionWithTrunkAndBranch( - randomTransactionHash2, randomTransactionHash2), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch( + Hash randomTransactionHash2 = getTransactionHash(); + transaction = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + randomTransactionHash2, randomTransactionHash2), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( transaction1.getHash(), transaction1.getHash()), randomTransactionHash2); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -271,14 +271,14 @@ public void testTangleWithCircle2() throws Exception { @Test public void testCollsionsInDiamondTangle() throws Exception { TransactionViewModel 
transaction, transaction1, transaction2, transaction3; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); Hash transactionHash2 = getHashWithSimilarPrefix(transaction1); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), transaction.getHash()), transactionHash2); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction1.getHash(), - transaction2.getHash()), getRandomTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction1.getHash(), + transaction2.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -323,12 +323,12 @@ public void testUpdateRatingsTime() throws Exception { private long ratingTime(int size) throws Exception { Hash[] hashes = new Hash[size]; - hashes[0] = getRandomTransactionHash(); - new TransactionViewModel(getRandomTransactionTrits(), hashes[0]).store(tangle, snapshotProvider.getInitialSnapshot()); + hashes[0] = getTransactionHash(); + new TransactionViewModel(getTransactionTrits(), hashes[0]).store(tangle, snapshotProvider.getInitialSnapshot()); Random random = new Random(); for (int i = 1; i < hashes.length; i++) { - hashes[i] = getRandomTransactionHash(); - new 
TransactionViewModel(getTransactionWithTrunkAndBranch(hashes[i - random.nextInt(i) - 1], + hashes[i] = getTransactionHash(); + new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hashes[i - random.nextInt(i) - 1], hashes[i - random.nextInt(i) - 1]), hashes[i]).store(tangle, snapshotProvider.getInitialSnapshot()); } long start = System.currentTimeMillis(); diff --git a/src/test/java/com/iota/iri/service/tipselection/impl/RatingOneTest.java b/src/test/java/com/iota/iri/service/tipselection/impl/RatingOneTest.java index 3d82de9f6a..8edffdb5a7 100644 --- a/src/test/java/com/iota/iri/service/tipselection/impl/RatingOneTest.java +++ b/src/test/java/com/iota/iri/service/tipselection/impl/RatingOneTest.java @@ -15,9 +15,9 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionTrits; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; -import static com.iota.iri.TransactionTestUtils.getTransactionWithTrunkAndBranch; +import static com.iota.iri.TransactionTestUtils.getTransactionTrits; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionTritsWithTrunkAndBranch; public class RatingOneTest { private static final TemporaryFolder dbFolder = new TemporaryFolder(); @@ -51,15 +51,15 @@ public static void setUp() throws Exception { @Test public void testCalculate() throws Exception { TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction1.getHash(), - transaction1.getHash()), getRandomTransactionHash()); - transaction3 = new 
TransactionViewModel(getTransactionWithTrunkAndBranch(transaction2.getHash(), - transaction1.getHash()), getRandomTransactionHash()); - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction2.getHash(), - transaction3.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction1.getHash(), + transaction1.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction2.getHash(), + transaction1.getHash()), getTransactionHash()); + transaction4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction2.getHash(), + transaction3.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); diff --git a/src/test/java/com/iota/iri/service/tipselection/impl/TailFinderImplTest.java b/src/test/java/com/iota/iri/service/tipselection/impl/TailFinderImplTest.java index e34d19f81f..c81db939f9 100644 --- a/src/test/java/com/iota/iri/service/tipselection/impl/TailFinderImplTest.java +++ b/src/test/java/com/iota/iri/service/tipselection/impl/TailFinderImplTest.java @@ -1,10 +1,10 @@ package com.iota.iri.service.tipselection.impl; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionTrits; +import static com.iota.iri.TransactionTestUtils.getTransactionTrits; import static com.iota.iri.TransactionTestUtils.createBundleHead; import static com.iota.iri.TransactionTestUtils.createTransactionWithTrunkBundleHash; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; 
-import static com.iota.iri.TransactionTestUtils.getTransactionWithTrunkAndBranch; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionTritsWithTrunkAndBranch; import com.iota.iri.conf.MainnetConfig; import com.iota.iri.controllers.TransactionViewModel; @@ -55,7 +55,7 @@ public static void setUp() throws Exception { @Test public void findTailTest() throws Exception { - TransactionViewModel txa = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); + TransactionViewModel txa = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); txa.store(tangle, snapshotProvider.getInitialSnapshot()); TransactionViewModel tx2 = createBundleHead(2); @@ -82,7 +82,7 @@ public void findTailTest() throws Exception { @Test public void findMissingTailTest() throws Exception { - TransactionViewModel txa = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); + TransactionViewModel txa = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); txa.store(tangle, snapshotProvider.getInitialSnapshot()); TransactionViewModel tx2 = createBundleHead(2); @@ -91,8 +91,8 @@ public void findMissingTailTest() throws Exception { TransactionViewModel tx1 = createTransactionWithTrunkBundleHash(tx2, txa.getHash()); tx1.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel tx0 = new TransactionViewModel(getTransactionWithTrunkAndBranch(tx1.getHash(), tx2.getHash()), - getRandomTransactionHash()); + TransactionViewModel tx0 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(tx1.getHash(), tx2.getHash()), + getTransactionHash()); tx0.store(tangle, snapshotProvider.getInitialSnapshot()); Optional tail = tailFinder.findTail(tx2.getHash()); diff --git a/src/test/java/com/iota/iri/service/tipselection/impl/WalkValidatorImplTest.java 
b/src/test/java/com/iota/iri/service/tipselection/impl/WalkValidatorImplTest.java index 74a9d525d3..88d565058b 100644 --- a/src/test/java/com/iota/iri/service/tipselection/impl/WalkValidatorImplTest.java +++ b/src/test/java/com/iota/iri/service/tipselection/impl/WalkValidatorImplTest.java @@ -21,8 +21,8 @@ import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; -import static com.iota.iri.TransactionTestUtils.getTransactionWithTrunkAndBranch; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionTritsWithTrunkAndBranch; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; import java.util.HashMap; import java.util.HashSet; @@ -146,7 +146,7 @@ public void belowMaxDepthWithFreshMilestone() throws Exception { tx.setSnapshot(tangle, snapshotProvider.getInitialSnapshot(), 92); Hash hash = tx.getHash(); for (int i = 0; i < 4 ; i++) { - tx = new TransactionViewModel(getTransactionWithTrunkAndBranch(hash, hash), getRandomTransactionHash()); + tx = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hash, hash), getTransactionHash()); TransactionTestUtils.setLastIndex(tx,0); TransactionTestUtils.setCurrentIndex(tx,0); tx.updateSolid(true); @@ -170,8 +170,8 @@ public void failBelowMaxDepthWithFreshMilestoneDueToLongChain() throws Exception tx.setSnapshot(tangle, snapshotProvider.getInitialSnapshot(), 92); Hash hash = tx.getHash(); for (int i = 0; i < maxAnalyzedTxs ; i++) { - tx = new TransactionViewModel(getTransactionWithTrunkAndBranch(hash, hash), - getRandomTransactionHash()); + tx = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hash, hash), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx,0); TransactionTestUtils.setCurrentIndex(tx,0); hash = tx.getHash(); @@ -193,7 +193,7 @@ public void belowMaxDepthOnGenesis() throws Exception { final int maxAnalyzedTxs = config.getBelowMaxDepthTransactionLimit(); Hash hash = 
Hash.NULL_HASH; for (int i = 0; i < maxAnalyzedTxs - 2 ; i++) { - tx = new TransactionViewModel(getTransactionWithTrunkAndBranch(hash, hash), getRandomTransactionHash()); + tx = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hash, hash), getTransactionHash()); TransactionTestUtils.setLastIndex(tx,0); TransactionTestUtils.setCurrentIndex(tx,0); tx.updateSolid(true); @@ -216,8 +216,8 @@ public void failBelowMaxDepthOnGenesisDueToLongChain() throws Exception { TransactionViewModel tx = null; Hash hash = Hash.NULL_HASH; for (int i = 0; i < maxAnalyzedTxs; i++) { - tx = new TransactionViewModel(getTransactionWithTrunkAndBranch(hash, hash), - getRandomTransactionHash()); + tx = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(hash, hash), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx,0); TransactionTestUtils.setCurrentIndex(tx,0); tx.updateSolid(true); @@ -261,22 +261,22 @@ public void dontMarkWrongTxsAsBelowMaxDepth() throws Exception { txBad.store(tangle, snapshotProvider.getInitialSnapshot()); txBad.setSnapshot(tangle, snapshotProvider.getInitialSnapshot(), 10); - TransactionViewModel tx2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(tx1.getHash(), tx1.getHash()), - getRandomTransactionHash()); + TransactionViewModel tx2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(tx1.getHash(), tx1.getHash()), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx2,0); TransactionTestUtils.setCurrentIndex(tx2,0); tx2.updateSolid(true); tx2.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel tx3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(tx1.getHash(), txBad.getHash()), - getRandomTransactionHash()); + TransactionViewModel tx3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(tx1.getHash(), txBad.getHash()), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx3,0); TransactionTestUtils.setCurrentIndex(tx3,0); tx3.updateSolid(true); 
tx3.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel tx4 = new TransactionViewModel(getTransactionWithTrunkAndBranch(tx2.getHash(), tx3.getHash()), - getRandomTransactionHash()); + TransactionViewModel tx4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(tx2.getHash(), tx3.getHash()), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx4,0); TransactionTestUtils.setCurrentIndex(tx4,0); tx4.updateSolid(true); @@ -307,22 +307,22 @@ public void allowConfirmedTxToPassBelowMaxDepthAfterMilestoneConfirmation() thro txBad.store(tangle, snapshotProvider.getInitialSnapshot()); txBad.setSnapshot(tangle, snapshotProvider.getInitialSnapshot(), 10); - TransactionViewModel tx2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(tx1.getHash(), tx1.getHash()), - getRandomTransactionHash()); + TransactionViewModel tx2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(tx1.getHash(), tx1.getHash()), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx2,0); TransactionTestUtils.setCurrentIndex(tx2,0); tx2.updateSolid(true); tx2.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel tx3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(tx1.getHash(), txBad.getHash()), - getRandomTransactionHash()); + TransactionViewModel tx3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(tx1.getHash(), txBad.getHash()), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx3,0); TransactionTestUtils.setCurrentIndex(tx3,0); tx3.updateSolid(true); tx3.store(tangle, snapshotProvider.getInitialSnapshot()); - TransactionViewModel tx4 = new TransactionViewModel(getTransactionWithTrunkAndBranch(tx2.getHash(), tx3.getHash()), - getRandomTransactionHash()); + TransactionViewModel tx4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(tx2.getHash(), tx3.getHash()), + getTransactionHash()); TransactionTestUtils.setLastIndex(tx4,0); 
TransactionTestUtils.setCurrentIndex(tx4,0); tx4.updateSolid(true); diff --git a/src/test/java/com/iota/iri/service/tipselection/impl/WalkerAlphaTest.java b/src/test/java/com/iota/iri/service/tipselection/impl/WalkerAlphaTest.java index a303e13507..8b6d16664c 100644 --- a/src/test/java/com/iota/iri/service/tipselection/impl/WalkerAlphaTest.java +++ b/src/test/java/com/iota/iri/service/tipselection/impl/WalkerAlphaTest.java @@ -11,7 +11,6 @@ import com.iota.iri.storage.Tangle; import com.iota.iri.storage.rocksDB.RocksDBPersistenceProvider; import com.iota.iri.utils.collections.interfaces.UnIterableMap; -import com.iota.iri.zmq.MessageQ; import java.util.HashMap; import java.util.Map; @@ -27,9 +26,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionTrits; -import static com.iota.iri.TransactionTestUtils.getRandomTransactionHash; -import static com.iota.iri.TransactionTestUtils.getTransactionWithTrunkAndBranch; +import static com.iota.iri.TransactionTestUtils.getTransactionTrits; +import static com.iota.iri.TransactionTestUtils.getTransactionHash; +import static com.iota.iri.TransactionTestUtils.getTransactionTritsWithTrunkAndBranch; public class WalkerAlphaTest { private static final TemporaryFolder dbFolder = new TemporaryFolder(); @@ -58,11 +57,10 @@ public static void setUp() throws Exception { Tangle.COLUMN_FAMILIES, Tangle.METADATA_COLUMN_FAMILY)); tangle.init(); - MessageQ messageQ = Mockito.mock(MessageQ.class); TailFinder tailFinder = Mockito.mock(TailFinder.class); Mockito.when(tailFinder.findTail(Mockito.any(Hash.class))) .then(args -> Optional.of(args.getArgumentAt(0, Hash.class))); - walker = new WalkerAlpha(tailFinder, tangle, messageQ, new Random(1), new MainnetConfig()); + walker = new WalkerAlpha(tailFinder, tangle, new Random(1), new MainnetConfig()); } @@ -70,13 +68,13 @@ public static void setUp() throws Exception { public void testWalkEndsOnlyInRating() throws Exception 
{ //build a small tangle - 1,2,3,4 point to transaction TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -88,8 +86,8 @@ public void testWalkEndsOnlyInRating() throws Exception { UnIterableMap rating = ratingCalculator.calculate(transaction.getHash()); //add 4 after the rating was calculated - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); + transaction4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); transaction4.store(tangle, snapshotProvider.getInitialSnapshot()); for (int i=0; i < 100; i++) { @@ -107,13 +105,13 @@ 
public void showWalkDistributionAlphaHalf() throws Exception { //build a small tangle - 1,2,3,4 point to transaction TransactionViewModel transaction, transaction1, transaction2, transaction3; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -150,13 +148,13 @@ public void showWalkDistributionAlphaZero() throws Exception { //build a small tangle - 1,2,3,4 point to transaction TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction2 = new 
TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -170,8 +168,8 @@ public void showWalkDistributionAlphaZero() throws Exception { rating.put(transaction2.getHash(), 10); //add 4 after the rating was calculated - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); + transaction4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); transaction4.store(tangle, snapshotProvider.getInitialSnapshot()); Map counters = new HashMap<>(rating.size()); @@ -197,15 +195,15 @@ public void showWalkDistributionAlphaZero() throws Exception { public void testWalk() throws Exception { //build a small tangle TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - transaction = new TransactionViewModel(getRandomTransactionTrits(), Hash.NULL_HASH); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), 
getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction1.getHash(), - transaction1.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction2.getHash(), - transaction1.getHash()), getRandomTransactionHash()); - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction2.getHash(), - transaction3.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), Hash.NULL_HASH); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction1.getHash(), + transaction1.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction2.getHash(), + transaction1.getHash()), getTransactionHash()); + transaction4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction2.getHash(), + transaction3.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -227,13 +225,13 @@ public void testWalk() throws Exception { public void testWalkDiamond() throws Exception { //build a small tangle TransactionViewModel transaction, transaction1, transaction2, transaction3; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction.getHash(), - transaction.getHash()), 
getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch(transaction1.getHash(), - transaction2.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction.getHash(), + transaction.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch(transaction1.getHash(), + transaction2.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); @@ -254,15 +252,15 @@ public void testWalkDiamond() throws Exception { public void testWalkChain() throws Exception { //build a small tangle TransactionViewModel transaction, transaction1, transaction2, transaction3, transaction4; - transaction = new TransactionViewModel(getRandomTransactionTrits(), getRandomTransactionHash()); - transaction1 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction.getHash(), transaction.getHash()), getRandomTransactionHash()); - transaction2 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction1.getHash(), transaction1.getHash()), getRandomTransactionHash()); - transaction3 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction2.getHash(), transaction2.getHash()), getRandomTransactionHash()); - transaction4 = new TransactionViewModel(getTransactionWithTrunkAndBranch( - transaction3.getHash(), transaction3.getHash()), getRandomTransactionHash()); + transaction = new TransactionViewModel(getTransactionTrits(), getTransactionHash()); + transaction1 = new 
TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction.getHash(), transaction.getHash()), getTransactionHash()); + transaction2 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction1.getHash(), transaction1.getHash()), getTransactionHash()); + transaction3 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction2.getHash(), transaction2.getHash()), getTransactionHash()); + transaction4 = new TransactionViewModel(getTransactionTritsWithTrunkAndBranch( + transaction3.getHash(), transaction3.getHash()), getTransactionHash()); transaction.store(tangle, snapshotProvider.getInitialSnapshot()); transaction1.store(tangle, snapshotProvider.getInitialSnapshot()); transaction2.store(tangle, snapshotProvider.getInitialSnapshot()); diff --git a/src/test/java/com/iota/iri/utils/datastructure/impl/CuckooFilterImplTest.java b/src/test/java/com/iota/iri/utils/datastructure/impl/CuckooFilterImplTest.java new file mode 100644 index 0000000000..9de0f150bd --- /dev/null +++ b/src/test/java/com/iota/iri/utils/datastructure/impl/CuckooFilterImplTest.java @@ -0,0 +1,236 @@ +package com.iota.iri.utils.datastructure.impl; + +import com.iota.iri.utils.datastructure.CuckooFilter; +import org.junit.*; +import org.junit.runners.MethodSorters; + +/** + * This is the Unit Test for the {@link CuckooFilterImpl}, that tests the individual methods as well as the overall + * performance of the filter in regards to the expected false positive rate. + */ +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class CuckooFilterImplTest { + /** + * Holds the amount of elements we want to store in the filter. + * + * Note: 1955 items allows for a ~0.955 load factor at an effective capacity of 2048 + */ + private static final int ELEMENTS_TO_STORE = 1955; + + /** + * Holds a reference to the filter that is shared throughout the tests (for the String methods). 
+ */ + private static CuckooFilter stringCuckooFilter; + + /** + * Holds a reference to the filter that is shared throughout the tests (for the byte[] methods). + */ + private static CuckooFilter byteArrayCuckooFilter; + + /** + * Initializes our test by creating an empty {@link CuckooFilterImpl}. + */ + @BeforeClass + public static void setup() { + stringCuckooFilter = new CuckooFilterImpl(ELEMENTS_TO_STORE); + byteArrayCuckooFilter = new CuckooFilterImpl(ELEMENTS_TO_STORE); + } + + /** + * Frees the resources again, so the unused filter can be cleaned up by the GarbageCollector. + */ + @AfterClass + public static void teardown() { + stringCuckooFilter = null; + byteArrayCuckooFilter = null; + } + + /** + * This method tests the function of the add method (for the String parameter) by: + * + * 1. inserting the defined amount of elements + * 2. checking if the size is within the expected range + */ + @Test + public void testAaddString() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + stringCuckooFilter.add("INSERTED_ITEM" + Integer.toString(insertedItems)); + } + + int sizeDiff = ELEMENTS_TO_STORE - stringCuckooFilter.size(); + + Assert.assertTrue("the filter should have less elements than we added (due to collisions)", sizeDiff >= 0); + Assert.assertTrue("the difference in size should be less than 3%", sizeDiff <= ELEMENTS_TO_STORE * 0.03d); + } + + /** + * This method tests the function of the add method (for the byte[] parameter) by: + * + * 1. inserting the defined amount of elements + * 2. 
checking if the size is within the expected range + */ + @Test + public void testAaddByteArray() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + byteArrayCuckooFilter.add(("INSERTED_ITEM" + Integer.toString(insertedItems)).getBytes()); + } + + int sizeDiff = ELEMENTS_TO_STORE - byteArrayCuckooFilter.size(); + + Assert.assertTrue("the filter should have less elements than we added (due to collisions)", sizeDiff >= 0); + Assert.assertTrue("the difference in size should be less than 3%", sizeDiff <= ELEMENTS_TO_STORE * 0.03d); + } + + /** + * This method tests the function of the contains method (for the String parameter) by checking if all previously + * added elements are found. + */ + @Test + public void testBcontainsString() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + Assert.assertTrue("the filter should contain all previously added elements", + stringCuckooFilter.contains("INSERTED_ITEM" + Integer.toString(insertedItems))); + } + } + + /** + * This method tests the function of the contains method (for the byte[] parameter) by checking if all previously + * added elements are found. + */ + @Test + public void testBcontainsByteArray() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + Assert.assertTrue("the filter should contain all previously added elements", + byteArrayCuckooFilter.contains(("INSERTED_ITEM" + Integer.toString(insertedItems)).getBytes())); + } + } + + /** + * This method tests the function of the delete method (for the String parameter) by: + * + * 1. removing all previously added elements + * 2. 
checking if the filter is empty afterwards + */ + @Test + public void testCdeleteString() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + stringCuckooFilter.delete("INSERTED_ITEM" + Integer.toString(insertedItems)); + } + + Assert.assertEquals("the filter should be empty", 0, stringCuckooFilter.size()); + + + } + + /** + * This method tests the function of the delete method (for the byte[] parameter) by: + * + * 1. removing all previously added elements + * 2. checking if the filter is empty afterwards + */ + @Test + public void testCdeleteByteArray() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + stringCuckooFilter.delete(("INSERTED_ITEM" + Integer.toString(insertedItems)).getBytes()); + } + + Assert.assertEquals("the filter should be empty", 0, stringCuckooFilter.size()); + + + } + + /** + * This method tests the performance of the filter (using the String parameter) in regards to false positives by: + * + * 1. inserting the defined amount of elements + * 2. querying for non-existing elements + * 3. calculating the false-positive hits + * 4. 
comparing the value against the expected result + */ + @Test + public void testDfalsePositiveRateString() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + stringCuckooFilter.add("INSERTED_ITEM" + Integer.toString(insertedItems)); + } + + // a big enough sample size to get a reasonable result + int elementsToQuery = 100000; + + int falsePositives = 0; + int queriedItems; + for (queriedItems = 0; queriedItems < elementsToQuery; queriedItems++) { + if (stringCuckooFilter.contains("QUERIED_ITEMS" + Integer.toString(queriedItems))) { + falsePositives++; + } + } + + double falsePositiveRate = (double) falsePositives / (double) elementsToQuery; + + Assert.assertTrue("expecting the false positive rate to be lower than 3%", falsePositiveRate < 0.03d); + } + + /** + * This method tests the performance of the filter (using the byte[] parameter) in regards to false positives by: + * + * 1. inserting the defined amount of elements + * 2. querying for non-existing elements + * 3. calculating the false-positive hits + * 4. 
comparing the value against the expected result + */ + @Test + public void testDfalsePositiveRateByteArray() { + int insertedItems; + for (insertedItems = 0; insertedItems < ELEMENTS_TO_STORE; insertedItems++) { + byteArrayCuckooFilter.add(("INSERTED_ITEM" + Integer.toString(insertedItems)).getBytes()); + } + + // a big enough sample size to get a reasonable result + int elementsToQuery = 100000; + + int falsePositives = 0; + int queriedItems; + for (queriedItems = 0; queriedItems < elementsToQuery; queriedItems++) { + if (byteArrayCuckooFilter.contains(("QUERIED_ITEMS" + Integer.toString(queriedItems)).getBytes())) { + falsePositives++; + } + } + + double falsePositiveRate = (double) falsePositives / (double) elementsToQuery; + + Assert.assertTrue("expecting the false positive rate to be lower than 3%", falsePositiveRate < 0.03d); + } + + /** + * This method tests the function of the getCapacity method by: + * + * 1. creating filters of various sizes + * 2. comparing the created capacity against the expected range + * + * Note: Since the capacity has to be a power of two and tries to achieve a load factor of 0.955, the capacity will + * at max be 2.1 times the intended size. + * + * capacity <= 2 * (1 / 0.955) * filterSize + */ + @Test + public void testEcapacity() { + int[] filterSizes = {10, 500, 25_000, 125_000, 10_000_000}; + + CuckooFilter emptyCuckooFilter; + for (int filterSize : filterSizes) { + emptyCuckooFilter = new CuckooFilterImpl(filterSize); + + Assert.assertTrue("the capacity should be bigger than the intended filter size", + emptyCuckooFilter.getCapacity() > filterSize); + + Assert.assertTrue("the capacity should be smaller than 2.094 times the filter size", + emptyCuckooFilter.getCapacity() < filterSize * 2.094d); + } + } +}