/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package fabric

import (
	"context"
	"sync"

	driver2 "github.com/hyperledger-labs/fabric-smart-client/platform/common/driver"
	"github.com/hyperledger-labs/fabric-smart-client/platform/fabric"
	"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/committer"
	"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/fabricutils"
	"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/rwset"
	"github.com/hyperledger-labs/fabric-smart-client/platform/view/services/tracing"
	"github.com/hyperledger-labs/fabric-token-sdk/token/services/network/common/rws/translator"
	"github.com/hyperledger-labs/fabric-token-sdk/token/services/network/driver"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/pkg/errors"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/sync/errgroup"
)

// deliveryBasedFLMProvider assumes that a listener for a transaction is added
// before the transaction (i.e. the block that contains it) arrives at the
// delivery service listener.
type deliveryBasedFLMProvider struct {
	fnsp           *fabric.NetworkServiceProvider
	tracerProvider trace.TracerProvider
	keyTranslator  translator.KeyTranslator
}

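// NewDeliveryBasedFLMProvider returns a provider that builds delivery-based
// finality listener managers. A minimal usage sketch (network and channel
// names are illustrative):
//
//	provider := NewDeliveryBasedFLMProvider(fnsp, tracerProvider, keyTranslator)
//	flm, err := provider.NewManager("mynetwork", "mychannel")
//	if err != nil {
//		// handle error
//	}
//	err = flm.AddFinalityListener("token-namespace", txID, listener)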
func NewDeliveryBasedFLMProvider(fnsp *fabric.NetworkServiceProvider, tracerProvider trace.TracerProvider, keyTranslator translator.KeyTranslator) *deliveryBasedFLMProvider {
	return &deliveryBasedFLMProvider{
		fnsp:           fnsp,
		tracerProvider: tracerProvider,
		keyTranslator:  keyTranslator,
	}
}

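// listenerEntry binds a finality listener to the namespace it is interested
// in; an empty namespace matches any namespace.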
type listenerEntry struct {
	namespace driver2.Namespace
	listener  driver.FinalityListener
}

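// NewManager creates a finality listener manager for the given network and
// channel, and spawns a background delivery scan of the channel's blocks
// that feeds the registered listeners.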
func (p *deliveryBasedFLMProvider) NewManager(network, channel string) (FinalityListenerManager, error) {
	net, err := p.fnsp.FabricNetworkService(network)
	if err != nil {
		return nil, err
	}
	ch, err := net.Channel(channel)
	if err != nil {
		return nil, err
	}

	flm := &deliveryBasedFLM{
		mapper: NewParallelResponseMapper(10, network, p.keyTranslator),
		tracer: p.tracerProvider.Tracer("finality_listener_manager", tracing.WithMetricsOpts(tracing.MetricsOpts{
			Namespace: network,
		})),
		listeners: NewMapCache[translator.TxID, []listenerEntry](),
		txInfos:   NewMapCache[translator.TxID, txInfo](),
	}
	logger.Infof("Starting delivery service for [%s:%s]", network, channel)
	go func() {
		// The callback never asks the scan to stop, so ScanBlock only returns
		// on error or context cancellation.
		err := ch.Delivery().ScanBlock(context.Background(), func(ctx context.Context, block *common.Block) (bool, error) {
			return false, flm.onBlock(ctx, block)
		})
		if err != nil {
			logger.Errorf("failed running delivery for [%s:%s]: %v", network, channel, err)
		}
	}()

	return flm, nil
}

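// deliveryBasedFLM dispatches the finality events decoded from delivered
// blocks to the registered listeners, and caches past events so that
// listeners added after finality can still be served.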
type deliveryBasedFLM struct {
	tracer trace.Tracer
	mapper *parallelBlockMapper

	mu        sync.RWMutex
	listeners CacheMap[translator.TxID, []listenerEntry]
	txInfos   CacheMap[translator.TxID, txInfo]
}

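// onBlock maps the block into per-namespace finality events, notifies and
// removes the matching listeners, and caches the events for late listeners.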
func (m *deliveryBasedFLM) onBlock(ctx context.Context, block *common.Block) error {
	logger.Infof("New block with %d txs detected [%d]", len(block.Data.Data), block.Header.Number)

	txs, err := m.mapper.Map(ctx, block)
	if err != nil {
		logger.Errorf("failed to process block [%d]: %v", block.Header.Number, err)
		return errors.Wrapf(err, "failed to process block [%d]", block.Header.Number)
	}

	invokedTxIDs := make([]translator.TxID, 0)

	m.mu.Lock()
	defer m.mu.Unlock()

	invokedListeners := 0
	for _, txInfos := range txs {
		for ns, info := range txInfos {
			logger.Infof("Look for listeners of [%s:%s]", ns, info.txID)
			// We expect only one namespace per transaction. A slice of
			// listenerEntry keeps the write operations cheap; if more
			// namespaces are expected, it is worth switching to a map.
			listeners, ok := m.listeners.Get(info.txID)
			if ok {
				invokedTxIDs = append(invokedTxIDs, info.txID)
			}
			logger.Infof("Invoking %d listeners for [%s]", len(listeners), info.txID)
			for _, entry := range listeners {
				// An empty namespace on either side matches any namespace.
				if len(entry.namespace) == 0 || len(ns) == 0 || entry.namespace == ns {
					invokedListeners++
					go entry.listener.OnStatus(ctx, info.txID, info.status, info.message, info.requestHash)
				}
			}
		}
	}

	logger.Infof("Invoked %d listeners for %d TxIDs: [%v]. Removing listeners...", invokedListeners, len(invokedTxIDs), invokedTxIDs)

	// Cache the events so that listeners registered from now on can be served directly.
	for _, txInfos := range txs {
		for ns, info := range txInfos {
			logger.Infof("Mapping for ns [%s]", ns)
			m.txInfos.Put(info.txID, info)
		}
	}
	logger.Infof("Current size of cache: %d", m.txInfos.Len())

	m.listeners.Delete(invokedTxIDs...)

	logger.Infof("Removed listeners for %d invoked TxIDs: %v", len(invokedTxIDs), invokedTxIDs)

	return nil
}

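// AddFinalityListener registers a listener for the finality of the given
// transaction. If the finality event is already cached, the listener is
// invoked immediately; otherwise it is stored until the transaction arrives.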
func (m *deliveryBasedFLM) AddFinalityListener(namespace string, txID string, listener driver.FinalityListener) error {
	// Fast path: serve the cached event under the read lock.
	m.mu.RLock()
	if txInfo, ok := m.txInfos.Get(txID); ok {
		defer m.mu.RUnlock()
		logger.Infof("Found tx [%s]. Invoking listener directly", txID)
		go listener.OnStatus(context.TODO(), txInfo.txID, txInfo.status, txInfo.message, txInfo.requestHash)
		return nil
	}
	m.mu.RUnlock()

	// Slow path: re-check under the write lock (the event may have arrived
	// in the meantime) and, if still missing, register the listener.
	m.mu.Lock()
	defer m.mu.Unlock()
	logger.Infof("Checking if value has been added meanwhile for [%s]", txID)
	if txInfo, ok := m.txInfos.Get(txID); ok {
		logger.Infof("Found tx [%s]! Invoking listener directly", txID)
		go listener.OnStatus(context.TODO(), txInfo.txID, txInfo.status, txInfo.message, txInfo.requestHash)
		return nil
	}
	m.listeners.Update(txID, func(_ bool, listeners []listenerEntry) (bool, []listenerEntry) {
		return true, append(listeners, listenerEntry{namespace, listener})
	})
	return nil
}

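// RemoveFinalityListener removes the given listener registered for txID, if
// present.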
func (m *deliveryBasedFLM) RemoveFinalityListener(txID string, listener driver.FinalityListener) error {
	logger.Infof("Manually invoked listener removal for [%s]", txID)
	m.mu.Lock()
	defer m.mu.Unlock()
	ok := m.listeners.Update(txID, func(_ bool, listeners []listenerEntry) (bool, []listenerEntry) {
		// Filter out every entry holding the given listener; deleting in
		// place while ranging would skip the element after each deletion.
		kept := listeners[:0]
		for _, entry := range listeners {
			if entry.listener != listener {
				kept = append(kept, entry)
			}
		}
		return len(kept) > 0, kept
	})
	if ok {
		return nil
	}
	return errors.Errorf("could not find listener [%v] for txid [%s]", listener, txID)
}

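// txInfo is the finality event extracted from a block for a single
// transaction and namespace.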
type txInfo struct {
	txID        translator.TxID
	status      driver.TxStatus
	message     string
	requestHash []byte
}

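// parallelBlockMapper extracts the token finality events from the
// transactions of a block, using up to cap parallel workers.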
type parallelBlockMapper struct {
	keyTranslator translator.KeyTranslator
	network       string
	cap           int
}

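// NewParallelResponseMapper returns a block mapper that processes up to cap
// transactions in parallel.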
func NewParallelResponseMapper(cap int, network string, keyTranslator translator.KeyTranslator) *parallelBlockMapper {
	return &parallelBlockMapper{cap: cap, network: network, keyTranslator: keyTranslator}
}

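// Map extracts, one entry per transaction in the block, the per-namespace
// finality events; entries for skipped (non-endorser) transactions are nil.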
func (m *parallelBlockMapper) Map(ctx context.Context, block *common.Block) ([]map[driver2.Namespace]txInfo, error) {
	logger.Infof("Mapping block [%d]", block.Header.Number)
	eg := errgroup.Group{}
	eg.SetLimit(m.cap)
	results := make([]map[driver2.Namespace]txInfo, len(block.Data.Data))
	for i, tx := range block.Data.Data {
		i, tx := i, tx // capture the loop variables for the goroutine (required before Go 1.22)
		eg.Go(func() error {
			event, err := m.mapTxInfo(ctx, tx, block.Metadata, block.Header.Number, driver2.TxNum(i))
			if err != nil {
				return err
			}
			results[i] = event
			logger.Infof("Put tx [%d:%d]: [%v]", block.Header.Number, i, event)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}
	return results, nil
}

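// mapTxInfo extracts a finality event for every namespace of the transaction
// whose write set contains the token request key.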
func (m *parallelBlockMapper) mapTxInfo(ctx context.Context, tx []byte, block *common.BlockMetadata, blockNum driver2.BlockNum, txNum driver2.TxNum) (map[driver2.Namespace]txInfo, error) {
	_, payl, chdr, err := fabricutils.UnmarshalTx(tx)
	if err != nil {
		return nil, errors.Wrapf(err, "failed unmarshaling tx [%d:%d]", blockNum, txNum)
	}
	if common.HeaderType(chdr.Type) != common.HeaderType_ENDORSER_TRANSACTION {
		logger.Warnf("Type of TX [%d:%d] is [%d]. Skipping...", blockNum, txNum, chdr.Type)
		return nil, nil
	}
	rwSet, err := rwset.NewEndorserTransactionReader(m.network).Read(payl, chdr)
	if err != nil {
		return nil, errors.Wrapf(err, "failed extracting rwset")
	}
	key, err := m.keyTranslator.CreateTokenRequestKey(chdr.TxId)
	if err != nil {
		return nil, errors.Wrapf(err, "can't create key for token request [%s]", chdr.TxId)
	}
	_, finalityEvent, err := committer.MapFinalityEvent(ctx, block, txNum, chdr.TxId)
	if err != nil {
		return nil, errors.Wrapf(err, "failed mapping finality event")
	}

	txInfos := make(map[driver2.Namespace]txInfo, len(rwSet.WriteSet.Writes))
	logger.Infof("TX [%s] has %d namespaces", chdr.TxId, len(rwSet.WriteSet.Writes))
	for ns, write := range rwSet.WriteSet.Writes {
		logger.Infof("TX [%s:%s] has %d writes", chdr.TxId, ns, len(write))
		if requestHash, ok := write[key]; ok {
			txInfos[ns] = txInfo{
				txID:        chdr.TxId,
				status:      finalityEvent.ValidationCode,
				message:     finalityEvent.ValidationMessage,
				requestHash: requestHash,
			}
		} else {
			logger.Warnf("TX [%s:%s] did not have key [%s]. Found: %v", chdr.TxId, ns, key, write.Keys())
		}
	}
	return txInfos, nil
}