Skip to content

Commit 61f369c

Browse files
fix: corrected the cluster that was being saved to mirror node component in remote config, extended dual-cluster-full-test to test mirror node deploy (#1743)
Signed-off-by: Jeromy Cannon <[email protected]>
1 parent fb96fba commit 61f369c

File tree

5 files changed

+217
-112
lines changed

5 files changed

+217
-112
lines changed

src/commands/explorer.ts

+1
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,7 @@ export class ExplorerCommand extends BaseCommand {
7979
private static readonly DEPLOY_FLAGS_LIST = {
8080
required: [],
8181
optional: [
82+
flags.cacheDir,
8283
flags.chartDirectory,
8384
flags.clusterRef,
8485
flags.enableIngress,

src/commands/mirror-node.ts

+6-6
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ import {PathEx} from '../business/utils/path-ex.js';
4040
interface MirrorNodeDeployConfigClass {
4141
chartDirectory: string;
4242
clusterContext: string;
43+
clusterRef: ClusterRef;
4344
namespace: NamespaceName;
4445
enableIngress: boolean;
4546
mirrorStaticIp: string;
@@ -103,6 +104,7 @@ export class MirrorNodeCommand extends BaseCommand {
103104
private static readonly DEPLOY_FLAGS_LIST = {
104105
required: [],
105106
optional: [
107+
flags.cacheDir,
106108
flags.clusterRef,
107109
flags.chartDirectory,
108110
flags.deployment,
@@ -277,9 +279,8 @@ export class MirrorNodeCommand extends BaseCommand {
277279
// user defined values later to override predefined values
278280
ctx.config.valuesArg += await self.prepareValuesArg(ctx.config);
279281

280-
const clusterRef = this.configManager.getFlag<string>(flags.clusterRef) as string;
281-
ctx.config.clusterContext = clusterRef
282-
? this.localConfig.clusterRefs[clusterRef]
282+
ctx.config.clusterContext = ctx.config.clusterRef
283+
? this.localConfig.clusterRefs[ctx.config.clusterRef]
283284
: this.k8Factory.default().contexts().readCurrent();
284285

285286
await self.accountManager.loadNodeClient(
@@ -874,11 +875,10 @@ export class MirrorNodeCommand extends BaseCommand {
874875
task: async (ctx): Promise<void> => {
875876
await this.remoteConfigManager.modify(async remoteConfig => {
876877
const {
877-
config: {namespace},
878+
config: {namespace, clusterRef},
878879
} = ctx;
879-
const cluster = this.remoteConfigManager.currentCluster;
880880

881-
remoteConfig.components.add(new MirrorNodeComponent('mirrorNode', cluster, namespace.name));
881+
remoteConfig.components.add(new MirrorNodeComponent('mirrorNode', clusterRef, namespace.name));
882882
});
883883
},
884884
};

test/e2e/commands/dual-cluster-full.test.ts

+193-19
Original file line numberDiff line numberDiff line change
@@ -35,23 +35,40 @@ import {PodRef} from '../../../src/integration/kube/resources/pod/pod-ref.js';
3535
import {type SoloWinstonLogger} from '../../../src/core/logging/solo-winston-logger.js';
3636
import {type NodeAlias} from '../../../src/types/aliases.js';
3737
import * as constants from '../../../src/core/constants.js';
38+
import {type ExtendedNetServer} from '../../../src/types/index.js';
39+
import http from 'http';
40+
import {sleep} from '../../../src/core/helpers.js';
41+
import {type AccountManager} from '../../../src/core/account-manager.js';
42+
import {
43+
AccountCreateTransaction,
44+
Hbar,
45+
HbarUnit,
46+
PrivateKey,
47+
type TransactionReceipt,
48+
type TransactionResponse,
49+
} from '@hashgraph/sdk';
50+
import {type PackageDownloader} from '../../../src/core/package-downloader.js';
3851

3952
const testName: string = 'dual-cluster-full';
4053

4154
describe('Dual Cluster Full E2E Test', async function dualClusterFullE2eTest(): Promise<void> {
4255
this.bail(true);
4356
const namespace: NamespaceName = NamespaceName.of(testName);
44-
const deployment: string = `${testName}-deployment`;
45-
const testClusterRefs: ClusterRef[] = ['e2e-cluster-alpha', 'e2e-cluster-beta'];
57+
const deployment: DeploymentName = `${testName}-deployment`;
58+
const testClusterArray: ClusterRef[] = ['e2e-cluster-alpha', 'e2e-cluster-beta'];
4659
const soloTestCluster: string = getTestCluster();
4760
const testCluster: string =
4861
soloTestCluster.includes('c1') || soloTestCluster.includes('c2') ? soloTestCluster : `${soloTestCluster}-c1`;
4962
const contexts: string[] = [
5063
`${testCluster}`,
5164
`${testCluster.replace(soloTestCluster.includes('-c1') ? '-c1' : '-c2', soloTestCluster.includes('-c1') ? '-c2' : '-c1')}`,
5265
];
66+
const testClusterRefs: ClusterRefs = {};
67+
testClusterRefs[testClusterArray[0]] = contexts[0];
68+
testClusterRefs[testClusterArray[1]] = contexts[1];
5369
const testCacheDir: string = getTestCacheDir(testName);
5470
let testLogger: SoloWinstonLogger;
71+
const createdAccountIds: string[] = [];
5572

5673
// TODO the kube config context causes issues if it isn't one of the selected clusters we are deploying to
5774
before(async (): Promise<void> => {
@@ -86,13 +103,13 @@ describe('Dual Cluster Full E2E Test', async function dualClusterFullE2eTest():
86103

87104
it(`${testName}: solo cluster-ref connect`, async (): Promise<void> => {
88105
testLogger.info(`${testName}: beginning solo cluster-ref connect`);
89-
for (let index: number = 0; index < testClusterRefs.length; index++) {
90-
await main(soloClusterRefConnectArgv(testClusterRefs[index], contexts[index]));
106+
for (let index: number = 0; index < testClusterArray.length; index++) {
107+
await main(soloClusterRefConnectArgv(testClusterArray[index], contexts[index]));
91108
}
92109
const localConfig: LocalConfig = container.resolve<LocalConfig>(InjectTokens.LocalConfig);
93110
const clusterRefs: ClusterRefs = localConfig.clusterRefs;
94-
expect(clusterRefs[testClusterRefs[0]]).to.equal(contexts[0]);
95-
expect(clusterRefs[testClusterRefs[1]]).to.equal(contexts[1]);
111+
expect(clusterRefs[testClusterArray[0]]).to.equal(contexts[0]);
112+
expect(clusterRefs[testClusterArray[1]]).to.equal(contexts[1]);
96113
testLogger.info(`${testName}: finished solo cluster-ref connect`);
97114
});
98115

@@ -104,22 +121,22 @@ describe('Dual Cluster Full E2E Test', async function dualClusterFullE2eTest():
104121

105122
it(`${testName}: solo deployment add-cluster`, async (): Promise<void> => {
106123
testLogger.info(`${testName}: beginning solo deployment add-cluster`);
107-
for (let index: number = 0; index < testClusterRefs.length; index++) {
108-
await main(soloDeploymentAddClusterArgv(deployment, testClusterRefs[index], 1));
124+
for (let index: number = 0; index < testClusterArray.length; index++) {
125+
await main(soloDeploymentAddClusterArgv(deployment, testClusterArray[index], 1));
109126
}
110127
const remoteConfigManager: RemoteConfigManager = container.resolve(InjectTokens.RemoteConfigManager);
111128
expect(remoteConfigManager.isLoaded(), 'remote config manager should be loaded').to.be.true;
112129
const consensusNodes: Record<string, ConsensusNodeComponent> = remoteConfigManager.components.consensusNodes;
113130
expect(Object.entries(consensusNodes).length, 'consensus node count should be 2').to.equal(2);
114-
expect(consensusNodes['node1'].cluster).to.equal(testClusterRefs[0]);
115-
expect(consensusNodes['node2'].cluster).to.equal(testClusterRefs[1]);
131+
expect(consensusNodes['node1'].cluster).to.equal(testClusterArray[0]);
132+
expect(consensusNodes['node2'].cluster).to.equal(testClusterArray[1]);
116133
testLogger.info(`${testName}: finished solo deployment add-cluster`);
117134
});
118135

119136
it(`${testName}: solo cluster-ref setup`, async (): Promise<void> => {
120137
testLogger.info(`${testName}: beginning solo cluster-ref setup`);
121-
for (let index: number = 0; index < testClusterRefs.length; index++) {
122-
await main(soloClusterRefSetup(testClusterRefs[index]));
138+
for (let index: number = 0; index < testClusterArray.length; index++) {
139+
await main(soloClusterRefSetup(testClusterArray[index]));
123140
}
124141
testLogger.info(`${testName}: finishing solo cluster-ref setup`);
125142
});
@@ -193,11 +210,45 @@ describe('Dual Cluster Full E2E Test', async function dualClusterFullE2eTest():
193210
constants.NETWORK_PROXY_DELAY,
194211
);
195212
expect(haProxyPod).to.have.lengthOf(1);
213+
createdAccountIds.push(await verifyAccountCreateWasSuccessful(namespace, testClusterRefs));
214+
createdAccountIds.push(await verifyAccountCreateWasSuccessful(namespace, testClusterRefs));
196215
}
197216
}).timeout(Duration.ofMinutes(5).toMillis());
198217

199-
// TODO mirror node deploy
218+
it(`${testName}: mirror node deploy`, async (): Promise<void> => {
219+
await main(soloMirrorNodeDeployArgv(deployment, testClusterArray[1]));
220+
await verifyMirrorNodeDeployWasSuccessful(contexts, namespace, testLogger);
221+
// TODO validate the new accounts are showing up with the mirror node rest url
222+
}).timeout(Duration.ofMinutes(10).toMillis());
223+
200224
// TODO explorer deploy
225+
xit(`${testName}: explorer deploy`, async (): Promise<void> => {
226+
await main(soloExplorerDeployArgv(deployment, testClusterArray[1]));
227+
const k8Factory: K8Factory = container.resolve<K8Factory>(InjectTokens.K8Factory);
228+
const k8: K8 = k8Factory.getK8(contexts[1]);
229+
const hederaExplorerPods: Pod[] = await k8
230+
.pods()
231+
.list(namespace, [
232+
'app.kubernetes.io/instance=hedera-explorer',
233+
'app.kubernetes.io/name=hedera-explorer-chart',
234+
'app.kubernetes.io/component=hedera-explorer',
235+
]);
236+
expect(hederaExplorerPods).to.have.lengthOf(1);
237+
let portForwarder: ExtendedNetServer = null;
238+
try {
239+
portForwarder = await k8.pods().readByRef(hederaExplorerPods[0].podRef).portForward(8_080, 8_080);
240+
await sleep(Duration.ofSeconds(2));
241+
const guiUrl: string = 'http://127.0.0.1:8080/localnet/dashboard';
242+
const packageDownloader: PackageDownloader = container.resolve<PackageDownloader>(InjectTokens.PackageDownloader);
243+
expect(await packageDownloader.urlExists(guiUrl), 'the hedera explorer GUI URL should exist').to.be.true;
244+
// TODO validate the new accounts are showing up with the hedera explorer url
245+
} finally {
246+
if (portForwarder) {
247+
await k8.pods().readByRef(null).stopPortForward(portForwarder);
248+
}
249+
}
250+
});
251+
201252
// TODO json rpc relay deploy
202253
// TODO json rpc relay destroy
203254
// TODO explorer destroy
@@ -207,6 +258,7 @@ describe('Dual Cluster Full E2E Test', async function dualClusterFullE2eTest():
207258
await main(soloNetworkDestroyArgv(deployment));
208259
});
209260
});
261+
210262
function newArgv(): string[] {
211263
return ['${PATH}/node', '${SOLO_ROOT}/solo.ts'];
212264
}
@@ -236,7 +288,7 @@ function soloClusterRefConnectArgv(clusterRef: ClusterRef, context: string): str
236288
return argv;
237289
}
238290

239-
function soloDeploymentCreateArgv(deployment: string, namespace: NamespaceName): string[] {
291+
function soloDeploymentCreateArgv(deployment: DeploymentName, namespace: NamespaceName): string[] {
240292
const argv: string[] = newArgv();
241293
argv.push('deployment');
242294
argv.push('create');
@@ -248,7 +300,11 @@ function soloDeploymentCreateArgv(deployment: string, namespace: NamespaceName):
248300
return argv;
249301
}
250302

251-
function soloDeploymentAddClusterArgv(deployment: string, clusterRef: ClusterRef, numberOfNodes: number): string[] {
303+
function soloDeploymentAddClusterArgv(
304+
deployment: DeploymentName,
305+
clusterRef: ClusterRef,
306+
numberOfNodes: number,
307+
): string[] {
252308
const argv: string[] = newArgv();
253309
argv.push('deployment');
254310
argv.push('add-cluster');
@@ -285,7 +341,7 @@ function soloNodeKeysArgv(deployment: DeploymentName): string[] {
285341
return argv;
286342
}
287343

288-
function soloNetworkDeployArgv(deployment: string): string[] {
344+
function soloNetworkDeployArgv(deployment: DeploymentName): string[] {
289345
const argv: string[] = newArgv();
290346
argv.push('network');
291347
argv.push('deploy');
@@ -296,7 +352,7 @@ function soloNetworkDeployArgv(deployment: string): string[] {
296352
return argv;
297353
}
298354

299-
function soloNodeSetupArgv(deployment: string): string[] {
355+
function soloNodeSetupArgv(deployment: DeploymentName): string[] {
300356
const argv: string[] = newArgv();
301357
argv.push('node');
302358
argv.push('setup');
@@ -306,7 +362,7 @@ function soloNodeSetupArgv(deployment: string): string[] {
306362
return argv;
307363
}
308364

309-
function soloNodeStartArgv(deployment: string): string[] {
365+
function soloNodeStartArgv(deployment: DeploymentName): string[] {
310366
const argv: string[] = newArgv();
311367
argv.push('node');
312368
argv.push('start');
@@ -316,7 +372,125 @@ function soloNodeStartArgv(deployment: string): string[] {
316372
return argv;
317373
}
318374

319-
function soloNetworkDestroyArgv(deployment: string): string[] {
375+
async function verifyAccountCreateWasSuccessful(namespace: NamespaceName, clusterRefs: ClusterRefs): Promise<string> {
376+
const accountManager: AccountManager = container.resolve<AccountManager>(InjectTokens.AccountManager);
377+
try {
378+
await accountManager.refreshNodeClient(namespace, clusterRefs);
379+
expect(accountManager._nodeClient).not.to.be.null;
380+
const privateKey: PrivateKey = PrivateKey.generate();
381+
const amount: number = 100;
382+
383+
const newAccount: TransactionResponse = await new AccountCreateTransaction()
384+
.setKeyWithoutAlias(privateKey)
385+
.setInitialBalance(Hbar.from(amount, HbarUnit.Hbar))
386+
.execute(accountManager._nodeClient);
387+
388+
// Get the new account ID
389+
const getReceipt: TransactionReceipt = await newAccount.getReceipt(accountManager._nodeClient);
390+
const accountInfo: {accountId: string; privateKey: string; balance: number; publicKey: string} = {
391+
accountId: getReceipt.accountId.toString(),
392+
privateKey: privateKey.toString(),
393+
publicKey: privateKey.publicKey.toString(),
394+
balance: amount,
395+
};
396+
397+
expect(accountInfo.accountId).not.to.be.null;
398+
expect(accountInfo.balance).to.equal(amount);
399+
400+
return accountInfo.accountId;
401+
} finally {
402+
await accountManager.close();
403+
// @ts-expect-error - TS2341: Property _portForwards is private and only accessible within class AccountManager
404+
expect(accountManager._portForwards, 'port forwards should be empty after accountManager.close()').to.have.lengthOf(
405+
0,
406+
);
407+
}
408+
}
409+
410+
function soloMirrorNodeDeployArgv(deployment: DeploymentName, clusterRef: ClusterRef): string[] {
411+
const argv: string[] = newArgv();
412+
argv.push('mirror-node');
413+
argv.push('deploy');
414+
argv.push(optionFromFlag(Flags.deployment));
415+
argv.push(deployment);
416+
argv.push(optionFromFlag(Flags.clusterRef));
417+
argv.push(clusterRef);
418+
argv.push(optionFromFlag(Flags.pinger));
419+
argvPushGlobalFlags(argv, true, true);
420+
return argv;
421+
}
422+
423+
async function verifyMirrorNodeDeployWasSuccessful(
424+
contexts: string[],
425+
namespace: NamespaceName,
426+
testLogger: SoloWinstonLogger,
427+
): Promise<void> {
428+
const k8Factory: K8Factory = container.resolve<K8Factory>(InjectTokens.K8Factory);
429+
const k8: K8 = k8Factory.getK8(contexts[1]);
430+
const mirrorNodeRestPods: Pod[] = await k8
431+
.pods()
432+
.list(namespace, [
433+
'app.kubernetes.io/instance=mirror',
434+
'app.kubernetes.io/name=rest',
435+
'app.kubernetes.io/component=rest',
436+
]);
437+
expect(mirrorNodeRestPods).to.have.lengthOf(1);
438+
let portForwarder: ExtendedNetServer = null;
439+
try {
440+
portForwarder = await k8.pods().readByRef(mirrorNodeRestPods[0].podRef).portForward(5_551, 5_551);
441+
await sleep(Duration.ofSeconds(2));
442+
const queryUrl: string = 'http://localhost:5551/api/v1/network/nodes';
443+
let received: boolean = false;
444+
// wait until the transaction reached consensus and retrievable from the mirror node API
445+
while (!received) {
446+
const req: http.ClientRequest = http.request(
447+
queryUrl,
448+
{method: 'GET', timeout: 100, headers: {Connection: 'close'}},
449+
(res: http.IncomingMessage): void => {
450+
res.setEncoding('utf8');
451+
res.on('data', (chunk): void => {
452+
// convert chunk to json object
453+
const obj: {nodes: unknown[]} = JSON.parse(chunk);
454+
expect(
455+
obj.nodes?.length,
456+
"expect there to be two nodes in the mirror node's copy of the address book",
457+
).to.equal(2);
458+
// TODO need to enable this, but looks like mirror node currently is getting no service endpoints
459+
// expect(
460+
// obj.nodes[0].service_endpoints?.length,
461+
// 'expect there to be at least one service endpoint',
462+
// ).to.be.greaterThan(0);
463+
received = true;
464+
});
465+
},
466+
);
467+
req.on('error', (e: Error): void => {
468+
testLogger.debug(`problem with request: ${e.message}`, e);
469+
});
470+
req.end(); // make the request
471+
await sleep(Duration.ofSeconds(2));
472+
}
473+
await sleep(Duration.ofSeconds(1));
474+
} finally {
475+
if (portForwarder) {
476+
await k8.pods().readByRef(null).stopPortForward(portForwarder);
477+
}
478+
}
479+
}
480+
481+
function soloExplorerDeployArgv(deployment: DeploymentName, clusterRef: ClusterRef): string[] {
482+
const argv: string[] = newArgv();
483+
argv.push('explorer');
484+
argv.push('deploy');
485+
argv.push(optionFromFlag(Flags.deployment));
486+
argv.push(deployment);
487+
argv.push(optionFromFlag(Flags.clusterRef));
488+
argv.push(clusterRef);
489+
argvPushGlobalFlags(argv, true, true);
490+
return argv;
491+
}
492+
493+
function soloNetworkDestroyArgv(deployment: DeploymentName): string[] {
320494
const argv: string[] = newArgv();
321495
argv.push('network');
322496
argv.push('destroy');

0 commit comments

Comments (0)