2 files changed, +38 -1 lines changed

@@ -4,7 +4,9 @@ instances:
 isLocal: true
 # user: liurui
 # password: xxxxxxxx
+# host: 192.168.1.1
 # port: 1414
+# channel: SYSTEM.ADMIN.SVRCONN
 queuesMonitored: Q*
 # customEventQueues:
 # keystore:
@@ -19,11 +21,12 @@ instances:
 prometheus.port: 16543

 - queueManager: SATURN
-isLocal: false
+isLocal: true
 # user: liurui
 # password: xxxxxxxx
 # host: 192.168.1.1
 # port: 1414
+# channel: SYSTEM.ADMIN.SVRCONN
 queuesMonitored: Q*
 # customEventQueues:
 # keystore:
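A note on the change above: isLocal: true means the collector attaches to a queue manager running on the same machine over local bindings, so the connection details can stay commented out; only a remote (client-mode) queue manager needs host, port and a server-connection channel. The exact key names are defined by this exporter's config loader, so the following is only a sketch of what a client-connection instance for SATURN might look like, assembled from the keys and sample values already present in this file (the list layout under instances: is assumed from the dash in front of queueManager):

instances:
  # Hypothetical client-mode variant of the SATURN instance; every key and
  # value below is taken from the file above, only uncommented and indented.
  - queueManager: SATURN
    isLocal: false                  # connect as an MQ client rather than over local bindings
    user: liurui                    # sample MQ user from the file
    password: xxxxxxxx              # placeholder password from the file
    host: 192.168.1.1               # queue manager host
    port: 1414                      # MQ listener port
    channel: SYSTEM.ADMIN.SVRCONN   # server-connection channel
    queuesMonitored: Q*             # monitor every queue matching Q*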
@@ -0,0 +1,34 @@
+# my global config
+global:
+  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+    - static_configs:
+        - targets:
+          # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: "prometheus"
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+      - targets: ["localhost:9090"]
+
+  - job_name: 'ibmmq'
+    scrape_interval: 60s
+    static_configs:
+      - targets: ['192.168.108.128:16543']
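The ibmmq job is what ties the two files together: the target port 16543 matches prometheus.port in the exporter config above, and the per-job scrape_interval of 60s overrides the global 15s for this target only. Assuming the exporter serves its metrics on the default /metrics path (the Prometheus fallback when metrics_path is omitted), the job reads as:

scrape_configs:
  - job_name: 'ibmmq'          # every scraped series gets the label job="ibmmq"
    scrape_interval: 60s       # overrides the global 15s interval for this job only
    # metrics_path: /metrics   # implicit Prometheus default, assumed to match the exporter
    static_configs:
      - targets: ['192.168.108.128:16543']  # host running the exporter; port = prometheus.port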