@@ -37,6 +37,10 @@ class InvalidOutputPath(Exception):
r = redis.Redis(host=REDIS_HOST)


+OID_IF_DESCR = '1.3.6.1.2.1.2.2.1.2'
+OID_IF_SPEED = '1.3.6.1.2.1.2.2.1.5'
+
+
def _get_previous_counter_value(counter_ident):
    prev_value = r.hgetall(counter_ident)
    if not prev_value:  # empty dict
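For context: both new constants are column OIDs from the standard IF-MIB interfaces table (1.3.6.1.2.1.2.2.1) - .2 is ifDescr and .5 is ifSpeed. A minimal sketch of how easysnmp returns them, with an invented host address and community string:

    from easysnmp import Session

    OID_IF_DESCR = '1.3.6.1.2.1.2.2.1.2'  # IF-MIB::ifDescr
    OID_IF_SPEED = '1.3.6.1.2.1.2.2.1.5'  # IF-MIB::ifSpeed

    # example host/community only - not taken from the PR:
    session = Session(hostname='192.0.2.1', community='public', version=2, use_numeric=True)
    for if_descr, if_speed in zip(session.walk(OID_IF_DESCR), session.walk(OID_IF_SPEED)):
        # each SNMPVariable carries .oid_index (the ifIndex) and .value, e.g. "1: eth0 (1000000000 bps)"
        print(f"{if_descr.oid_index}: {if_descr.value} ({if_speed.value} bps)")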
@@ -195,6 +199,33 @@ def send_results_to_grafolean(backend_url, bot_token, account_id, values):

class SNMPBot(Collector):

+    @staticmethod
+    def _create_snmp_session(job_info):
+        # initialize SNMP session:
+        session_kwargs = {
+            "hostname": job_info["details"]["ipv4"],
+            "use_numeric": True,
+        }
+        cred = job_info["credential_details"]
+        snmp_version = int(cred["version"][5:6])
+        session_kwargs["version"] = snmp_version
+        if snmp_version in [1, 2]:
+            session_kwargs["community"] = cred["snmpv12_community"]
+        elif snmp_version == 3:
+            session_kwargs = {
+                **session_kwargs,
+                "security_username": cred["snmpv3_securityName"],
+                "security_level": cred["snmpv3_securityLevel"],  # easysnmp supports camelCase level names too
+                "privacy_protocol": cred.get("snmpv3_privProtocol", 'DEFAULT'),
+                "privacy_password": cred.get("snmpv3_privKey", ''),
+                "auth_protocol": cred.get("snmpv3_authProtocol", 'DEFAULT'),
+                "auth_password": cred.get("snmpv3_authKey", ''),
+            }
+        else:
+            raise Exception("Invalid SNMP version")
+        session = Session(**session_kwargs)
+        return session
+
    @staticmethod
    def do_snmp(*args, **job_info):
        """
@@ -249,29 +280,7 @@ def do_snmp(*args, **job_info):
            ipv4=job_info["details"]["ipv4"],
        ))

-        # initialize SNMP session:
-        session_kwargs = {
-            "hostname": job_info["details"]["ipv4"],
-            "use_numeric": True,
-        }
-        cred = job_info["credential_details"]
-        snmp_version = int(cred["version"][5:6])
-        session_kwargs["version"] = snmp_version
-        if snmp_version in [1, 2]:
-            session_kwargs["community"] = cred["snmpv12_community"]
-        elif snmp_version == 3:
-            session_kwargs = {
-                **session_kwargs,
-                "security_username": cred["snmpv3_securityName"],
-                "security_level": cred["snmpv3_securityLevel"],  # easysnmp supports camelCase level names too
-                "privacy_protocol": cred.get("snmpv3_privProtocol", 'DEFAULT'),
-                "privacy_password": cred.get("snmpv3_privKey", ''),
-                "auth_protocol": cred.get("snmpv3_authProtocol", 'DEFAULT'),
-                "auth_password": cred.get("snmpv3_authKey", ''),
-            }
-        else:
-            raise Exception("Invalid SNMP version")
-        session = Session(**session_kwargs)
+        session = SNMPBot._create_snmp_session(job_info)

        # filter out only those sensors that are supposed to run at this interval:
        affecting_intervals, = args
@@ -308,6 +317,88 @@ def do_snmp(*args, **job_info):
            send_results_to_grafolean(job_info['backend_url'], job_info['bot_token'], job_info['account_id'], values)


+    @staticmethod
+    def update_if_entities(*args, **job_info):
+        log.info("Running interfaces job for account [{account_id}], IP [{ipv4}]".format(
+            account_id=job_info["account_id"],
+            ipv4=job_info["details"]["ipv4"],
+        ))
+
+        session = SNMPBot._create_snmp_session(job_info)
+
+        parent_entity_id = job_info["entity_id"]
+        account_id = job_info["account_id"]
+        backend_url = job_info['backend_url']
+        bot_token = job_info['bot_token']
+        # fetch interfaces and update the interface entities:
+        result_descr = session.walk(OID_IF_DESCR)
+        result_speed = session.walk(OID_IF_SPEED)
+
+        # make sure that the indexes of the results are aligned - we don't want to record incorrect data:
+        if any([if_speed.oid_index != if_descr.oid_index for if_descr, if_speed in zip(result_descr, result_speed)]):
+            log.warning(f"Out-of-order results for interface names on entity {parent_entity_id}, sorting not yet implemented, bailing out!")
+            return
+
+        # - get those entities on this account which have this entity as their parent, filtered by type ('interface')
+        requests_session = requests.Session()
+        url = f'{backend_url}/accounts/{account_id}/entities/?parent={parent_entity_id}&entity_type=interface&b={bot_token}'
+        r = requests_session.get(url)
+        r.raise_for_status()
+        # existing_entities = {x['details']['snmp_index']: (x['name'], x['details']['speed_bps'], x['id'],) for x in r.json()['list']}
+        # Temporary, until we implement filtering in the API:
+        existing_entities = {x['details']['snmp_index']: (x['name'], x['details']['speed_bps'], x['id'],) for x in r.json()['list'] if x["entity_type"] == 'interface' and x["parent"] == parent_entity_id}
+
+        for if_descr_snmpvalue, if_speed_snmpvalue in zip(result_descr, result_speed):
+            oid_index = if_descr_snmpvalue.oid_index
+            descr = if_descr_snmpvalue.value
+            speed_bps = if_speed_snmpvalue.value
+            # - for each new entity:
+            #   - make sure it exists (if not, create it - POST)
+            if oid_index not in existing_entities:
+                log.debug(f"Entity with OID index {oid_index} is new, inserting.")
+                url = f'{backend_url}/accounts/{account_id}/entities/?b={bot_token}'
+                payload = {
+                    "name": descr,
+                    "entity_type": "interface",
+                    "parent": parent_entity_id,
+                    "details": {
+                        "snmp_index": oid_index,
+                        "speed_bps": speed_bps,
+                    },
+                }
+                r = requests_session.post(url, json=payload)
+                continue
+
+            # - make sure the description and speed are correct (if not, update them - PUT)
+            existing_descr, existing_speed, existing_id = existing_entities[oid_index]
+            if existing_descr != descr or existing_speed != speed_bps:
+                log.debug(f"Entity with OID index {oid_index} changed data, updating.")
+                url = f'{backend_url}/accounts/{account_id}/entities/{existing_id}/?b={bot_token}'
+                payload = {
+                    "name": descr,
+                    "entity_type": "interface",
+                    # "parent": parent_entity_id,  # changing an entity's parent is not possible
+                    "details": {
+                        "snmp_index": oid_index,
+                        "speed_bps": speed_bps,
+                    },
+                }
+                r = requests_session.put(url, json=payload)
+                del existing_entities[oid_index]
+                continue
+
+            # - mark it as processed
+            log.debug(f"Entity with OID index {oid_index} didn't change.")
+            del existing_entities[oid_index]
+
+        # - for every existing entity that is not among the new ones, remove it (there is no point in keeping it - we don't keep old versions of entity data either)
+        for oid_index in existing_entities:
+            _, _, existing_id = existing_entities[oid_index]
+            log.debug(f"Entity with OID index {oid_index} no longer exists, removing.")
+            url = f'{backend_url}/accounts/{account_id}/entities/{existing_id}/?b={bot_token}'
+            r = requests_session.delete(url)
+
+
    def jobs(self):
        """
        Each entity (device) is a single job, no matter how many sensors it has. The reason is
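The create / update / delete decision above is interleaved with the HTTP calls; as a purely illustrative distillation (this helper does not exist in the PR), the same classification over the walked SNMP results and the existing entities looks like this:

    def classify_interfaces(walked, existing):
        # walked:   {oid_index: (descr, speed_bps)}              - from the two SNMP walks
        # existing: {snmp_index: (name, speed_bps, entity_id)}   - from the entities API
        to_create = {i: v for i, v in walked.items() if i not in existing}
        to_update = {i: v for i, v in walked.items() if i in existing and existing[i][:2] != v}
        to_delete = {i: existing[i][2] for i in existing if i not in walked}
        return to_create, to_update, to_delete

    # e.g. classify_interfaces({'1': ('eth0', '1000000000')}, {}) marks interface '1' for creation (POST).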
@@ -316,9 +407,15 @@ def jobs(self):
        for entity_info in self.fetch_job_configs('snmp'):
            intervals = list(set([sensor_info["interval"] for sensor_info in entity_info["sensors"]]))
            job_info = {**entity_info, "backend_url": self.backend_url, "bot_token": self.bot_token}
-            job_id = str(entity_info["entity_id"])
+            job_id = f'{entity_info["entity_id"]}'
            yield job_id, intervals, SNMPBot.do_snmp, job_info

+            # We also collect interface data from each entity; the assumption is that everyone who wants
+            # to use SNMP also wants to know about network interfaces.
+            # Since `job_info` has all the necessary data, we simply pass it along:
+            job_id = f'{entity_info["entity_id"]}-interfaces'
+            yield job_id, [5 * 60], SNMPBot.update_if_entities, job_info
+

def wait_for_grafolean(backend_url):
    while True:
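For a single SNMP-enabled entity, the generator therefore now yields two jobs: the sensor-polling job and the interface-discovery job. A sketch of the yielded tuples, with an invented entity id of 123 and sensor intervals of 60 s and 300 s:

    # ("123",            [60, 300], SNMPBot.do_snmp,            job_info)
    # ("123-interfaces", [5 * 60],  SNMPBot.update_if_entities, job_info)
    #
    # i.e. interface discovery runs every 5 minutes, independently of the sensors' own intervals.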