-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathcommunity_intelligence_system.py
More file actions
1228 lines (1031 loc) · 52.2 KB
/
community_intelligence_system.py
File metadata and controls
1228 lines (1031 loc) · 52.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Community Intelligence System for XMRT-Ecosystem
This module provides advanced community analytics, sentiment analysis, and engagement prediction
for the autonomous DAO platform. It integrates with the existing multi-agent system to provide
intelligent insights into community behavior and trends.
Author: Enhanced by AI Assistant
Version: 1.0.0
License: MIT
"""
import asyncio
import json
import logging
import time
from datetime import datetime, timedelta
from dataclasses import dataclass, asdict, field
from typing import Dict, List, Optional, Tuple, Any, Set
from enum import Enum
import pandas as pd
import numpy as np
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import networkx as nx
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, IsolationForest
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict, Counter
import re
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SentimentType(Enum):
    """Sentiment classification types assigned to analyzed text."""
    POSITIVE = "positive"
    NEGATIVE = "negative"
    NEUTRAL = "neutral"
    # Declared for completeness; the visible classifier in
    # analyze_community_sentiment only assigns the three values above.
    MIXED = "mixed"
class EngagementLevel(Enum):
    """Community engagement levels, ordered from most to least engaged."""
    VERY_HIGH = "very_high"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    VERY_LOW = "very_low"
class CommunityRole(Enum):
    """Community member roles; upgraded by _identify_influencers based on
    computed influence score (LEADER > 0.7, INFLUENCER > 0.5, CONTRIBUTOR > 0.3)."""
    LEADER = "leader"
    INFLUENCER = "influencer"
    CONTRIBUTOR = "contributor"
    OBSERVER = "observer"
    NEW_MEMBER = "new_member"
@dataclass
class SentimentScore:
    """Sentiment analysis results for a single piece of text."""
    compound: float  # averaged VADER compound + TextBlob polarity, in [-1, 1]
    positive: float  # VADER positive proportion
    negative: float  # VADER negative proportion
    neutral: float  # VADER neutral proportion
    classification: SentimentType  # bucketed label derived from `compound`
    confidence: float  # abs(compound); higher = stronger signal
    timestamp: datetime = field(default_factory=datetime.now)  # time of analysis
@dataclass
class CommunityMember:
    """Community member profile accumulated by the intelligence system."""
    user_id: str  # unique identifier; also used as graph node key
    username: str  # display name (may initially equal user_id)
    join_date: datetime
    total_contributions: int
    engagement_score: float  # normalized 0..1; refreshed from recent sentiment
    sentiment_history: List[SentimentScore] = field(default_factory=list)  # capped at 100 entries
    influence_score: float = 0.0  # weighted centrality, set by _identify_influencers
    role: CommunityRole = CommunityRole.NEW_MEMBER
    topics_of_interest: List[str] = field(default_factory=list)
    last_activity: Optional[datetime] = None  # None = never seen active
    reputation_score: float = 0.0
@dataclass
class EngagementMetrics:
    """Point-in-time snapshot of community-wide engagement."""
    total_members: int
    active_members: int  # members active within the recent window
    engagement_rate: float  # active_members / total_members
    sentiment_distribution: Dict[str, float]  # SentimentType value -> proportion
    top_topics: List[str]
    influence_network_density: float
    growth_rate: float
    retention_rate: float
    timestamp: datetime = field(default_factory=datetime.now)  # snapshot time
@dataclass
class CommunityInsight:
    """A single actionable finding produced by an analysis pass."""
    insight_type: str  # machine-readable category, e.g. "negative_sentiment"
    title: str  # short human-readable headline
    description: str
    importance: float  # 0..1 priority weighting
    actionable_recommendations: List[str]
    supporting_data: Dict[str, Any]  # raw metrics backing the insight
    timestamp: datetime = field(default_factory=datetime.now)
class CommunityIntelligenceSystem:
    """
    Advanced Community Intelligence System for analyzing and predicting
    community behavior, sentiment, and engagement patterns.
    """

    def __init__(self, github_manager=None, analytics_engine=None):
        """Initialize analyzers, ML models, and in-memory state.

        Args:
            github_manager: Optional GitHub integration; stored, not used in
                the code visible here.
            analytics_engine: Optional external analytics engine; stored,
                not used in the code visible here.
        """
        self.github_manager = github_manager
        self.analytics_engine = analytics_engine
        self.sentiment_analyzer = SentimentIntensityAnalyzer()  # VADER
        self.members: Dict[str, CommunityMember] = {}  # user_id -> profile
        self.engagement_history: List[EngagementMetrics] = []
        self.insights: List[CommunityInsight] = []
        self.social_graph = nx.DiGraph()  # directed member-interaction graph
        # ML Models
        self.engagement_predictor = None  # placeholder; never trained in visible code
        self.topic_clusterer = KMeans(n_clusters=10, random_state=42)
        self.anomaly_detector = IsolationForest(contamination=0.1, random_state=42)
        self.tfidf_vectorizer = TfidfVectorizer(max_features=1000, stop_words='english')
        # Cache and state
        self.sentiment_cache = {}
        self.topic_cache = {}
        self.last_analysis_time = None
        logger.info("Community Intelligence System initialized")
async def analyze_community_sentiment(self,
                                      text_data: List[str],
                                      user_ids: List[str] = None,
                                      context: str = "general") -> Dict[str, Any]:
    """
    Analyze sentiment patterns in community communications.

    Combines VADER and TextBlob scores per text, classifies each item,
    updates per-member sentiment history (when user_ids are supplied,
    matched to text_data by index), and aggregates distribution, trend,
    and insights.

    Args:
        text_data: List of text content to analyze
        user_ids: Optional list of user IDs corresponding to text (by index)
        context: Context of the analysis (e.g., 'pr_comments', 'issues')

    Returns:
        Comprehensive sentiment analysis results, or an {"error": ...} dict.
    """
    try:
        if not text_data:
            return {"error": "No text data provided"}
        logger.info(f"Analyzing sentiment for {len(text_data)} texts in context: {context}")
        sentiment_scores = []
        individual_sentiments = []
        for i, text in enumerate(text_data):
            # Skip empty/near-empty strings; `i` still tracks the original
            # position so user_ids alignment is preserved.
            if not text or len(text.strip()) < 3:
                continue
            # Multiple sentiment analysis approaches
            vader_score = self.sentiment_analyzer.polarity_scores(text)
            textblob_sentiment = TextBlob(text).sentiment
            # Combine scores for more accurate analysis
            compound = (vader_score['compound'] + textblob_sentiment.polarity) / 2
            positive = vader_score['pos']
            negative = vader_score['neg']
            neutral = vader_score['neu']
            # Classify sentiment using the standard VADER +/-0.05 thresholds.
            if compound >= 0.05:
                classification = SentimentType.POSITIVE
            elif compound <= -0.05:
                classification = SentimentType.NEGATIVE
            else:
                classification = SentimentType.NEUTRAL
            confidence = abs(compound)
            sentiment_score = SentimentScore(
                compound=compound,
                positive=positive,
                negative=negative,
                neutral=neutral,
                classification=classification,
                confidence=confidence
            )
            sentiment_scores.append(sentiment_score)
            individual_sentiments.append({
                # Truncate long texts in the report payload.
                'text': text[:100] + "..." if len(text) > 100 else text,
                'sentiment': asdict(sentiment_score),
                'user_id': user_ids[i] if user_ids and i < len(user_ids) else None
            })
            # Update member sentiment history if user provided
            if user_ids and i < len(user_ids):
                await self._update_member_sentiment(user_ids[i], sentiment_score)
        if not sentiment_scores:
            return {"error": "No valid text data to analyze"}
        # Calculate aggregate metrics (numpy scalars; fine for dict use,
        # NOTE(review): may need float() conversion if json-serialized).
        avg_compound = np.mean([s.compound for s in sentiment_scores])
        avg_positive = np.mean([s.positive for s in sentiment_scores])
        avg_negative = np.mean([s.negative for s in sentiment_scores])
        avg_neutral = np.mean([s.neutral for s in sentiment_scores])
        # Proportion of items in each class (MIXED never assigned above).
        sentiment_distribution = {
            SentimentType.POSITIVE.value: sum(1 for s in sentiment_scores if s.classification == SentimentType.POSITIVE) / len(sentiment_scores),
            SentimentType.NEGATIVE.value: sum(1 for s in sentiment_scores if s.classification == SentimentType.NEGATIVE) / len(sentiment_scores),
            SentimentType.NEUTRAL.value: sum(1 for s in sentiment_scores if s.classification == SentimentType.NEUTRAL) / len(sentiment_scores)
        }
        # Detect sentiment trends
        sentiment_trend = await self._analyze_sentiment_trends(sentiment_scores, context)
        # Generate insights
        insights = await self._generate_sentiment_insights(
            sentiment_scores, sentiment_distribution, sentiment_trend
        )
        analysis_result = {
            'context': context,
            'total_analyzed': len(sentiment_scores),
            'aggregate_sentiment': {
                'compound': avg_compound,
                'positive': avg_positive,
                'negative': avg_negative,
                'neutral': avg_neutral
            },
            'sentiment_distribution': sentiment_distribution,
            'sentiment_trend': sentiment_trend,
            'individual_sentiments': individual_sentiments,
            'insights': insights,
            'timestamp': datetime.now().isoformat()
        }
        logger.info(f"Sentiment analysis completed. Average compound: {avg_compound:.3f}")
        return analysis_result
    except Exception as e:
        logger.error(f"Error in sentiment analysis: {str(e)}")
        return {"error": f"Sentiment analysis failed: {str(e)}"}
async def predict_engagement(self,
                             member_id: str = None,
                             timeframe_days: int = 7) -> Dict[str, Any]:
    """
    Predict future engagement levels for community members.

    Dispatches to a per-member forecast when member_id names a known
    member, otherwise to a community-wide forecast.

    Args:
        member_id: Specific member to predict for (None for community-wide)
        timeframe_days: Days into the future to predict

    Returns:
        Engagement predictions and recommendations, or an {"error": ...} dict.
    """
    try:
        logger.info(f"Predicting engagement for {timeframe_days} days")
        # Route to the individual path only for a known, profiled member.
        if member_id and member_id in self.members:
            return await self._predict_individual_engagement(member_id, timeframe_days)
        # Unknown or absent member id: fall back to the aggregate forecast.
        return await self._predict_community_engagement(timeframe_days)
    except Exception as exc:
        logger.error(f"Error in engagement prediction: {str(exc)}")
        return {"error": f"Engagement prediction failed: {str(exc)}"}
async def analyze_social_network(self) -> Dict[str, Any]:
    """
    Analyze the community social network and influence patterns.

    Lazily builds the interaction graph on first use, then reports basic
    graph metrics plus influencer, community, and information-flow results.

    Returns:
        Social network analysis results, or an {"error": ...} dict on failure.
    """
    try:
        logger.info("Analyzing community social network")
        graph = self.social_graph
        if graph.number_of_nodes() == 0:
            # Graph not built yet; derive it from member data first.
            # (_build_social_graph mutates self.social_graph in place.)
            await self._build_social_graph()
        metrics = {
            'total_nodes': graph.number_of_nodes(),
            'total_edges': graph.number_of_edges(),
            'density': nx.density(graph),
            'average_clustering': nx.average_clustering(graph.to_undirected()),
            'is_connected': nx.is_weakly_connected(graph)
        }
        top_influencers = await self._identify_influencers()
        detected_communities = await self._detect_communities()
        flow_analysis = await self._analyze_information_flow()
        advice = await self._generate_network_recommendations(
            metrics, top_influencers, detected_communities
        )
        logger.info(f"Social network analysis completed. Network density: {metrics['density']:.3f}")
        return {
            'network_metrics': metrics,
            'key_influencers': top_influencers,
            'communities': detected_communities,
            'information_flow': flow_analysis,
            'recommendations': advice,
            'timestamp': datetime.now().isoformat()
        }
    except Exception as exc:
        logger.error(f"Error in social network analysis: {str(exc)}")
        return {"error": f"Social network analysis failed: {str(exc)}"}
async def detect_community_anomalies(self) -> Dict[str, Any]:
    """
    Detect unusual patterns in community behavior.

    Fits an IsolationForest over per-member behavioral feature rows and
    flags rows labeled as outliers.

    Returns:
        Anomaly detection results and alerts, a {"warning": ...} dict when
        there is too little data, or an {"error": ...} dict on failure.
    """
    try:
        logger.info("Detecting community anomalies")
        # One row per member; see _extract_behavioral_features for columns.
        feature_matrix = await self._extract_behavioral_features()
        if len(feature_matrix) < 10:
            # Too few samples to fit a meaningful isolation forest.
            return {"warning": "Insufficient data for anomaly detection"}
        labels = self.anomaly_detector.fit_predict(feature_matrix)
        raw_scores = self.anomaly_detector.score_samples(feature_matrix)
        # fit_predict marks anomalies with -1; lower score_samples values
        # indicate more anomalous points.
        flagged = [
            {
                'index': idx,
                'anomaly_score': score,
                'severity': 'high' if score < -0.5 else 'medium'
            }
            for idx, (label, score) in enumerate(zip(labels, raw_scores))
            if label == -1
        ]
        derived_insights = await self._analyze_anomalies(flagged, feature_matrix)
        advice = await self._generate_anomaly_recommendations(flagged)
        logger.info(f"Anomaly detection completed. Found {len(flagged)} anomalies")
        return {
            'total_data_points': len(feature_matrix),
            'anomalies_detected': len(flagged),
            'anomaly_rate': len(flagged) / len(feature_matrix),
            'anomalies': flagged,
            'insights': derived_insights,
            'recommendations': advice,
            'timestamp': datetime.now().isoformat()
        }
    except Exception as exc:
        logger.error(f"Error in anomaly detection: {str(exc)}")
        return {"error": f"Anomaly detection failed: {str(exc)}"}
async def generate_community_report(self,
                                    timeframe_days: int = 30) -> Dict[str, Any]:
    """
    Generate comprehensive community intelligence report.

    Args:
        timeframe_days: Days to include in the report

    Returns:
        Comprehensive community analysis report, or an {"error": ...} dict.

    Note:
        Several helpers called here (_get_timeframe_sentiment_analysis,
        _calculate_engagement_metrics, _analyze_community_growth,
        _analyze_trending_topics, _generate_key_insights,
        _generate_strategic_recommendations, _create_executive_summary,
        _assess_community_risks) are not visible in this chunk — presumably
        defined later in the file; confirm they exist.
    """
    try:
        logger.info(f"Generating community report for {timeframe_days} days")
        start_date = datetime.now() - timedelta(days=timeframe_days)
        # Gather all analysis components
        sentiment_analysis = await self._get_timeframe_sentiment_analysis(start_date)
        engagement_metrics = await self._calculate_engagement_metrics(start_date)
        social_analysis = await self.analyze_social_network()
        growth_analysis = await self._analyze_community_growth(start_date)
        topic_analysis = await self._analyze_trending_topics(start_date)
        # Generate key insights
        key_insights = await self._generate_key_insights(
            sentiment_analysis, engagement_metrics, social_analysis,
            growth_analysis, topic_analysis
        )
        # Create actionable recommendations
        recommendations = await self._generate_strategic_recommendations(
            sentiment_analysis, engagement_metrics, social_analysis
        )
        report = {
            'report_period': {
                'start_date': start_date.isoformat(),
                'end_date': datetime.now().isoformat(),
                'days_analyzed': timeframe_days
            },
            'executive_summary': await self._create_executive_summary(
                sentiment_analysis, engagement_metrics, growth_analysis
            ),
            'sentiment_analysis': sentiment_analysis,
            'engagement_metrics': engagement_metrics,
            'social_network_analysis': social_analysis,
            'growth_analysis': growth_analysis,
            'topic_analysis': topic_analysis,
            'key_insights': key_insights,
            'strategic_recommendations': recommendations,
            'risk_assessment': await self._assess_community_risks(
                sentiment_analysis, engagement_metrics
            ),
            'generated_at': datetime.now().isoformat()
        }
        logger.info("Community report generated successfully")
        return report
    except Exception as e:
        logger.error(f"Error generating community report: {str(e)}")
        return {"error": f"Report generation failed: {str(e)}"}
async def _update_member_sentiment(self, user_id: str, sentiment_score: SentimentScore):
    """Append a sentiment result to a member's history and refresh their score.

    Creates a stub profile on first sight of user_id, caps the history at
    100 entries, and derives engagement_score from the mean of the last 10
    compound values rescaled from [-1, 1] into [0, 1].
    """
    member = self.members.get(user_id)
    if member is None:
        # First time we see this user: create a minimal placeholder profile.
        member = CommunityMember(
            user_id=user_id,
            username=user_id,  # Will be updated with actual username later
            join_date=datetime.now(),
            total_contributions=0,
            engagement_score=0.0
        )
        self.members[user_id] = member
    member.sentiment_history.append(sentiment_score)
    # Bound memory: retain only the most recent 100 sentiment entries.
    del member.sentiment_history[:-100]
    window = member.sentiment_history[-10:]
    if window:
        mean_compound = np.mean([entry.compound for entry in window])
        # Map the compound range [-1, 1] onto a 0..1 engagement score.
        member.engagement_score = max(0, min(1, (mean_compound + 1) / 2))
async def _analyze_sentiment_trends(self,
                                    sentiment_scores: List[SentimentScore],
                                    context: str) -> Dict[str, Any]:
    """Label the sentiment trajectory by comparing early vs. recent scores.

    Compares the mean compound of the first 10 entries against the last 10
    (windows overlap when fewer than 20 samples exist) with a +/-0.1
    threshold. `context` is accepted for caller parity but unused here.
    """
    try:
        if len(sentiment_scores) < 2:
            return {"trend": "insufficient_data"}
        compounds = [entry.compound for entry in sentiment_scores]
        head_window = compounds[:10]
        tail_window = compounds[-10:]
        recent_avg = np.mean(tail_window)
        early_avg = np.mean(head_window) if len(head_window) > 0 else recent_avg
        delta = recent_avg - early_avg
        if delta > 0.1:
            trend = "improving"
        elif delta < -0.1:
            trend = "declining"
        else:
            trend = "stable"
        return {
            "trend": trend,
            "trend_magnitude": abs(delta),
            # Std-dev over all samples as a volatility proxy.
            "volatility": np.std(compounds),
            "recent_average": recent_avg,
            "early_average": early_avg,
            "sample_size": len(sentiment_scores)
        }
    except Exception as exc:
        logger.error(f"Error analyzing sentiment trends: {str(exc)}")
        return {"trend": "error", "error": str(exc)}
async def _generate_sentiment_insights(self,
                                       sentiment_scores: List[SentimentScore],
                                       distribution: Dict[str, float],
                                       trend: Dict[str, Any]) -> List[CommunityInsight]:
    """Generate actionable insights from sentiment analysis.

    Produces up to three classes of insight: overall sentiment health
    (mean compound > 0.3 or < -0.2), high negativity share (> 30% of
    items negative), and trend momentum (improving/declining). Errors
    are logged and whatever insights were built so far are returned.
    """
    insights = []
    try:
        # Insight 1: Overall sentiment health
        avg_compound = np.mean([s.compound for s in sentiment_scores])
        if avg_compound > 0.3:
            insights.append(CommunityInsight(
                insight_type="positive_sentiment",
                title="Strong Positive Community Sentiment",
                description=f"Community sentiment is strongly positive with an average score of {avg_compound:.3f}",
                importance=0.8,
                actionable_recommendations=[
                    "Leverage positive momentum for major announcements",
                    "Consider launching new community initiatives",
                    "Highlight and celebrate community achievements"
                ],
                supporting_data={"average_sentiment": avg_compound, "sample_size": len(sentiment_scores)}
            ))
        elif avg_compound < -0.2:
            # Negative threshold is looser than the positive one (-0.2 vs 0.3)
            # and gets higher importance (0.9) — negativity is treated as urgent.
            insights.append(CommunityInsight(
                insight_type="negative_sentiment",
                title="Community Sentiment Concerns",
                description=f"Community sentiment shows concerning negative trends with score {avg_compound:.3f}",
                importance=0.9,
                actionable_recommendations=[
                    "Investigate root causes of negative sentiment",
                    "Increase community engagement and communication",
                    "Address specific concerns raised by members",
                    "Consider conducting detailed community feedback sessions"
                ],
                supporting_data={"average_sentiment": avg_compound, "sample_size": len(sentiment_scores)}
            ))
        # Insight 2: Sentiment distribution analysis
        negative_ratio = distribution.get(SentimentType.NEGATIVE.value, 0)
        if negative_ratio > 0.3:
            insights.append(CommunityInsight(
                insight_type="high_negativity",
                title="High Proportion of Negative Sentiment",
                description=f"{negative_ratio*100:.1f}% of communications show negative sentiment",
                importance=0.8,
                actionable_recommendations=[
                    "Implement proactive community moderation",
                    "Create positive engagement campaigns",
                    "Address common pain points in the community"
                ],
                supporting_data={"negative_ratio": negative_ratio, "distribution": distribution}
            ))
        # Insight 3: Trend analysis (trend dict comes from _analyze_sentiment_trends)
        if trend.get("trend") == "declining":
            insights.append(CommunityInsight(
                insight_type="declining_sentiment",
                title="Declining Sentiment Trend Detected",
                description=f"Sentiment has declined by {trend.get('trend_magnitude', 0):.3f} points recently",
                importance=0.9,
                actionable_recommendations=[
                    "Identify and address causes of declining sentiment",
                    "Increase transparency in communications",
                    "Launch targeted improvement initiatives"
                ],
                supporting_data=trend
            ))
        elif trend.get("trend") == "improving":
            insights.append(CommunityInsight(
                insight_type="improving_sentiment",
                title="Positive Sentiment Momentum",
                description=f"Sentiment has improved by {trend.get('trend_magnitude', 0):.3f} points recently",
                importance=0.7,
                actionable_recommendations=[
                    "Maintain current engagement strategies",
                    "Document successful practices for future use",
                    "Consider expanding successful initiatives"
                ],
                supporting_data=trend
            ))
    except Exception as e:
        logger.error(f"Error generating sentiment insights: {str(e)}")
    return insights
async def _predict_individual_engagement(self,
                                         member_id: str,
                                         timeframe_days: int) -> Dict[str, Any]:
    """Predict engagement for a specific member.

    Averages the member's recent sentiment with their current engagement
    score and buckets the result into HIGH/MEDIUM/LOW.
    """
    try:
        member = self.members.get(member_id)
        if not member:
            return {"error": f"Member {member_id} not found"}
        # Extract features for prediction
        # NOTE(review): `features` is never used below, and
        # _extract_member_features is not visible in this chunk — confirm it
        # exists (an AttributeError here would be swallowed by the except).
        features = await self._extract_member_features(member)
        # Simple prediction based on historical patterns
        recent_sentiment = np.mean([s.compound for s in member.sentiment_history[-10:]]) if member.sentiment_history else 0
        engagement_trend = member.engagement_score
        # Predict future engagement level
        predicted_score = (recent_sentiment + engagement_trend) / 2
        if predicted_score > 0.7:
            predicted_level = EngagementLevel.HIGH
        elif predicted_score > 0.4:
            predicted_level = EngagementLevel.MEDIUM
        else:
            predicted_level = EngagementLevel.LOW
        return {
            'member_id': member_id,
            'current_engagement_score': member.engagement_score,
            'predicted_engagement_score': predicted_score,
            'predicted_level': predicted_level.value,
            'confidence': 0.75,  # Placeholder confidence
            'timeframe_days': timeframe_days,
            'recommendations': await self._generate_member_recommendations(member, predicted_level)
        }
    except Exception as e:
        logger.error(f"Error predicting individual engagement: {str(e)}")
        return {"error": f"Individual engagement prediction failed: {str(e)}"}
async def _predict_community_engagement(self, timeframe_days: int) -> Dict[str, Any]:
    """Predict community-wide engagement trends.

    Extrapolates the recent engagement-rate slope (linear fit over up to
    the last 5 history snapshots) forward by timeframe_days, clamped to
    [0, 1]. Confidence grows with the amount of history available.
    """
    try:
        if not self.members:
            return {"error": "No member data available for prediction"}
        # Calculate current community metrics
        total_members = len(self.members)
        # "Active" = any recorded activity within the last 7 days.
        active_members = sum(1 for m in self.members.values()
                             if m.last_activity and
                             (datetime.now() - m.last_activity).days <= 7)
        current_engagement_rate = active_members / total_members if total_members > 0 else 0
        # Analyze engagement trends
        engagement_scores = [m.engagement_score for m in self.members.values()]
        avg_engagement = np.mean(engagement_scores) if engagement_scores else 0
        # Simple trend prediction: slope of a degree-1 polyfit over the
        # last few recorded engagement rates (0 if no usable history).
        if len(self.engagement_history) > 1:
            recent_rates = [h.engagement_rate for h in self.engagement_history[-5:]]
            trend = np.polyfit(range(len(recent_rates)), recent_rates, 1)[0]
        else:
            trend = 0
        # Predict future engagement; slope is per-snapshot, scaled by weeks.
        predicted_rate = max(0, min(1, current_engagement_rate + (trend * timeframe_days / 7)))
        # Determine prediction confidence based on data quality
        # (0.3 base + 0.1 per history snapshot, capped at 0.9).
        confidence = min(0.9, 0.3 + (len(self.engagement_history) * 0.1))
        return {
            'current_metrics': {
                'total_members': total_members,
                'active_members': active_members,
                'engagement_rate': current_engagement_rate,
                'average_engagement_score': avg_engagement
            },
            'prediction': {
                'predicted_engagement_rate': predicted_rate,
                'trend_direction': 'increasing' if trend > 0 else 'decreasing' if trend < 0 else 'stable',
                'trend_magnitude': abs(trend),
                'confidence': confidence,
                'timeframe_days': timeframe_days
            },
            'recommendations': await self._generate_community_recommendations(
                current_engagement_rate, predicted_rate, trend
            )
        }
    except Exception as e:
        logger.error(f"Error predicting community engagement: {str(e)}")
        return {"error": f"Community engagement prediction failed: {str(e)}"}
async def _build_social_graph(self):
    """Build social network graph from member interactions.

    Rebuilds self.social_graph in place: one node per member, then
    synthetic edges between pairs with similar engagement scores.

    NOTE(review): edges are sampled with an unseeded np.random.random(),
    so the resulting graph is nondeterministic across runs — consider
    seeding if reproducible analyses are required.
    """
    try:
        self.social_graph.clear()
        # Add all members as nodes
        for member_id, member in self.members.items():
            self.social_graph.add_node(member_id,
                                       username=member.username,
                                       engagement_score=member.engagement_score,
                                       role=member.role.value)
        # For now, create synthetic relationships based on similar engagement patterns
        # In a real implementation, this would use actual interaction data
        member_list = list(self.members.keys())
        for i, member1 in enumerate(member_list):
            for member2 in member_list[i+1:]:
                score1 = self.members[member1].engagement_score
                score2 = self.members[member2].engagement_score
                # Create edge if engagement scores are similar (indicating potential
                # interaction); ~30% of similar pairs get an edge, weighted by closeness.
                if abs(score1 - score2) < 0.3 and np.random.random() > 0.7:
                    weight = 1 - abs(score1 - score2)
                    self.social_graph.add_edge(member1, member2, weight=weight)
        logger.info(f"Social graph built with {self.social_graph.number_of_nodes()} nodes and {self.social_graph.number_of_edges()} edges")
    except Exception as e:
        logger.error(f"Error building social graph: {str(e)}")
async def _identify_influencers(self) -> List[Dict[str, Any]]:
    """Identify key influencers in the community.

    Combines four centrality measures into a weighted influence score,
    writes it back onto each member profile, may upgrade the member's
    role, and returns the top 20 by influence.

    Side effects: mutates member.influence_score and member.role.
    """
    try:
        if self.social_graph.number_of_nodes() == 0:
            return []
        # Calculate various centrality measures
        degree_centrality = nx.degree_centrality(self.social_graph)
        betweenness_centrality = nx.betweenness_centrality(self.social_graph)
        closeness_centrality = nx.closeness_centrality(self.social_graph)
        # max_iter raised so power iteration converges on larger graphs.
        eigenvector_centrality = nx.eigenvector_centrality(self.social_graph, max_iter=1000)
        influencers = []
        for node in self.social_graph.nodes():
            member = self.members.get(node)
            if member:
                # Weighted blend: degree/betweenness 30% each,
                # closeness/eigenvector 20% each.
                influence_score = (
                    degree_centrality.get(node, 0) * 0.3 +
                    betweenness_centrality.get(node, 0) * 0.3 +
                    closeness_centrality.get(node, 0) * 0.2 +
                    eigenvector_centrality.get(node, 0) * 0.2
                )
                member.influence_score = influence_score
                # Update role based on influence (never downgrades below
                # CONTRIBUTOR threshold; <= 0.3 leaves the role unchanged).
                if influence_score > 0.7:
                    member.role = CommunityRole.LEADER
                elif influence_score > 0.5:
                    member.role = CommunityRole.INFLUENCER
                elif influence_score > 0.3:
                    member.role = CommunityRole.CONTRIBUTOR
                influencers.append({
                    'member_id': node,
                    'username': member.username,
                    'influence_score': influence_score,
                    'role': member.role.value,
                    'engagement_score': member.engagement_score,
                    'centrality_measures': {
                        'degree': degree_centrality.get(node, 0),
                        'betweenness': betweenness_centrality.get(node, 0),
                        'closeness': closeness_centrality.get(node, 0),
                        'eigenvector': eigenvector_centrality.get(node, 0)
                    }
                })
        # Sort by influence score
        influencers.sort(key=lambda x: x['influence_score'], reverse=True)
        return influencers[:20]  # Top 20 influencers
    except Exception as e:
        logger.error(f"Error identifying influencers: {str(e)}")
        return []
async def _detect_communities(self) -> List[Dict[str, Any]]:
    """Detect communities within the social network.

    Uses Louvain partitioning (third-party `python-louvain` package,
    imported lazily) over the undirected projection; falls back to a
    simple engagement-tier grouping when the package is unavailable.
    """
    try:
        if self.social_graph.number_of_nodes() < 3:
            return []
        # Convert to undirected graph for community detection
        undirected_graph = self.social_graph.to_undirected()
        # Use Louvain community detection (lazy import so the ImportError
        # fallback below can catch a missing optional dependency).
        import community as community_louvain
        partition = community_louvain.best_partition(undirected_graph)
        # Organize communities: partition maps node -> community id.
        communities = defaultdict(list)
        for node, comm_id in partition.items():
            communities[comm_id].append(node)
        community_list = []
        for comm_id, members in communities.items():
            if len(members) >= 2:  # Only include communities with at least 2 members
                avg_engagement = np.mean([self.members[m].engagement_score for m in members if m in self.members])
                community_list.append({
                    'community_id': comm_id,
                    'members': members,
                    'size': len(members),
                    'average_engagement': avg_engagement,
                    # "Key" members are those with influence above 0.5.
                    'key_members': [m for m in members if m in self.members and self.members[m].influence_score > 0.5]
                })
        # Sort by size
        community_list.sort(key=lambda x: x['size'], reverse=True)
        return community_list
    except ImportError:
        logger.warning("python-louvain not available, using simple clustering")
        # Fallback: simple clustering based on engagement scores
        return await self._simple_community_detection()
    except Exception as e:
        logger.error(f"Error detecting communities: {str(e)}")
        return []
async def _simple_community_detection(self) -> List[Dict[str, Any]]:
    """Fallback grouping by engagement tier when python-louvain is unavailable.

    Buckets members into high (>0.7), medium (>0.3), and low engagement
    tiers and reports each non-empty tier as a pseudo-community with a
    fixed id (high=0, medium=1, low=2).
    """
    try:
        if not self.members:
            return []
        # Bucket every member into exactly one engagement tier.
        tiers = {'high_engagement': [], 'medium_engagement': [], 'low_engagement': []}
        for uid, profile in self.members.items():
            if profile.engagement_score > 0.7:
                tiers['high_engagement'].append(uid)
            elif profile.engagement_score > 0.3:
                tiers['medium_engagement'].append(uid)
            else:
                tiers['low_engagement'].append(uid)

        def _key_members(tier_name, ids):
            # High tier: everyone counts as key. Medium tier: require
            # influence above 0.4. Low tier: none by definition.
            if tier_name == 'high_engagement':
                return ids
            if tier_name == 'medium_engagement':
                return [m for m in ids if self.members[m].influence_score > 0.4]
            return []

        groups = []
        for comm_id, tier_name in enumerate(('high_engagement', 'medium_engagement', 'low_engagement')):
            ids = tiers[tier_name]
            if not ids:
                continue  # skip empty tiers, but keep fixed ids per tier
            groups.append({
                'community_id': comm_id,
                'members': ids,
                'size': len(ids),
                'average_engagement': np.mean([self.members[m].engagement_score for m in ids]),
                'key_members': _key_members(tier_name, ids),
                'type': tier_name
            })
        return groups
    except Exception as exc:
        logger.error(f"Error in simple community detection: {str(exc)}")
        return []
async def _analyze_information_flow(self) -> Dict[str, Any]:
    """Analyze how information flows through the community network.

    Computes path-length/diameter (on the largest weakly connected
    component when the graph is disconnected), betweenness-based
    bottlenecks, clustering, and a 1/avg_path_length flow-efficiency
    proxy.
    """
    try:
        if self.social_graph.number_of_nodes() == 0:
            return {"error": "No network data available"}
        # Calculate flow metrics; defaults of 0 are kept if the
        # computations below fail or are skipped.
        avg_path_length = 0
        diameter = 0
        try:
            if nx.is_weakly_connected(self.social_graph):
                avg_path_length = nx.average_shortest_path_length(self.social_graph)
                diameter = nx.diameter(self.social_graph)
            else:
                # For disconnected graphs, analyze largest component
                largest_cc = max(nx.weakly_connected_components(self.social_graph), key=len)
                subgraph = self.social_graph.subgraph(largest_cc)
                if len(largest_cc) > 1:
                    avg_path_length = nx.average_shortest_path_length(subgraph)
                    diameter = nx.diameter(subgraph)
        except nx.NetworkXError:
            # e.g. path metrics undefined for this (di)graph; keep the 0 defaults.
            pass
        # Identify information bottlenecks (nodes with high betweenness centrality)
        betweenness = nx.betweenness_centrality(self.social_graph)
        bottlenecks = [node for node, centrality in betweenness.items() if centrality > 0.1]
        # Calculate clustering coefficient
        clustering = nx.average_clustering(self.social_graph.to_undirected())
        return {
            'average_path_length': avg_path_length,
            'network_diameter': diameter,
            'clustering_coefficient': clustering,
            'information_bottlenecks': bottlenecks,
            'connectivity': {
                'is_connected': nx.is_weakly_connected(self.social_graph),
                'number_of_components': nx.number_weakly_connected_components(self.social_graph)
            },
            # Shorter average paths => more efficient spread; 0 when unknown.
            'flow_efficiency': 1 / avg_path_length if avg_path_length > 0 else 0
        }
    except Exception as e:
        logger.error(f"Error analyzing information flow: {str(e)}")
        return {"error": f"Information flow analysis failed: {str(e)}"}
async def _generate_network_recommendations(self,
                                            network_metrics: Dict[str, Any],
                                            influencers: List[Dict[str, Any]],
                                            communities: List[Dict[str, Any]]) -> List[str]:
    """Translate network metrics into human-readable action items.

    Errors are logged and whatever recommendations were built so far are
    returned.
    """
    tips = []
    try:
        # Density: too sparse means weak ties; too dense risks echo chambers.
        density = network_metrics.get('density', 0)
        if density < 0.1:
            tips.append("Network density is low. Consider initiatives to increase member interactions and connections.")
        elif density > 0.7:
            tips.append("Network is highly dense. Focus on efficient information dissemination and preventing echo chambers.")
        # Influencer recommendations (list is pre-sorted, strongest first).
        if influencers:
            if influencers[0].get('influence_score', 0) > 0.8:
                tips.append("Leverage top influencers for important announcements and community initiatives.")
            strong_count = sum(1 for entry in influencers if entry.get('influence_score', 0) > 0.5)
            if strong_count < 3:
                tips.append("Consider developing more community leaders to distribute influence more evenly.")
        # Community structure recommendations.
        if len(communities) > 5:
            tips.append("Multiple communities detected. Consider cross-community engagement initiatives.")
        elif len(communities) == 1:
            tips.append("Single community structure may limit diverse perspectives. Encourage sub-group formation.")
        # Connectivity recommendations.
        if not network_metrics.get('is_connected', False):
            tips.append("Network has disconnected components. Focus on bridging isolated groups.")
    except Exception as exc:
        logger.error(f"Error generating network recommendations: {str(exc)}")
    return tips
async def _extract_behavioral_features(self) -> np.ndarray:
    """Build an (n_members, 9) feature matrix for anomaly detection.

    Columns per member: engagement score, contribution count, influence
    score, mean and variance of compound sentiment, sentiment-history
    length, days since joining, days since last activity (999 when no
    activity recorded), and reputation score. Returns an empty (0, 9)
    array when there are no members or on error.
    """
    try:
        rows = []
        for _, profile in self.members.items():
            compounds = [s.compound for s in profile.sentiment_history]
            # Variance needs at least two samples to be meaningful.
            mean_sentiment = np.mean(compounds) if compounds else 0
            var_sentiment = np.var(compounds) if len(compounds) > 1 else 0
            tenure_days = (datetime.now() - profile.join_date).days if profile.join_date else 0
            # 999 acts as a "never active" sentinel for the detector.
            idle_days = (datetime.now() - profile.last_activity).days if profile.last_activity else 999
            rows.append([
                profile.engagement_score,
                profile.total_contributions,
                profile.influence_score,
                mean_sentiment,
                var_sentiment,
                len(profile.sentiment_history),
                tenure_days,
                idle_days,
                profile.reputation_score
            ])
        # Preserve the 2-D (0, 9) shape even when there are no members.
        return np.array(rows) if rows else np.array([]).reshape(0, 9)
    except Exception as exc:
        logger.error(f"Error extracting behavioral features: {str(exc)}")
        return np.array([]).reshape(0, 9)
async def _analyze_anomalies(self,
anomalies: List[Dict[str, Any]],
features: np.ndarray) -> List[CommunityInsight]:
"""Analyze detected anomalies and generate insights"""
insights = []