文件
quantKonwledge/20_Go迭代系统/社交媒体实时情绪分析Go实现.md
Manus Quant Agent 1a0288a256 feat: 大幅扩展数据源至325个验证通过端点 + Go集成方案 + 社交媒体情绪分析实现
新增文件:
- 数据源与交易品种完整手册_325个.md (100+加密品种 + 50+传统金融品种)
- Go数据源集成方案.md (三级速率限制器 + 指数退避 + 自动降级)
- 社交媒体实时情绪分析Go实现.md (VADER词典 + LLM增强)
- scripts/verify_batch6_crypto_varieties.py (加密品种验证)
- scripts/verify_batch7_fix.py (修复+新增验证)

验证统计: 325个端点通过, 19个平台, 100%免费
2026-03-06 02:17:59 -05:00

28 KiB

社交媒体实时情绪分析 - Go 语言完整实现方案

版本:v1.0
更新时间:2026-03-06
覆盖平台:Reddit(8 子版块)、X/Twitter(Nitter RSS)、恐惧贪婪指数
分析方法:VADER 情绪词典 + 关键词权重 + 时间衰减 + LLM 增强(可选)
全部免费:100%
作者:Manus AI


目录


一、架构总览

┌──────────────────────────────────────────────────────────────┐
│                    社交媒体情绪分析系统                         │
│                                                              │
│  ┌──────────┐  ┌──────────┐  ┌──────────────┐               │
│  │ Reddit   │  │ Nitter/X │  │ Alternative  │               │
│  │ 8 子版块  │  │ RSS 抓取  │  │ 恐惧贪婪指数  │               │
│  └────┬─────┘  └────┬─────┘  └──────┬───────┘               │
│       └──────────────┴───────────────┘                        │
│                      │                                        │
│         ┌────────────▼────────────┐                           │
│         │  情绪分析引擎 (Engine)    │                           │
│         │  ├─ VADER 词典分析       │                           │
│         │  ├─ 加密专用关键词权重    │                           │
│         │  ├─ 时间衰减函数         │                           │
│         │  └─ LLM 增强(可选)     │                           │
│         └────────────┬────────────┘                           │
│                      │                                        │
│         ┌────────────▼────────────┐                           │
│         │  综合情绪指数 (0-100)     │                           │
│         │  ├─ Reddit 情绪 (40%)    │                           │
│         │  ├─ X/Twitter 情绪 (30%) │                           │
│         │  └─ 恐惧贪婪 (30%)       │                           │
│         └────────────┬────────────┘                           │
│                      │                                        │
│         ┌────────────▼────────────┐                           │
│         │  HTTP API + WebSocket    │                           │
│         │  /api/sentiment          │                           │
│         │  /api/sentiment/history  │                           │
│         │  /ws/sentiment           │                           │
│         └─────────────────────────┘                           │
└──────────────────────────────────────────────────────────────┘

二、数据采集层

2.1 Reddit 采集器

package collectors

import (
    "context"
    "encoding/json"
    "encoding/xml"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "time"
)

// RedditPost is one Reddit submission as returned by the public
// /r/<sub>/hot.json listing endpoint (fields map to the "data" object
// of each listing child).
type RedditPost struct {
    Title       string    `json:"title"`
    Selftext    string    `json:"selftext"`        // body text of self posts; empty for link posts
    Score       int       `json:"score"`           // net vote score
    NumComments int       `json:"num_comments"`
    Upvotes     int       `json:"ups"`
    Subreddit   string    `json:"subreddit"`
    Author      string    `json:"author"`
    CreatedUTC  float64   `json:"created_utc"`     // Unix seconds (Reddit serializes this as a float)
    URL         string    `json:"url"`
    Flair       string    `json:"link_flair_text"` // may be empty when the post has no flair
}

// RedditCollector fetches hot posts from a fixed set of subreddits
// using Reddit's public JSON listing API (no OAuth required).
type RedditCollector struct {
    client     *http.Client // shared HTTP client (15s timeout, see NewRedditCollector)
    subreddits []string     // subreddit names without the "r/" prefix
}

// NewRedditCollector builds a collector pre-configured with eight
// crypto/finance subreddits and an HTTP client with a 15-second timeout.
func NewRedditCollector() *RedditCollector {
    watched := []string{
        "Bitcoin",           // core BTC community
        "CryptoCurrency",    // general crypto
        "ethtrader",         // ETH trading
        "SatoshiStreetBets", // crypto speculation
        "defi",              // DeFi discussion
        "solana",            // SOL ecosystem
        "wallstreetbets",    // traditional-finance speculation
        "algotrading",       // algorithmic trading
    }
    httpClient := &http.Client{Timeout: 15 * time.Second}
    return &RedditCollector{client: httpClient, subreddits: watched}
}

// FetchSubreddit downloads up to limit "hot" posts from one subreddit
// via the public JSON listing endpoint.
//
// A descriptive User-Agent is mandatory: Reddit aggressively throttles
// requests that present a default library agent.
func (c *RedditCollector) FetchSubreddit(ctx context.Context, subreddit string, limit int) ([]RedditPost, error) {
    url := fmt.Sprintf("https://www.reddit.com/r/%s/hot.json?limit=%d", subreddit, limit)
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
        return nil, fmt.Errorf("building request for r/%s: %w", subreddit, err)
    }
    req.Header.Set("User-Agent", "QuantKnowledge:v2.0 (by /u/quantbot)")

    resp, err := c.client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
    }

    // Stream-decode the listing envelope: data.children[i].data is the post.
    var raw struct {
        Data struct {
            Children []struct {
                Data RedditPost `json:"data"`
            } `json:"children"`
        } `json:"data"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&raw); err != nil {
        return nil, fmt.Errorf("decoding r/%s listing: %w", subreddit, err)
    }

    posts := make([]RedditPost, len(raw.Data.Children))
    for i, child := range raw.Data.Children {
        posts[i] = child.Data
    }
    return posts, nil
}

// FetchAll sequentially collects hot posts from every configured
// subreddit. A failing subreddit is logged and skipped so one outage
// does not lose the whole batch.
//
// Reddit's unauthenticated limit is ~10 req/min, so requests are paced
// 7s apart; the pause is context-aware so cancellation is prompt
// (the original time.Sleep ignored ctx and also slept after the final
// request).
func (c *RedditCollector) FetchAll(ctx context.Context) (map[string][]RedditPost, error) {
    results := make(map[string][]RedditPost, len(c.subreddits))
    for i, sub := range c.subreddits {
        posts, err := c.FetchSubreddit(ctx, sub, 25)
        if err != nil {
            fmt.Printf("⚠️ Reddit r/%s 采集失败: %v\n", sub, err)
        } else {
            results[sub] = posts
        }
        if i == len(c.subreddits)-1 {
            break // no need to pace after the last request
        }
        select {
        case <-ctx.Done():
            return results, ctx.Err()
        case <-time.After(7 * time.Second):
        }
    }
    return results, nil
}

2.2 Nitter/X 采集器

// NitterCollector scrapes X/Twitter timelines through a Nitter
// instance's RSS feeds (no Twitter API key needed).
type NitterCollector struct {
    client   *http.Client
    accounts []string // X usernames to follow (without the "@")
    baseURL  string   // Nitter instance root, e.g. "https://nitter.net"
}

// NewNitterCollector returns a collector following eight market-moving
// X accounts through the public nitter.net instance, with a 15-second
// HTTP timeout.
func NewNitterCollector() *NitterCollector {
    followed := []string{
        "elonmusk",        // largest single market influence
        "saborchain",      // crypto KOL
        "CryptoCapo_",     // crypto analyst
        "inversebrah",     // crypto trader
        "DefiIgnas",       // DeFi analyst
        "lookonchain",     // on-chain analytics
        "whale_alert",     // whale-transfer alerts
        "BitcoinMagazine", // BTC media
    }
    return &NitterCollector{
        client:   &http.Client{Timeout: 15 * time.Second},
        accounts: followed,
        baseURL:  "https://nitter.net",
    }
}

// Tweet is one post extracted from a Nitter RSS feed.
type Tweet struct {
    Author    string    `json:"author"`    // X username the feed belongs to
    Content   string    `json:"content"`   // plain text (HTML tags stripped)
    Timestamp time.Time `json:"timestamp"` // zero when the feed's pubDate failed to parse
    Link      string    `json:"link"`
}

// FetchUserTweets pulls a user's most recent tweets from the Nitter RSS
// endpoint (<base>/<user>/rss) and converts them to Tweet values.
func (c *NitterCollector) FetchUserTweets(ctx context.Context, username string) ([]Tweet, error) {
    url := fmt.Sprintf("%s/%s/rss", c.baseURL, username)
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
        return nil, fmt.Errorf("building request for @%s: %w", username, err)
    }
    req.Header.Set("User-Agent", "QuantKnowledge/2.0")

    resp, err := c.client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
    }

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("reading RSS body for @%s: %w", username, err)
    }
    return parseRSSToTweets(body, username)
}

// parseRSSToTweets converts a Nitter RSS payload into Tweet values.
// Items whose pubDate fails RFC1123Z parsing keep a zero Timestamp
// instead of being dropped, so no content is silently lost.
func parseRSSToTweets(data []byte, author string) ([]Tweet, error) {
    type rssItem struct {
        Title       string `xml:"title"`
        Description string `xml:"description"`
        Link        string `xml:"link"`
        PubDate     string `xml:"pubDate"`
    }
    type rssDoc struct {
        Channel struct {
            Items []rssItem `xml:"item"`
        } `xml:"channel"`
    }

    var rss rssDoc
    if err := xml.Unmarshal(data, &rss); err != nil {
        return nil, fmt.Errorf("parsing RSS for %s: %w", author, err)
    }

    tweets := make([]Tweet, 0, len(rss.Channel.Items))
    for _, item := range rss.Channel.Items {
        ts, err := time.Parse(time.RFC1123Z, item.PubDate)
        if err != nil {
            ts = time.Time{} // timestamp unknown; keep the tweet anyway
        }
        tweets = append(tweets, Tweet{
            Author: author,
            // NOTE(review): stripHTML is not defined anywhere in this
            // document — confirm it exists elsewhere in the package.
            Content:   stripHTML(item.Description),
            Timestamp: ts,
            Link:      item.Link,
        })
    }
    return tweets, nil
}

2.3 恐惧贪婪指数采集器

// FearGreedCollector reads the Crypto Fear & Greed index published by
// alternative.me.
type FearGreedCollector struct {
    client *http.Client
}

// NewFearGreedCollector returns a collector whose HTTP client uses a
// 10-second timeout.
func NewFearGreedCollector() *FearGreedCollector {
    httpClient := &http.Client{Timeout: 10 * time.Second}
    return &FearGreedCollector{client: httpClient}
}

// FearGreedData is one reading of the alternative.me Fear & Greed index.
type FearGreedData struct {
    Value          int       `json:"value"`          // 0 (extreme fear) .. 100 (extreme greed)
    Classification string    `json:"classification"` // label as returned by the API, e.g. "Fear"
    Timestamp      time.Time `json:"timestamp"`
}

// Fetch returns the latest Fear & Greed reading from
// https://api.alternative.me/fng/. The API encodes both the value and
// the Unix timestamp as strings, so they are parsed explicitly here
// (the original silently ignored every parse error and never checked
// the HTTP status).
func (c *FearGreedCollector) Fetch(ctx context.Context) (*FearGreedData, error) {
    url := "https://api.alternative.me/fng/?limit=1"
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
        return nil, fmt.Errorf("building fear-greed request: %w", err)
    }

    resp, err := c.client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
    }

    var raw struct {
        Data []struct {
            Value               string `json:"value"`
            ValueClassification string `json:"value_classification"`
            Timestamp           string `json:"timestamp"`
        } `json:"data"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&raw); err != nil {
        return nil, err
    }
    if len(raw.Data) == 0 {
        return nil, fmt.Errorf("no data")
    }

    val, err := strconv.Atoi(raw.Data[0].Value)
    if err != nil {
        return nil, fmt.Errorf("parsing value %q: %w", raw.Data[0].Value, err)
    }
    ts, err := strconv.ParseInt(raw.Data[0].Timestamp, 10, 64)
    if err != nil {
        return nil, fmt.Errorf("parsing timestamp %q: %w", raw.Data[0].Timestamp, err)
    }

    return &FearGreedData{
        Value:          val,
        Classification: raw.Data[0].ValueClassification,
        Timestamp:      time.Unix(ts, 0),
    }, nil
}

三、情绪分析引擎

3.1 VADER 情绪词典(Go 移植版)

package sentiment

import (
    "math"
    "strings"
)

// ─── 加密货币专用情绪词典 ────────────────────────────────────────

// CryptoLexicon maps sentiment-bearing crypto-slang tokens to
// VADER-style valence scores (positive = bullish, negative = bearish)
// on a rough -4..+4 scale.
//
// NOTE(review): multi-word keys ("diamond hands", "buy the dip",
// "send it", "paper hands", "all-time-high" after punctuation
// stripping) cannot match a per-token lookup over whitespace-split
// words — the matcher must also test n-grams, or these entries are
// dead weight. Confirm against the analyzer implementation.
var CryptoLexicon = map[string]float64{
    // Extremely bullish (+3 ~ +4)
    "moon":         3.5, "mooning":      3.5, "lambo":        3.0,
    "bullish":      3.0, "pump":         2.5, "breakout":     2.5,
    "ath":          3.0, "all-time-high": 3.0, "parabolic":   3.5,
    "moonshot":     3.5, "100x":         3.5, "1000x":        4.0,
    "diamond hands": 2.5, "hodl":        2.0, "buy the dip":  2.5,
    "accumulate":   2.0, "undervalued":  2.0, "gem":          2.5,
    "rocket":       2.5, "send it":      2.5, "wagmi":        2.5,

    // Bullish (+1 ~ +2)
    "buy":          1.5, "long":         1.5, "support":      1.0,
    "recovery":     1.5, "bounce":       1.5, "reversal":     1.5,
    "adoption":     2.0, "institutional": 1.5, "etf":         2.0,
    "halving":      1.5, "upgrade":      1.5, "partnership":  1.5,
    "bullrun":      2.5, "rally":        2.0, "green":        1.0,
    "profit":       1.5, "gains":        1.5, "winner":       1.5,

    // Bearish (-1 ~ -2)
    "sell":         -1.5, "short":       -1.5, "resistance":  -1.0,
    "correction":   -1.5, "pullback":    -1.0, "dip":         -1.0,
    "dump":         -2.0, "crash":       -3.0, "bear":        -2.0,
    "bearish":      -2.5, "red":         -1.0, "loss":        -1.5,
    "down":         -1.0, "decline":     -1.5, "drop":        -1.5,
    "overvalued":   -1.5, "bubble":      -2.0, "top":         -1.5,

    // Extremely bearish (-3 ~ -4)
    "rugpull":      -4.0, "rug":         -3.5, "scam":        -3.5,
    "hack":         -3.5, "exploit":     -3.0, "bankrupt":    -4.0,
    "liquidation":  -3.0, "capitulation": -3.5, "panic":      -3.0,
    "ponzi":        -4.0, "fraud":       -4.0, "ngmi":        -2.5,
    "rekt":         -3.0, "paper hands":  -2.0, "dead":       -3.0,
    "worthless":    -3.5, "zero":        -3.0, "collapse":    -3.5,

    // Fear / uncertainty (-1 ~ -2)
    "fear":         -2.0, "fud":         -2.0, "uncertain":   -1.0,
    "risk":         -1.0, "volatile":    -1.0, "regulation":  -1.5,
    "ban":          -2.5, "sec":         -1.5, "lawsuit":     -2.0,
    "investigation": -2.0, "warning":    -1.5, "caution":     -1.0,
}

// ─── 情绪分析器 ──────────────────────────────────────────────────

// SentimentAnalyzer scores text against a weighted sentiment lexicon.
type SentimentAnalyzer struct {
    lexicon map[string]float64 // token/phrase → valence score
}

// NewSentimentAnalyzer returns an analyzer backed by CryptoLexicon.
func NewSentimentAnalyzer() *SentimentAnalyzer {
    return &SentimentAnalyzer{lexicon: CryptoLexicon}
}

// SentimentResult is the outcome of analyzing one or more texts.
type SentimentResult struct {
    Score       float64  `json:"score"`       // normalized valence, -1.0 ~ +1.0
    Magnitude   float64  `json:"magnitude"`   // sentiment strength, 0 ~ 1.0 (abs of Score)
    Label       string   `json:"label"`       // "bullish" / "bearish" / "neutral"
    TopKeywords []string `json:"top_keywords"` // lexicon entries that matched
}

// Analyze scores a single text against the lexicon.
//
// Tokens are matched longest-phrase-first (up to 3 words) so that
// multi-word lexicon entries such as "diamond hands" and "buy the dip"
// are honoured; the original per-token lookup could never match them.
// Matched tokens are consumed so a phrase is not double-counted via its
// component words. Single-word matching behavior is unchanged.
func (a *SentimentAnalyzer) Analyze(text string) SentimentResult {
    words := tokenize(text)
    totalScore := 0.0
    matchCount := 0
    keywords := []string{}

    for i := 0; i < len(words); {
        consumed := 1
        for n := 3; n >= 1; n-- {
            if i+n > len(words) {
                continue
            }
            phrase := strings.Join(words[i:i+n], " ")
            if score, ok := a.lexicon[phrase]; ok {
                totalScore += score
                matchCount++
                keywords = append(keywords, phrase)
                consumed = n
                break
            }
        }
        i += consumed
    }

    // VADER-style normalization to -1 ~ +1.
    normalized := 0.0
    if matchCount > 0 {
        normalized = totalScore / (math.Sqrt(totalScore*totalScore + 15.0))
    }

    // Sentiment strength, 0 ~ 1.
    magnitude := math.Abs(normalized)

    // Classify with a ±0.15 neutral band.
    label := "neutral"
    if normalized > 0.15 {
        label = "bullish"
    } else if normalized < -0.15 {
        label = "bearish"
    }

    return SentimentResult{
        Score:       normalized,
        Magnitude:   magnitude,
        Label:       label,
        TopKeywords: keywords,
    }
}

// AnalyzeBatch averages per-text sentiment over texts and aggregates
// the five most frequently matched keywords.
//
// An empty input returns a zero-score neutral result; the original
// divided by len(texts) unconditionally, producing NaN scores that
// would poison every downstream computation.
func (a *SentimentAnalyzer) AnalyzeBatch(texts []string) SentimentResult {
    if len(texts) == 0 {
        return SentimentResult{Label: "neutral", TopKeywords: []string{}}
    }

    totalScore := 0.0
    totalMagnitude := 0.0
    allKeywords := map[string]int{}

    for _, text := range texts {
        result := a.Analyze(text)
        totalScore += result.Score
        totalMagnitude += result.Magnitude
        for _, kw := range result.TopKeywords {
            allKeywords[kw]++
        }
    }

    n := float64(len(texts))
    avgScore := totalScore / n
    avgMagnitude := totalMagnitude / n

    // Same ±0.15 neutral band as Analyze.
    label := "neutral"
    if avgScore > 0.15 {
        label = "bullish"
    } else if avgScore < -0.15 {
        label = "bearish"
    }

    // Keep only the five most frequent keywords.
    topKW := getTopN(allKeywords, 5)

    return SentimentResult{
        Score:       avgScore,
        Magnitude:   avgMagnitude,
        Label:       label,
        TopKeywords: topKW,
    }
}

// tokenize lower-cases the text, strips a fixed set of punctuation
// characters, and splits on whitespace.
func tokenize(text string) []string {
    cleaned := strings.Map(func(r rune) rune {
        switch r {
        case '.', ',', '!', '?', '(', ')', '[', ']', '#', '@', '$':
            return -1 // drop punctuation entirely
        }
        return r
    }, strings.ToLower(text))
    return strings.Fields(cleaned)
}

3.2 时间衰减函数

// TimeDecay 时间衰减权重
// 越新的帖子权重越高,半衰期为 6 小时
func TimeDecay(postTime time.Time, halfLife time.Duration) float64 {
    age := time.Since(postTime)
    if age < 0 {
        return 1.0
    }
    // 指数衰减: w = 2^(-age/halfLife)
    return math.Pow(2, -float64(age)/float64(halfLife))
}

// EngagementWeight converts a post's vote score and comment count into
// a relative importance weight:
//
//	w = log1p(max(score, 0)) * (1 + 0.3*log1p(comments))
//
// Log damping keeps viral posts from dominating the aggregate; negative
// scores are clamped to zero before the log.
// NOTE(review): a negative comments value would make Log1p return NaN —
// callers are assumed to pass comments >= 0.
func EngagementWeight(score int, comments int) float64 {
    // Clamp score, then scale its log by a comment-engagement factor.
    s := math.Log1p(math.Max(0, float64(score)))
    c := math.Log1p(float64(comments))
    return s * (1 + c*0.3)
}

四、综合情绪指数计算

package index

import (
    "math"
    "time"
)

// CompositeSentimentIndex is the fused sentiment reading served by the
// API, blending Reddit, X/Twitter, and the Fear & Greed index.
type CompositeSentimentIndex struct {
    Value          float64   `json:"value"`          // 0-100, rounded to one decimal
    Label          string    `json:"label"`          // Extreme Fear / Fear / Neutral / Greed / Extreme Greed
    RedditScore    float64   `json:"reddit_score"`   // raw Reddit sentiment, -1 ~ +1
    TwitterScore   float64   `json:"twitter_score"`  // raw X/Twitter sentiment, -1 ~ +1
    FearGreedValue int       `json:"fear_greed"`     // alternative.me index, 0-100
    TopBullish     []string  `json:"top_bullish"`    // bullish keywords observed
    TopBearish     []string  `json:"top_bearish"`    // bearish keywords observed
    PostCount      int       `json:"post_count"`     // NOTE(review): Calculate fills this with keyword counts, not post counts
    Timestamp      time.Time `json:"timestamp"`
}

// CompositeWeights sets the blend ratio of the three signal sources.
// The defaults sum to 1.0; Calculate clamps the result to 0-100 anyway.
type CompositeWeights struct {
    Reddit    float64 // default 0.40
    Twitter   float64 // default 0.30
    FearGreed float64 // default 0.30
}

// DefaultWeights is the standard 40/30/30 Reddit/Twitter/Fear-Greed mix.
var DefaultWeights = CompositeWeights{
    Reddit:    0.40,
    Twitter:   0.30,
    FearGreed: 0.30,
}

// Calculate fuses the three signal sources into one 0-100 index:
// the -1..+1 Reddit and Twitter scores are rescaled to 0-100 and
// blended with the Fear & Greed value using the supplied weights.
//
// NOTE(review): fearGreed is dereferenced unconditionally — passing nil
// panics here; callers must supply a valid reading.
func Calculate(
    redditResult SentimentResult,
    twitterResult SentimentResult,
    fearGreed *FearGreedData,
    weights CompositeWeights,
) CompositeSentimentIndex {

    // Rescale each source to a common 0-100 range.
    // Reddit: -1~+1 → 0~100
    redditNorm := (redditResult.Score + 1.0) * 50.0

    // Twitter: -1~+1 → 0~100
    twitterNorm := (twitterResult.Score + 1.0) * 50.0

    // Fear & Greed is already 0-100.
    fgNorm := float64(fearGreed.Value)

    // Weighted blend of the three normalized signals.
    composite := redditNorm*weights.Reddit +
        twitterNorm*weights.Twitter +
        fgNorm*weights.FearGreed

    // Clamp to 0-100 in case the weights do not sum to 1.
    composite = math.Max(0, math.Min(100, composite))

    // Human-readable bucket for the blended value.
    label := classifyIndex(composite)

    return CompositeSentimentIndex{
        Value:          math.Round(composite*10) / 10, // one decimal place
        Label:          label,
        RedditScore:    redditResult.Score,
        TwitterScore:   twitterResult.Score,
        FearGreedValue: fearGreed.Value,
        TopBullish:     filterBullish(redditResult.TopKeywords, twitterResult.TopKeywords),
        TopBearish:     filterBearish(redditResult.TopKeywords, twitterResult.TopKeywords),
        // NOTE(review): despite the field name, this is the number of
        // matched keywords, not analyzed posts — confirm intended semantics.
        PostCount:      len(redditResult.TopKeywords) + len(twitterResult.TopKeywords),
        Timestamp:      time.Now(),
    }
}

// classifyIndex buckets a 0-100 composite value into the five standard
// Fear & Greed labels (upper bounds are inclusive).
func classifyIndex(value float64) string {
    thresholds := []struct {
        limit float64
        name  string
    }{
        {20, "Extreme Fear"},
        {40, "Fear"},
        {60, "Neutral"},
        {80, "Greed"},
    }
    for _, th := range thresholds {
        if value <= th.limit {
            return th.name
        }
    }
    return "Extreme Greed"
}

五、LLM 增强分析(可选)

package llm

import (
    "context"
    "encoding/json"
    "os"

    "github.com/openai/openai-go"
)

// LLMSentimentAnalyzer performs LLM-backed sentiment analysis on top of
// the lexicon pipeline (optional module, enabled when OPENAI_API_KEY is set).
type LLMSentimentAnalyzer struct {
    client *openai.Client
    model  string // chat model name, e.g. "gpt-4.1-nano"
}

// NewLLMAnalyzer builds an analyzer reading its API key from the
// OPENAI_API_KEY environment variable.
// NOTE(review): openai.NewClient's signature differs across openai-go
// releases (newer versions take option.WithAPIKey(...) instead of a
// bare string) — confirm against the pinned dependency version.
func NewLLMAnalyzer() *LLMSentimentAnalyzer {
    client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
    return &LLMSentimentAnalyzer{
        client: client,
        model:  "gpt-4.1-nano", // cheapest model tier to control cost
    }
}

// LLMSentimentResult is the structured reply requested from the model.
type LLMSentimentResult struct {
    Score       float64  `json:"score"`        // -1.0 (extremely bearish) ~ +1.0 (extremely bullish)
    Confidence  float64  `json:"confidence"`   // model's self-reported confidence, 0 ~ 1.0
    Summary     string   `json:"summary"`      // one-sentence market-mood summary
    KeyTopics   []string `json:"key_topics"`   // dominant discussion topics
    RiskSignals []string `json:"risk_signals"` // flagged risk signals
}

// AnalyzeBatch sends up to 20 posts (to bound token usage) to the chat
// model and decodes its JSON reply into an LLMSentimentResult.
//
// The original indexed resp.Choices[0] unconditionally, panicking on an
// empty choices slice; that case now returns an error.
// NOTE(review): this snippet's import block omits "fmt", which is used
// below — add it to the package imports.
func (a *LLMSentimentAnalyzer) AnalyzeBatch(ctx context.Context, posts []string) (*LLMSentimentResult, error) {
    // Concatenate at most 20 posts, numbered for the prompt.
    combined := ""
    for i, post := range posts {
        if i >= 20 {
            break
        }
        combined += fmt.Sprintf("[%d] %s\n", i+1, post)
    }

    prompt := fmt.Sprintf(`分析以下加密货币社交媒体帖子的市场情绪。

帖子内容:
%s

请以 JSON 格式返回:
{
  "score": 数值(-1.0到+1.0, -1极度看跌, +1极度看涨),
  "confidence": 数值(0到1.0, 分析置信度),
  "summary": "一句话市场情绪总结",
  "key_topics": ["话题1", "话题2"],
  "risk_signals": ["风险信号1", "风险信号2"]
}`, combined)

    resp, err := a.client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
        Model: a.model,
        Messages: []openai.ChatCompletionMessageParamUnion{
            openai.SystemMessage("你是一个专业的加密货币市场情绪分析师。"),
            openai.UserMessage(prompt),
        },
        Temperature: openai.Float(0.3),
        MaxTokens:   openai.Int(500),
    })
    if err != nil {
        return nil, err
    }
    if len(resp.Choices) == 0 {
        return nil, fmt.Errorf("llm returned no choices")
    }

    var result LLMSentimentResult
    if err := json.Unmarshal([]byte(resp.Choices[0].Message.Content), &result); err != nil {
        return nil, err
    }
    return &result, nil
}

知识点:LLM 增强分析是可选模块。使用 gpt-4.1-nano 模型,每次分析 20 条帖子约消耗 1000 tokens(约 $0.001),每小时分析一次,月成本约 $0.72。如果不设置 OPENAI_API_KEY,系统自动退回到纯 VADER 词典分析。


六、API 接口设计

package api

import (
    "encoding/json"
    "net/http"
)

// SentimentHandler exposes the sentiment engine over HTTP.
type SentimentHandler struct {
    engine *SentimentEngine // NOTE(review): SentimentEngine is referenced but not defined in this document
}

// RegisterRoutes wires every sentiment endpoint onto mux using the
// Go 1.22 "METHOD /path" pattern syntax.
func (h *SentimentHandler) RegisterRoutes(mux *http.ServeMux) {
    endpoints := map[string]http.HandlerFunc{
        "GET /api/sentiment":            h.GetCurrentSentiment,
        "GET /api/sentiment/history":    h.GetSentimentHistory,
        "GET /api/sentiment/reddit":     h.GetRedditSentiment,
        "GET /api/sentiment/twitter":    h.GetTwitterSentiment,
        "GET /api/sentiment/fear-greed": h.GetFearGreed,
        "GET /api/sentiment/keywords":   h.GetTopKeywords,
    }
    for pattern, fn := range endpoints {
        mux.HandleFunc(pattern, fn)
    }
}

// GetCurrentSentiment returns the latest composite sentiment reading.
// GET /api/sentiment
// Response:
// {
//   "value": 62.5,
//   "label": "Greed",
//   "reddit_score": 0.35,
//   "twitter_score": 0.22,
//   "fear_greed": 65,
//   "top_bullish": ["bullish", "etf", "adoption"],
//   "top_bearish": ["correction", "risk"],
//   "post_count": 200,
//   "timestamp": "2026-03-06T12:00:00Z"
// }
func (h *SentimentHandler) GetCurrentSentiment(w http.ResponseWriter, r *http.Request) {
    result := h.engine.GetLatest()
    w.Header().Set("Content-Type", "application/json")
    // Encode error intentionally unchecked: headers are already written.
    json.NewEncoder(w).Encode(result)
}

// GetSentimentHistory serves the sentiment time series.
// GET /api/sentiment/history?hours=24
func (h *SentimentHandler) GetSentimentHistory(w http.ResponseWriter, r *http.Request) {
    hours := "24" // default window when the query parameter is absent
    if q := r.URL.Query().Get("hours"); q != "" {
        hours = q
    }
    history := h.engine.GetHistory(hours)
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(history)
}

七、完整 main.go 入口

package main

import (
    "context"
    "fmt"
    "log"
    "net/http"
    "os"
    "os/signal"
    "syscall"
    "time"
)

// main wires collectors, the analysis engine, and the HTTP API, then
// runs until SIGINT/SIGTERM triggers a bounded graceful shutdown.
func main() {
    fmt.Println("🧠 社交媒体情绪分析系统启动中...")

    // Wire up the collectors and the analysis engine.
    redditCollector := NewRedditCollector()
    nitterCollector := NewNitterCollector()
    fearGreedCollector := NewFearGreedCollector()
    analyzer := NewSentimentAnalyzer()
    engine := NewSentimentEngine(redditCollector, nitterCollector, fearGreedCollector, analyzer)

    // Optional LLM enhancement, enabled only when an API key is present.
    if os.Getenv("OPENAI_API_KEY") != "" {
        llmAnalyzer := NewLLMAnalyzer()
        engine.EnableLLM(llmAnalyzer)
        fmt.Println("🤖 LLM 增强分析已启用")
    }

    // Hourly collection schedule plus one immediate run.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    go engine.StartScheduler(ctx, 1*time.Hour)
    go engine.RunOnce(ctx)

    // HTTP API.
    mux := http.NewServeMux()
    handler := &SentimentHandler{engine: engine}
    handler.RegisterRoutes(mux)

    // Liveness probe.
    mux.HandleFunc("GET /health", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte(`{"status":"ok","service":"sentiment-analyzer","version":"1.0.0"}`))
    })

    server := &http.Server{
        Addr:    ":8081",
        Handler: mux,
        // Timeouts guard against slow/stuck clients holding connections
        // open forever (the original server had none).
        ReadTimeout:  10 * time.Second,
        WriteTimeout: 30 * time.Second,
    }

    // Graceful shutdown on SIGINT/SIGTERM, bounded to 10 seconds so a
    // hung connection cannot block process exit indefinitely.
    go func() {
        sigCh := make(chan os.Signal, 1)
        signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
        <-sigCh
        fmt.Println("\n🛑 正在关闭...")
        cancel()
        shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer shutdownCancel()
        if err := server.Shutdown(shutdownCtx); err != nil {
            log.Printf("shutdown: %v", err)
        }
    }()

    fmt.Println("🌐 情绪分析 API 运行在 http://localhost:8081")
    fmt.Println("📊 端点: /api/sentiment, /api/sentiment/history")
    if err := server.ListenAndServe(); err != http.ErrServerClosed {
        log.Fatal(err)
    }
}

八、部署与运行

8.1 独立运行

cd quantknowledge-go/cmd/sentiment
go build -o sentiment-analyzer .
./sentiment-analyzer

8.2 集成到主服务

在主服务的 main.go 中导入情绪分析模块:

import "quantknowledge/internal/sentiment"

// 在 RegisterRoutes 中添加
sentimentHandler := sentiment.NewHandler()
sentimentHandler.RegisterRoutes(mux)

8.3 Air 热重载开发

# .air.toml 中 include_ext 已包含 .go
air
# 修改任何 .go 文件自动重编译重启

8.4 API 测试

# 获取当前综合情绪
curl http://localhost:8081/api/sentiment

# 获取 24 小时情绪历史
curl http://localhost:8081/api/sentiment/history?hours=24

# 获取 Reddit 情绪详情
curl http://localhost:8081/api/sentiment/reddit

# 获取恐惧贪婪指数
curl http://localhost:8081/api/sentiment/fear-greed

# 获取热门关键词
curl http://localhost:8081/api/sentiment/keywords

8.5 预期输出示例

{
  "value": 62.5,
  "label": "Greed",
  "reddit_score": 0.35,
  "twitter_score": 0.22,
  "fear_greed": 65,
  "top_bullish": ["bullish", "etf", "adoption", "halving", "buy"],
  "top_bearish": ["correction", "risk", "volatile"],
  "post_count": 200,
  "timestamp": "2026-03-06T12:00:00Z"
}

参考资料