AI Article Writing for WeChat Official Accounts: Hands-On Development of Advanced Custom Features with DeepSeek and Doubao

Basic AI Writing Setup and API Integration

To implement advanced AI article-writing features for a WeChat official account, the first step is basic integration with an AI model API. Mainstream AI writing models currently on the market include DeepSeek, Doubao, Gemini, ERNIE Bot (文心一言), and Qwen (通义千问), all of which expose REST APIs for developers.

Taking DeepSeek as an example, integrating its API involves the following steps:


import requests

api_key = "your_deepseek_api_key"
url = "https://api.deepseek.com/v1/chat/completions"

headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {api_key}"
}

data = {
    "model": "deepseek-chat",
    "messages": [
        {"role": "system", "content": "You are a professional WeChat official account article writer."},
        {"role": "user", "content": "Please write a WeChat official account article about AI writing."}
    ],
    "temperature": 0.7,
    "max_tokens": 2000
}

response = requests.post(url, headers=headers, json=data)
result = response.json()
print(result['choices'][0]['message']['content'])
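
These examples assume the request succeeds. In practice it is worth wrapping calls with a timeout, a status check, and retries; a minimal sketch (the backoff values are illustrative, not a DeepSeek recommendation):


import time
import requests

def call_chat_api(url, headers, payload, retries=3, timeout=30):
    # Illustrative retry loop with exponential backoff; tune to your rate limits.
    for attempt in range(retries):
        try:
            response = requests.post(url, headers=headers, json=payload, timeout=timeout)
            response.raise_for_status()  # surface 4xx/5xx responses as exceptions
            return response.json()
        except requests.RequestException:
            if attempt == retries - 1:
                raise
            time.sleep(2 ** attempt)  # wait 1s, 2s, 4s, ...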

Doubao can be called in a similar way. Note that Doubao is ByteDance's model and is served through the Volcano Engine Ark platform, which exposes an OpenAI-compatible chat completions interface; the endpoint URL and model identifier below are placeholders to be replaced with the values from your Ark console:


import requests

api_key = "your_doubao_api_key"
# Placeholder Ark endpoint; check the Volcano Engine docs for the current base URL.
url = "https://ark.cn-beijing.volces.com/api/v3/chat/completions"

headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {api_key}"
}

data = {
    # Ark typically addresses models by an inference endpoint ID created in the console.
    "model": "your_doubao_endpoint_id",
    "messages": [
        {"role": "system", "content": "You are an experienced WeChat official account content creator."},
        {"role": "user", "content": "Please write a WeChat official account article about AI writing."}
    ],
    "temperature": 0.7,
    "max_tokens": 2000,
    "top_p": 0.9
}

response = requests.post(url, headers=headers, json=data)
result = response.json()
print(result['choices'][0]['message']['content'])
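
Because DeepSeek's API and Ark's interface both follow the OpenAI-style chat completions format, the two snippets differ mainly in base URL and model identifier, which is what makes the unified multi-model wrapper later in this tutorial practical.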

Advanced Prompt Engineering and a Template System

The key to high-quality AI article writing for official accounts is an advanced prompt-engineering system. This goes beyond one-off questions: it means designing structured, reusable prompt templates.

Here is an example implementation of such a template system:


class ArticleTemplate:
    def __init__(self, topic, style, length, target_audience, key_points):
        self.topic = topic
        self.style = style
        self.length = length
        self.target_audience = target_audience
        self.key_points = key_points

    def generate_prompt(self):
        prompt = f"""
        Write a WeChat official account article about {self.topic} for {self.target_audience}, in a {self.style} style.
        The article should be about {self.length} characters long.

        Make sure the article covers the following points:
        """

        for point in self.key_points:
            prompt += f"- {point}\n"

        prompt += """
        Structural requirements:
        1. An engaging opening
        2. Three to five main sections, each built around one core point
        3. Practical examples or case studies
        4. A summarizing conclusion

        Use clear, concise language, avoid overly technical jargon, and keep the content easy to understand.
        """

        return prompt

# Usage example
tech_article = ArticleTemplate(
    topic="Applying AI writing tools to WeChat official account operations",
    style="professional but accessible",
    length="1500",
    target_audience="newcomers to official account operations",
    key_points=[
        "How AI writing tools work",
        "How to choose the right AI writing tool",
        "Combining AI writing with human editing",
        "Tips for improving AI writing quality"
    ]
)

prompt = tech_article.generate_prompt()
print(prompt)
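
The template output plugs straight into the chat completions call from the first section. A short sketch, reusing the `call_chat_api` helper defined earlier together with the same `url` and `headers` (a plain `requests.post` works just as well):


# Send the templated prompt to DeepSeek (same endpoint and headers as above).
data = {
    "model": "deepseek-chat",
    "messages": [
        {"role": "system", "content": "You are a professional WeChat official account article writer."},
        {"role": "user", "content": prompt}
    ],
    "temperature": 0.7,
    "max_tokens": 2000
}
result = call_chat_api(url, headers, data)
print(result['choices'][0]['message']['content'])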

Custom Knowledge Base and Context Management

To make the generated articles more targeted and professional, we need a custom knowledge base so the AI can draw on domain-specific material.

Below is a lightweight knowledge-base implementation. For simplicity it stands in for a full vector database with in-memory TF-IDF vectors, but the retrieval interface is the same:


import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

class KnowledgeBase:
    def __init__(self):
        self.documents = []
        # Note: TfidfVectorizer's default tokenizer assumes space-delimited text;
        # for Chinese documents, pass a segmenting tokenizer (e.g. jieba.lcut).
        self.vectorizer = TfidfVectorizer()
        self.doc_vectors = None

    def add_document(self, title, content):
        self.documents.append({"title": title, "content": content})
        # Recompute the vectors for all documents
        contents = [doc["content"] for doc in self.documents]
        self.doc_vectors = self.vectorizer.fit_transform(contents)

    def search(self, query, top_k=3):
        query_vec = self.vectorizer.transform([query])
        similarities = cosine_similarity(query_vec, self.doc_vectors)
        top_indices = np.argsort(similarities[0])[-top_k:][::-1]

        results = []
        for idx in top_indices:
            results.append({
                "title": self.documents[idx]["title"],
                "content": self.documents[idx]["content"],
                "similarity": similarities[0][idx]
            })

        return results

# Usage example
kb = KnowledgeBase()
kb.add_document("AI writing basics", "AI writing is the process of generating text automatically with artificial intelligence...")
kb.add_document("Official account operation tips", "Successful official account operation requires attention to content quality, publishing cadence, and reader engagement...")

# Search for relevant documents
results = kb.search("How to improve AI writing quality")
for result in results:
    print(f"Title: {result['title']}")
    print(f"Similarity: {result['similarity']}")
    print(f"Content snippet: {result['content'][:100]}...")
    print("---")

Combining the knowledge base with AI writing yields a smarter writing pipeline:


import requests

def generate_article_with_knowledge(topic, knowledge_base, api_key):
    # First retrieve relevant information from the knowledge base
    relevant_docs = knowledge_base.search(topic)

    # Build a prompt that embeds the retrieved material
    context = "\n\n".join([f"{doc['title']}: {doc['content']}" for doc in relevant_docs])

    prompt = f"""
    Using the reference material below, write a WeChat official account article about {topic}:

    Reference material:
    {context}

    Ground the article in the reference material, while adding your own insights and analysis.
    The article should be clearly structured, with an introduction, a body, and a conclusion.
    """

    # Call the AI API to generate the article
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    data = {
        "model": "deepseek-chat",
        "messages": [
            {"role": "system", "content": "You are a professional WeChat official account article writer."},
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.7,
        "max_tokens": 3000
    }

    response = requests.post("https://api.deepseek.com/v1/chat/completions", headers=headers, json=data)
    result = response.json()

    return result['choices'][0]['message']['content']
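
One practical caveat: the concatenated reference material can exceed the model's context window. A hedged sketch that caps the context by character count before it goes into the prompt (the 6,000-character budget is an arbitrary example, not a DeepSeek limit):


def build_context(docs, max_chars=6000):
    # Greedily pack the highest-similarity documents first and stop before the budget overflows.
    # `docs` are already sorted by similarity, as returned by KnowledgeBase.search.
    parts, used = [], 0
    for doc in docs:
        snippet = f"{doc['title']}: {doc['content']}"
        if used + len(snippet) > max_chars:
            break
        parts.append(snippet)
        used += len(snippet)
    return "\n\n".join(parts)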

Multi-Model Fusion and Quality Optimization

To raise the quality of the generated articles, we can adopt a multi-model fusion strategy that combines the strengths of different AI models. Here is an implementation of such a system:


import requests

class MultiModelWriter:
    def __init__(self, model_configs):
        self.models = model_configs

    def generate_with_model(self, model_name, prompt):
        config = self.models[model_name]

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {config['api_key']}"
        }

        data = {
            "model": config["model_name"],
            "messages": [
                {"role": "system", "content": config["system_prompt"]},
                {"role": "user", "content": prompt}
            ],
            "temperature": config.get("temperature", 0.7),
            "max_tokens": config.get("max_tokens", 2000)
        }

        response = requests.post(config["api_url"], headers=headers, json=data)
        result = response.json()

        # Extract the content according to each model's response format
        if model_name == "deepseek":
            return result['choices'][0]['message']['content']
        elif model_name == "doubao":
            # Ark's OpenAI-compatible interface returns the same shape as DeepSeek
            return result['choices'][0]['message']['content']
        elif model_name == "gemini":
            # Note: Gemini's native REST API expects a different request schema
            # (contents/parts plus a key parameter); sending the OpenAI-style payload
            # above assumes an OpenAI-compatible gateway in front of it.
            return result['candidates'][0]['content']['parts'][0]['text']

    def generate_article(self, topic, style, length):
        # Tailor the prompt to each model
        deepseek_prompt = f"Write a WeChat official account article about {topic} in a {style} style, about {length} characters long."
        doubao_prompt = f"Create a WeChat official account article of roughly {length} characters on the topic of {topic}, in a {style} style."
        gemini_prompt = f"Write a WeChat official account article about {topic} in {style} style, approximately {length} characters."

        # Generate several versions (sequential here; could be parallelized with threads)
        articles = {
            "deepseek": self.generate_with_model("deepseek", deepseek_prompt),
            "doubao": self.generate_with_model("doubao", doubao_prompt),
            "gemini": self.generate_with_model("gemini", gemini_prompt)
        }

        return articles

# Usage example
model_configs = {
    "deepseek": {
        "api_key": "your_deepseek_api_key",
        "api_url": "https://api.deepseek.com/v1/chat/completions",
        "model_name": "deepseek-chat",
        "system_prompt": "You are a professional WeChat official account article writer.",
        "temperature": 0.7,
        "max_tokens": 2000
    },
    "doubao": {
        "api_key": "your_doubao_api_key",
        # Placeholder Ark endpoint and endpoint ID, as in the earlier example
        "api_url": "https://ark.cn-beijing.volces.com/api/v3/chat/completions",
        "model_name": "your_doubao_endpoint_id",
        "system_prompt": "You are an experienced WeChat official account content creator.",
        "temperature": 0.8,
        "max_tokens": 2000
    },
    "gemini": {
        "api_key": "your_gemini_api_key",
        "api_url": "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
        "model_name": "gemini-pro",
        "system_prompt": "You are a professional WeChat official account article writer",
        "temperature": 0.7,
        "max_tokens": 2000
    }
}

writer = MultiModelWriter(model_configs)
articles = writer.generate_article("Trends in AI writing tools", "professional but accessible", "1500")

for model, article in articles.items():
    print(f"=== Article generated by {model} ===")
    print(article[:200] + "...")
    print("\n")

Automated Content Optimization and SEO Enhancement

To improve how well the generated articles perform in search, we can implement an automated content optimization system:


import re
from collections import Counter

class ContentOptimizer:
    def __init__(self, target_keywords):
        self.target_keywords = target_keywords

    def analyze_keyword_density(self, content):
        # Clean the text by stripping punctuation
        clean_content = re.sub(r'[^\w\s]', '', content.lower())
        words = clean_content.split()
        total_words = len(words)

        # Compute keyword density
        keyword_counts = Counter(words)
        keyword_density = {}

        for keyword in self.target_keywords:
            keyword_lower = keyword.lower()
            if keyword_lower in keyword_counts:
                density = (keyword_counts[keyword_lower] / total_words) * 100
                keyword_density[keyword] = density

        return keyword_density

    def optimize_headings(self, content):
        # Make sure the H1 contains the primary keyword
        lines = content.split('\n')
        optimized_lines = []

        for line in lines:
            # Check for heading lines (starting with '#')
            if line.startswith('# '):
                # H1 heading
                if not any(keyword in line for keyword in self.target_keywords[:1]):
                    # If the H1 lacks the primary keyword, prepend it
                    line = f"# {self.target_keywords[0]}:{line[2:]}"
            elif line.startswith('## '):
                # H2 heading
                if not any(keyword in line for keyword in self.target_keywords[:2]):
                    # If the H2 lacks the top two keywords, try to add one
                    for keyword in self.target_keywords[:2]:
                        if keyword not in line:
                            line = f"## {keyword} and {line[3:]}"
                            break

            optimized_lines.append(line)

        return '\n'.join(optimized_lines)

    def add_internal_links(self, content, existing_articles):
        # Insert internal links into the content
        for keyword in self.target_keywords:
            # Find keyword occurrences (note: \b word boundaries are unreliable around CJK text)
            keyword_pattern = re.compile(rf'\b{re.escape(keyword)}\b', re.IGNORECASE)

            # Replace the keyword with a link
            def replace_with_link(match):
                # Look for related existing articles
                related_articles = [article for article in existing_articles if keyword.lower() in article['title'].lower()]
                if related_articles:
                    # Use the first related article
                    article = related_articles[0]
                    return f"[{match.group()}]({article['url']})"
                return match.group()

            content = keyword_pattern.sub(replace_with_link, content)

        return content

    def optimize_content(self, content, existing_articles=None):
        if existing_articles is None:
            existing_articles = []

        # Analyze keyword density
        keyword_density = self.analyze_keyword_density(content)
        print("Keyword density analysis:")
        for keyword, density in keyword_density.items():
            print(f"{keyword}: {density:.2f}%")

        # Optimize headings
        optimized_content = self.optimize_headings(content)

        # Add internal links
        optimized_content = self.add_internal_links(optimized_content, existing_articles)

        return optimized_content

# Usage example
target_keywords = ["AI writing", "official account operations", "content creation", "DeepSeek"]
optimizer = ContentOptimizer(target_keywords)

# Sample article content
sample_content = """
# How to Improve Content Creation Efficiency

In today's digital era, content creation matters more and more. Many companies and individuals are looking for ways to create content more efficiently.

## Choosing Tools

Choosing the right tools is the first step toward higher efficiency. Many tools on the market can help creators finish their work faster.

## Practical Techniques

Beyond tools, practical techniques matter too, such as keeping a content calendar and creating in batches.
"""

# Mock existing articles
existing_articles = [
    {"title": "A Full Review of AI Writing Tools", "url": "https://example.com/ai-writing-tools-review"},
    {"title": "Best Practices for Official Account Operations", "url": "https://example.com/wechat-account-best-practices"}
]

# Optimize the content
optimized_content = optimizer.optimize_content(sample_content, existing_articles)
print("\nOptimized content:")
print(optimized_content)
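
Note that analyze_keyword_density splits on whitespace, which suits the English sample above but not unsegmented Chinese text, where articles for official accounts usually live. A hedged variant using the third-party jieba segmenter (one common choice, not the only one), reusing Counter from the imports above:


import jieba  # third-party segmenter: pip install jieba

# Nudge the segmenter to keep multi-character keywords as single tokens
for kw in ["AI写作", "公众号运营"]:
    jieba.add_word(kw)

def analyze_keyword_density_zh(content, target_keywords):
    # Segment Chinese text into words instead of splitting on whitespace
    words = [w for w in jieba.lcut(content) if w.strip()]
    total_words = len(words)
    counts = Counter(words)
    return {kw: counts[kw] / total_words * 100
            for kw in target_keywords if total_words and counts[kw]}

print(analyze_keyword_density_zh("AI写作正在改变公众号运营的方式", ["AI写作", "公众号运营"]))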

Custom Workflow and Automated Publishing

To fully automate AI article writing for an official account, we build a custom workflow system covering everything from content generation to final publication. The WeChat calls below sketch the access-token and draft APIs; consult the official documentation for the full payloads:


import schedule
import time
import requests
from datetime import datetime

class WeChatPublisher:
    def __init__(self, app_id, app_secret):
        self.app_id = app_id
        self.app_secret = app_secret
        self.access_token = None
        self.token_expires = 0

    def get_access_token(self):
        # Return the cached token while it is still valid
        if self.access_token and time.time() < self.token_expires:
            return self.access_token

        # Otherwise fetch a fresh token from the WeChat API
        url = ("https://api.weixin.qq.com/cgi-bin/token"
               f"?grant_type=client_credential&appid={self.app_id}&secret={self.app_secret}")
        result = requests.get(url).json()
        self.access_token = result["access_token"]
        self.token_expires = time.time() + result["expires_in"] - 60  # renew a minute early
        return self.access_token

    def publish_article(self, title, content, thumb_media_path):
        # Indicative flow: upload the cover image as permanent material, then create a draft.
        token = self.get_access_token()

        upload_url = f"https://api.weixin.qq.com/cgi-bin/material/add_material?access_token={token}&type=image"
        with open(thumb_media_path, "rb") as f:
            thumb_media_id = requests.post(upload_url, files={"media": f}).json()["media_id"]

        draft_url = f"https://api.weixin.qq.com/cgi-bin/draft/add?access_token={token}"
        payload = {"articles": [{
            "title": title,
            "content": content,
            "thumb_media_id": thumb_media_id
        }]}
        return requests.post(draft_url, json=payload).json()["media_id"]

class ArticleWorkflow:
    def __init__(self, ai_writer, content_optimizer, wechat_publisher):
        self.ai_writer = ai_writer
        self.content_optimizer = content_optimizer
        self.wechat_publisher = wechat_publisher
        self.scheduled_articles = []

    def schedule_article(self, topic, style, length, publish_time, thumb_media_path):
        self.scheduled_articles.append({
            "topic": topic,
            "style": style,
            "length": length,
            "publish_time": publish_time,
            "thumb_media_path": thumb_media_path,
            "status": "scheduled"
        })

    def process_scheduled_articles(self):
        current_time = datetime.now()

        for article in self.scheduled_articles:
            if article["status"] == "scheduled" and current_time >= article["publish_time"]:
                try:
                    # Generate the article
                    articles = self.ai_writer.generate_article(
                        article["topic"],
                        article["style"],
                        article["length"]
                    )

                    # Pick the best version (here we simply take the DeepSeek draft)
                    content = articles["deepseek"]

                    # Optimize the content
                    optimized_content = self.content_optimizer.optimize_content(content)

                    # Publish the article
                    media_id = self.wechat_publisher.publish_article(
                        title=article["topic"],
                        content=optimized_content,
                        thumb_media_path=article["thumb_media_path"]
                    )

                    article["status"] = "published"
                    article["media_id"] = media_id
                    article["published_at"] = current_time

                    print(f"Article published successfully: {article['topic']}")
                except Exception as e:
                    article["status"] = "failed"
                    article["error"] = str(e)
                    print(f"Failed to publish article: {article['topic']}, error: {e}")

# Usage example
# Initialize the components
model_configs = {
    # ... as above ...
}
ai_writer = MultiModelWriter(model_configs)

target_keywords = ["AI writing", "official account operations"]
content_optimizer = ContentOptimizer(target_keywords)

wechat_publisher = WeChatPublisher(
    app_id="your_app_id",
    app_secret="your_app_secret"
)

# Create the workflow
workflow = ArticleWorkflow(ai_writer, content_optimizer, wechat_publisher)

# Schedule an article for publication
workflow.schedule_article(
    topic="Applying AI writing tools to WeChat official account operations",
    style="professional but accessible",
    length="1500",
    publish_time=datetime(2025, 8, 31, 10, 0),  # 10:00 on August 31, 2025
    thumb_media_path="path/to/thumb/image.jpg"
)

# Periodically process scheduled articles
def job():
    workflow.process_scheduled_articles()

# Check once every hour
schedule.every().hour.do(job)

while True:
    schedule.run_pending()
    time.sleep(60)
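
If hourly polling is too coarse, the schedule library also supports fixed-time jobs, for example a daily check at 09:00:


schedule.every().day.at("09:00").do(job)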

Performance Evaluation and Continuous Optimization

To keep improving the quality of the generated articles, we need a system that measures performance and feeds the results back into the model configuration:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

class ArticlePerformanceTracker:
    def __init__(self):
        self.articles_data = []

    def add_article_data(self, article_id, title, publish_date, read_count, like_count, comment_count, share_count, ai_model_used, prompt_complexity):
        article_data = {
            "article_id": article_id,
            "title": title,
            "publish_date": publish_date,
            "read_count": read_count,
            "like_count": like_count,
            "comment_count": comment_count,
            "share_count": share_count,
            "ai_model_used": ai_model_used,
            "prompt_complexity": prompt_complexity,
            "engagement_rate": (like_count + comment_count + share_count) / read_count if read_count > 0 else 0
        }

        self.articles_data.append(article_data)

    def get_performance_dataframe(self):
        return pd.DataFrame(self.articles_data)

    def analyze_model_performance(self):
        df = self.get_performance_dataframe()

        # Average performance grouped by model
        model_performance = df.groupby('ai_model_used').agg({
            'read_count': 'mean',
            'like_count': 'mean',
            'comment_count': 'mean',
            'share_count': 'mean',
            'engagement_rate': 'mean'
        })

        return model_performance

    def analyze_prompt_complexity_impact(self):
        df = self.get_performance_dataframe()

        # Correlation between prompt complexity and engagement
        correlation = df[['prompt_complexity', 'engagement_rate']].corr()

        # Visualize the relationship
        plt.figure(figsize=(10, 6))
        plt.scatter(df['prompt_complexity'], df['engagement_rate'])
        plt.title('Prompt complexity vs. engagement rate')
        plt.xlabel('Prompt complexity')
        plt.ylabel('Engagement rate')
        plt.grid(True)

        # Add a trend line
        X = df[['prompt_complexity']]
        y = df['engagement_rate']

        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X)

        model = LinearRegression()
        model.fit(X_scaled, y)

        x_range = np.linspace(X['prompt_complexity'].min(), X['prompt_complexity'].max(), 100)
        x_range_scaled = scaler.transform(x_range.reshape(-1, 1))
        y_range = model.predict(x_range_scaled)

        plt.plot(x_range, y_range, color='red', linewidth=2)

        plt.savefig('prompt_complexity_impact.png')
        plt.close()

        return correlation, 'prompt_complexity_impact.png'

    def generate_performance_report(self):
        model_performance = self.analyze_model_performance()
        correlation, chart_path = self.analyze_prompt_complexity_impact()

        report = f"""
        AI Article Performance Report
        =============================

        Model performance comparison:
        {model_performance.to_string()}

        Correlation between prompt complexity and engagement:
        {correlation.to_string()}

        Chart saved to: {chart_path}
        """

        return report

class AIModelOptimizer:
    def __init__(self, performance_tracker):
        self.performance_tracker = performance_tracker
        self.model_configs = {}
        self.optimization_history = []

    def add_model_config(self, model_name, config):
        self.model_configs[model_name] = config

    def optimize_model_parameters(self, model_name):
        # Fetch this model's historical performance data
        df = self.performance_tracker.get_performance_dataframe()
        model_data = df[df['ai_model_used'] == model_name]

        # Bail out when there is no data for this model
        if len(model_data) == 0:
            return f"{model_name}: no data available to optimize"

        best_config = self.model_configs[model_name].copy()

        # Look at the top 30% of articles by engagement rate
        high_engagement_articles = model_data[model_data['engagement_rate'] > model_data['engagement_rate'].quantile(0.7)]

        if len(high_engagement_articles) > 0:
            # If high-engagement articles tend to use more complex prompts, increase the model's creativity
            if high_engagement_articles['prompt_complexity'].mean() > model_data['prompt_complexity'].mean():
                best_config['temperature'] = min(best_config.get('temperature', 0.7) + 0.1, 1.0)
            else:
                best_config['temperature'] = max(best_config.get('temperature', 0.7) - 0.1, 0.0)

        # Record the optimization
        optimization_record = {
            "model_name": model_name,
            "timestamp": datetime.now(),
            "old_config": self.model_configs[model_name].copy(),
            "new_config": best_config.copy(),
            "reason": "Temperature adjusted based on engagement analysis"
        }

        self.optimization_history.append(optimization_record)

        # Update the model configuration
        self.model_configs[model_name] = best_config

        return f"{model_name} parameters optimized: temperature set to {best_config['temperature']}"

# Usage example
# Initialize the performance tracker
tracker = ArticlePerformanceTracker()

# Add some sample data
tracker.add_article_data(
    article_id="001",
    title="Trends in AI writing tools",
    publish_date="2025-08-20",
    read_count=1500,
    like_count=120,
    comment_count=30,
    share_count=25,
    ai_model_used="deepseek",
    prompt_complexity=0.8
)

tracker.add_article_data(
    article_id="002",
    title="How to improve official account content quality",
    publish_date="2025-08-22",
    read_count=2000,
    like_count=180,
    comment_count=45,
    share_count=40,
    ai_model_used="doubao",
    prompt_complexity=0.6
)

# Initialize the optimizer
optimizer = AIModelOptimizer(tracker)

# Register model configurations
optimizer.add_model_config("deepseek", {
    "temperature": 0.7,
    "max_tokens": 2000,
    "top_p": 0.9
})

optimizer.add_model_config("doubao", {
    "temperature": 0.8,
    "max_tokens": 2000,
    "top_p": 0.85
})

# Generate the performance report
report = tracker.generate_performance_report()
print(report)

# Optimize model parameters
deepseek_optimization = optimizer.optimize_model_parameters("deepseek")
print(deepseek_optimization)

doubao_optimization = optimizer.optimize_model_parameters("doubao")
print(doubao_optimization)
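
To actually close the feedback loop, the tuned parameters must flow back into generation. A small sketch, assuming the `writer` instance from the multi-model section is still in scope:


# Push the tuned temperature/max_tokens back into the writer's per-model configs
for model_name, tuned_config in optimizer.model_configs.items():
    if model_name in writer.models:
        writer.models[model_name].update(tuned_config)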

With the advanced customizations above, we can build a complete AI article-writing system for WeChat official accounts that automates the entire pipeline, from content generation and optimization through publishing and performance evaluation. Such a system not only raises content-creation efficiency, but also keeps improving article quality and reader engagement through continuous optimization.