Complete Guide to Local AI Deployment in 2025: A Comprehensive Hands-On Tutorial from Ollama to vLLM
import re
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter


def analyze_inference_logs(log_file, time_window="1h"):
    """Analyze inference logs to monitor performance and errors."""
    # Initialize statistics
    request_times = []
    error_count = 0
    model_usage = Counter()
    status_codes = Counter()

    # Regular-expression patterns for the expected log fields
    time_pattern = r'\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\]'
    model_pattern = r'model=(\w+)'
    time_taken_pattern = r'time_taken=(\d+\.\d+)s'
    status_pattern = r'status=(\d{3})'
    error_pattern = r'ERROR|error|Error'

    try:
        with open(log_file, 'r') as f:
            for line in f:
                # Extract the timestamp
                time_match = re.search(time_pattern, line)
                if time_match:
                    # Time-window filtering (based on time_window) could be added here
                    pass

                # Count error lines
                if re.search(error_pattern, line):
                    error_count += 1

                # Extract model usage
                model_match = re.search(model_pattern, line)
                if model_match:
                    model_usage[model_match.group(1)] += 1

                # Extract status codes
                status_match = re.search(status_pattern, line)
                if status_match:
                    status_codes[status_match.group(1)] += 1

                # Extract per-request latency
                time_taken_match = re.search(time_taken_pattern, line)
                if time_taken_match:
                    request_times.append(float(time_taken_match.group(1)))
    except Exception as e:
        print(f"Error while analyzing logs: {str(e)}")
        return None

    # Compute performance metrics
    if request_times:
        avg_time = np.mean(request_times)
        min_time = np.min(request_times)
        max_time = np.max(request_times)
        p95_time = np.percentile(request_times, 95)
        p99_time = np.percentile(request_times, 99)
    else:
        avg_time = min_time = max_time = p95_time = p99_time = 0

    # Build the report
    report = {
        "total_requests": len(request_times),
        "error_count": error_count,
        "error_rate": error_count / len(request_times) if request_times else 0,
        "avg_response_time": avg_time,
        "min_response_time": min_time,
        "max_response_time": max_time,
        "p95_response_time": p95_time,
        "p99_response_time": p99_time,
        "model_usage": dict(model_usage),
        "status_codes": dict(status_codes),
    }

    # Visualize the results
    visualize_performance(report)

    return report


def visualize_performance(report):
    """Visualize the performance report."""
    # Create a 2x2 grid of charts
    fig, axs = plt.subplots(2, 2, figsize=(15, 10))

    # Response-time distribution
    times = [report["min_response_time"], report["avg_response_time"],
             report["p95_response_time"], report["p99_response_time"],
             report["max_response_time"]]
    labels = ["Min", "Avg", "p95", "p99", "Max"]
    axs[0, 0].bar(labels, times)
    axs[0, 0].set_title("Response Time Distribution (seconds)")
    axs[0, 0].set_ylabel("Time (s)")

    # Model usage
    if report["model_usage"]:
        models = list(report["model_usage"].keys())
        counts = list(report["model_usage"].values())
        axs[0, 1].pie(counts, labels=models, autopct='%1.1f%%')
        axs[0, 1].set_title("Model Usage Distribution")

    # Status-code distribution
    if report["status_codes"]:
        codes = list(report["status_codes"].keys())
        counts = list(report["status_codes"].values())
        axs[1, 0].bar(codes, counts)
        axs[1, 0].set_title("Status Code Distribution")
        axs[1, 0].set_xlabel("Status Code")
        axs[1, 0].set_ylabel("Count")

    # Success vs. error rate
    error_rate = report["error_rate"] * 100
    success_rate = 100 - error_rate
    axs[1, 1].pie([success_rate, error_rate], labels=["Success", "Error"],
                  autopct='%1.1f%%', colors=['green', 'red'])
    axs[1, 1].set_title("Success vs Error Rate")

    plt.tight_layout()
    plt.savefig("performance_report.png")
    print("Performance report chart saved as performance_report.png")
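Below is a minimal usage sketch for the analyzer above. The file name inference.log and the sample log lines are hypothetical; they are written only so that they match the regex patterns defined in analyze_inference_logs (timestamp, model=, time_taken=...s, status=...), and a real deployment would point the function at its own inference server logs instead.

# Minimal usage sketch: hypothetical file name and log lines, shaped to
# match the regex patterns used by analyze_inference_logs above.
sample_lines = [
    "[2025-11-27 16:29:01,123] INFO model=qwen2 time_taken=0.842s status=200",
    "[2025-11-27 16:29:02,456] ERROR model=llama3 time_taken=3.105s status=500",
]

# Write a tiny sample log for demonstration purposes
with open("inference.log", "w") as f:
    f.write("\n".join(sample_lines))

# Run the analysis and print a few headline metrics
report = analyze_inference_logs("inference.log")
if report:
    print(f"Total requests: {report['total_requests']}")
    print(f"p95 latency: {report['p95_response_time']:.3f}s")
    print(f"Error rate: {report['error_rate']:.1%}")

Besides the printed summary, the run also produces performance_report.png with the four charts generated by visualize_performance.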