
[ci]: Introduce a regular inspection mechanism for abnormal CI reports (#10852)

Yuqiang Wang 2 months ago
parent
commit
ba509f92c2

+ 6 - 3
.github/workflows/bsp_buildings.yml

@@ -13,9 +13,6 @@ name: RT-Thread BSP Static Build Check
 # Controls when the action will run. Triggers the workflow on push or pull request
 # events but only for the RT-Thread organization master branch
 on:
-  # Runs at 16:00 UTC (BeiJing 00:00) every day
-  schedule:
-    - cron:  '0 16 * * *'
   push:
     branches:
       - master
@@ -46,6 +43,12 @@ on:
     types:
       - online-pkgs-static-building-trigger-event
   workflow_dispatch:
+    inputs:
+      trigger_type:
+        description: 'Trigger type'
+        required: false
+        default: 'manual'
+        type: string
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
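The new `trigger_type` input lets a run distinguish scheduled dispatches from manual ones. For reference, a minimal sketch of the dispatch call that fills it in (the scheduler script added below does exactly this; owner, repo, workflow ID, and token are placeholders):

```python
# Minimal sketch of dispatching this workflow with the new input.
# <owner>, <repo>, <workflow_id>, and <TOKEN> are placeholders.
import requests

resp = requests.post(
    "https://api.github.com/repos/<owner>/<repo>/actions/workflows/<workflow_id>/dispatches",
    headers={
        "Authorization": "token <TOKEN>",
        "Accept": "application/vnd.github.v3+json",
    },
    json={"ref": "master", "inputs": {"trigger_type": "scheduled"}},
)
assert resp.status_code == 204  # GitHub answers 204 No Content on success
```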

+ 200 - 0
.github/workflows/scheduled-ci-trigger.yml

@@ -0,0 +1,200 @@
+name: Weekly CI Scheduler
+
+on:
+  # Runs at 08:00 Beijing time every day
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+    inputs:
+      debug:
+        description: 'Debug mode'
+        required: false
+        default: 'false'
+
+env:
+  TARGET_WORKFLOWS: '["RT-Thread BSP Static Build Check", "utest_auto_run"]'
+  DISCUSSION_CATEGORY: "Github Action Exception Reports"
+
+jobs:
+  trigger-and-monitor:
+    name: Trigger and Monitor CIs
+    runs-on: ubuntu-latest
+    outputs:
+      failed_workflows: ${{ steps.collect-results.outputs.failed_workflows }}
+      total_workflows: ${{ steps.collect-results.outputs.total_workflows }}
+      has_results: ${{ steps.collect-results.outputs.has_results }}
+    
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+
+    - name: Install Python dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install requests
+
+    - name: Record start time
+      id: start-time
+      run: |
+        echo "start_time=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+        echo "Start time: $(date -u +'%Y-%m-%dT%H:%M:%SZ')"
+
+    - name: Trigger CI workflows directly
+      id: trigger-ci
+      run: |
+        python tools/ci/scheduled-ci-trigger/trigger_workflows_direct.py
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        TARGET_WORKFLOWS: ${{ env.TARGET_WORKFLOWS }}
+
+    - name: Wait for workflows to appear
+      id: wait-for-workflows
+      run: |
+        echo "Waiting for workflows to appear in API..."
+        python tools/ci/scheduled-ci-trigger/wait_for_workflows.py "${{ steps.start-time.outputs.start_time }}"
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        TARGET_WORKFLOWS: ${{ env.TARGET_WORKFLOWS }}
+
+    - name: Monitor CI workflows
+      id: monitor-ci
+      run: |
+        python tools/ci/scheduled-ci-trigger/monitor_workflows.py "${{ steps.start-time.outputs.start_time }}"
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        TARGET_WORKFLOWS: ${{ env.TARGET_WORKFLOWS }}
+
+    - name: Collect monitoring results
+      id: collect-results
+      run: |
+        echo "Checking for monitoring results..."
+        if [ -f "monitoring_results.json" ]; then
+          echo "monitoring_results.json found"
+          FAILED_COUNT=$(python -c "import json; data=json.load(open('monitoring_results.json')); print(len([w for w in data if w.get('conclusion') == 'failure']))")
+          TOTAL_COUNT=$(python -c "import json; data=json.load(open('monitoring_results.json')); print(len(data))")
+          echo "failed_workflows=$FAILED_COUNT" >> $GITHUB_OUTPUT
+          echo "total_workflows=$TOTAL_COUNT" >> $GITHUB_OUTPUT
+          echo "has_results=true" >> $GITHUB_OUTPUT
+          echo "Results: $FAILED_COUNT failed out of $TOTAL_COUNT total"
+        else
+          echo "monitoring_results.json not found"
+          echo "failed_workflows=0" >> $GITHUB_OUTPUT
+          echo "total_workflows=0" >> $GITHUB_OUTPUT
+          echo "has_results=false" >> $GITHUB_OUTPUT
+        fi
+
+    - name: Generate detailed report
+      if: steps.collect-results.outputs.has_results == 'true' && steps.collect-results.outputs.failed_workflows != '0'
+      id: generate-report
+      run: |
+        echo "Generating detailed report..."
+        python tools/ci/scheduled-ci-trigger/generate_report.py
+        echo "Report generation completed"
+
+    - name: Upload report artifact
+      if: steps.collect-results.outputs.has_results == 'true' && steps.collect-results.outputs.failed_workflows != '0'
+      uses: actions/upload-artifact@v4
+      with:
+        name: ci-failure-report
+        path: |
+          monitoring_results.json
+          failure_details.md
+        retention-days: 7
+
+  create-discussion:
+    name: Create Discussion Report
+    needs: trigger-and-monitor
+    if: needs.trigger-and-monitor.outputs.has_results == 'true' && needs.trigger-and-monitor.outputs.failed_workflows != '0'
+    runs-on: ubuntu-latest
+    
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+
+    - name: Download report artifact
+      uses: actions/download-artifact@v4
+      with:
+        name: ci-failure-report
+
+    - name: Create Discussion
+      uses: actions/github-script@v6
+      env:
+        DISCUSSION_CATEGORY: ${{ env.DISCUSSION_CATEGORY }}
+      with:
+        script: |
+          const fs = require('fs');
+          
+          const reportPath = './failure_details.md';
+          
+          let reportContent = fs.readFileSync(reportPath, 'utf8');
+          
+          // Extract the date from the first line: # YYYYMMDD_ci_integration-failed-report
+          const lines = reportContent.split('\n');
+          const firstLine = lines[0].trim();
+          const dateMatch = firstLine.match(/# (\d{8})_ci_integration-failed-report/);
+          
+          if (!dateMatch) {
+            console.error('Failed to extract date from first line:', firstLine);
+            process.exit(1);
+          }
+          
+          const dateString = dateMatch[1];
+          const discussionTitle = `${dateString}_ci_integration-failed-report`;
+          
+          // Remove the hidden first line (used only for title extraction)
+          reportContent = lines.slice(1).join('\n').trim();
+          
+          // Fetch the repository ID and discussion category IDs
+          const getRepoQuery = `
+            query($owner: String!, $repo: String!) {
+              repository(owner: $owner, name: $repo) {
+                id
+                discussionCategories(first: 20) {
+                  nodes {
+                    id
+                    name
+                  }
+                }
+              }
+            }
+          `;
+          
+          const repoData = await github.graphql(getRepoQuery, {
+            owner: context.repo.owner,
+            repo: context.repo.repo
+          });
+          
+          const repositoryId = repoData.repository.id;
+          const categories = repoData.repository.discussionCategories.nodes;
+          const targetCategory = categories.find(cat => cat.name === process.env.DISCUSSION_CATEGORY);
+          
+          if (!targetCategory) {
+            console.error('Category not found:', process.env.DISCUSSION_CATEGORY);
+            process.exit(1);
+          }
+          
+          const createDiscussionMutation = `
+            mutation($repositoryId: ID!, $categoryId: ID!, $title: String!, $body: String!) {
+              createDiscussion(input: {
+                repositoryId: $repositoryId
+                categoryId: $categoryId
+                title: $title
+                body: $body
+              }) {
+                discussion {
+                  id
+                  title
+                  url
+                }
+              }
+            }
+          `;
+          
+          const result = await github.graphql(createDiscussionMutation, {
+            repositoryId: repositoryId,
+            categoryId: targetCategory.id,
+            title: discussionTitle,
+            body: reportContent  // the cleaned content (first line removed)
+          });
+          
+          console.log('Discussion created successfully:', result.createDiscussion.discussion.url);
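The Discussion title comes from a hidden first line that `generate_report.py` (added below) writes and this script parses back out. A small self-check of that shared contract, using an example date:

```python
# Self-check of the title handshake between the report generator and the
# github-script step; the date value is an example.
import re

first_line = "# 20250101_ci_integration-failed-report"
m = re.match(r"# (\d{8})_ci_integration-failed-report", first_line)
assert m is not None and m.group(1) == "20250101"
```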

+ 7 - 0
.github/workflows/utest_auto_run.yml

@@ -18,6 +18,13 @@ on:
       - documentation/**
       - '**/README.md'
       - '**/README_zh.md'
+  workflow_dispatch:
+    inputs:
+      trigger_type:
+        description: 'Trigger type'
+        required: false
+        default: 'manual'
+        type: string
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

+ 135 - 0
tools/ci/scheduled-ci-trigger/generate_report.py

@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+import json
+import os
+from datetime import datetime, timedelta, timezone
+from typing import List, Dict, Any
+
+def load_monitoring_results() -> List[Dict[str, Any]]:
+    """加载 monitoring_results.json"""
+    if not os.path.exists("monitoring_results.json"):
+        print("No monitoring results found")
+        return []
+    try:
+        with open("monitoring_results.json", "r", encoding="utf-8") as f:
+            return json.load(f)
+    except (json.JSONDecodeError, OSError) as e:
+        print(f"Error loading monitoring_results.json: {e}")
+        return []
+
+def get_beijing_time() -> datetime:
+    """Return the current time shifted to UTC+8 (Beijing)."""
+    return datetime.now(timezone.utc) + timedelta(hours=8)
+
+def format_time(dt: datetime) -> str:
+    return dt.strftime("%Y-%m-%d %H:%M")
+
+def classify_error(step_name: str, job_name: str) -> str:
+    """错误类型分类"""
+    step_lower = step_name.lower()
+    if any(x in step_lower for x in ["test", "suite", "pytest", "unittest"]):
+        return "TEST_FAILURE"
+    if "lint" in step_lower or "flake8" in step_lower:
+        return "LINT_ERROR"
+    if "build" in step_lower or "compile" in step_lower:
+        return "BUILD_ERROR"
+    if "deploy" in step_lower or "upload" in step_lower or "publish" in step_lower:
+        return "DEPLOY_ERROR"
+    if "check" in step_lower or "validate" in step_lower or "verify" in step_lower:
+        return "VALIDATION_ERROR"
+    if "generate" in step_lower or "render" in step_lower:
+        return "GENERATION_ERROR"
+    return "UNKNOWN"
+
+def generate_report():
+    """生成符合最新样式的故障聚合报告"""
+    results = load_monitoring_results()
+    if not results:
+        return
+
+    failed_workflows = [r for r in results if r.get('conclusion') == 'failure']
+    if not failed_workflows:
+        print("No failed workflows to report")
+        return
+
+    now = get_beijing_time()
+    date_str = now.strftime("%Y%m%d")
+
+    # Monitoring time range (converted to UTC+8)
+    created_times = [
+        datetime.fromisoformat(r["created_at"].replace("Z", "+00:00")) + timedelta(hours=8)
+        for r in failed_workflows
+    ]
+    updated_times = [
+        datetime.fromisoformat(r["updated_at"].replace("Z", "+00:00")) + timedelta(hours=8)
+        for r in failed_workflows
+    ]
+    start_time = min(created_times)
+    end_time = max(updated_times)
+
+    total = len(results)
+    failed_count = len(failed_workflows)
+    success_rate = 0.0 if total == 0 else round((total - failed_count) / total * 100, 1)
+
+    # === First line: consumed by the github-script step to extract the title (required) ===
+    report = f"# {date_str}_ci_integration-failed-report\n\n"
+
+    # === Second line: the H1 heading the reader actually sees ===
+    report += f"# 🚨 {date_str} GitHub Actions 故障聚合报告 | Incident Aggregate Report\n\n"
+
+    # === Executive summary ===
+    report += f"## 执行概览 | Executive Summary\n"
+    report += f"- **监控时间范围 | Monitoring Period**: {format_time(start_time)}–{format_time(end_time)} (UTC+8)\n"
+    report += f"- **检测到失败运行 | Failed Runs Detected**: {failed_count}个\n"
+    report += f"- **成功率 | Success Rate**: {success_rate}% \n\n"
+
+    # === Failure details ===
+    report += f"## 🔍 故障详情 | Failure Details\n\n"
+
+    for wf in failed_workflows:
+        run_id = wf.get("run_id", "N/A")
+        name = wf["name"]
+        html_url = wf.get("html_url", "#")
+        details = wf.get("failure_details", [])
+
+        report += f"**📌 Run-{run_id}** | [{name}]({html_url})\n"
+
+        if not details:
+            report += "└─ 无失败作业详情 | No details of failed jobs\n\n"
+            continue
+
+        failed_jobs = [j for j in details if j.get("steps")]
+        for i, job in enumerate(failed_jobs):
+            job_name = job["name"]
+            steps = job["steps"]
+            job_prefix = "└─" if i == len(failed_jobs) - 1 else "├─"
+            report += f"{job_prefix} **失败作业 | Failed Job**: {job_name}\n"
+
+            for j, step in enumerate(steps):
+                step_name = step["name"]
+                step_num = step["number"]
+                error_type = classify_error(step_name, job_name)
+                step_prefix = "   └─" if j == len(steps) - 1 else "   ├─"
+                report += f"{step_prefix} **失败步骤 | Failed Step**: {step_name} (Step {step_num})\n"
+                indent = "      " if j == len(steps) - 1 else "   │   "
+                report += f"{indent}**错误类型 | Error Type**: `{error_type}`\n"
+        report += "\n"
+
+    # === Team Collaboration & Support ===
+    report += f"## 👥 团队协作与支持 | Team Collaboration & Support\n\n"
+    report += f"请求维护支持:本报告需要RT-Thread官方团队的专业经验进行审核与指导。 \n"
+    report += f"Call for Maintenance Support: This report requires the expertise of the RT-Thread official team for review and guidance.\n\n"
+    report += f"提审负责人:@Rbb666 @kurisaW\n"
+    report += f"Requested Reviewers from RT-Thread: @Rbb666 @kurisaW\n\n"
+    report += f"烦请尽快关注此事,万分感谢。  \n"
+    report += f"Your prompt attention to this matter is greatly appreciated.\n"
+
+    # Save the report to disk
+    try:
+        with open("failure_details.md", "w", encoding="utf-8") as f:
+            f.write(report.rstrip() + "\n")
+        print("Report generated: failure_details.md")
+        print(f"Report size: {os.path.getsize('failure_details.md')} bytes")
+    except Exception as e:
+        print(f"Error writing report: {e}")
+
+if __name__ == "__main__":
+    generate_report()
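For local testing, a hypothetical fixture can exercise `generate_report()` without a live CI run; every field value below is invented, and the import assumes the script's directory is on `sys.path`:

```python
# Hypothetical local smoke test for generate_report.py; values are invented.
import json
from generate_report import generate_report  # assumes the script is importable

fixture = [{
    "name": "utest_auto_run",
    "run_id": 123456,
    "conclusion": "failure",
    "html_url": "https://github.com/<owner>/<repo>/actions/runs/123456",
    "created_at": "2025-01-01T00:00:00Z",
    "updated_at": "2025-01-01T00:30:00Z",
    "failure_details": [
        {"name": "utest", "steps": [{"name": "Run test suite", "number": 5}]}
    ],
}]
with open("monitoring_results.json", "w", encoding="utf-8") as f:
    json.dump(fixture, f)

generate_report()  # writes failure_details.md next to the fixture
```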

+ 227 - 0
tools/ci/scheduled-ci-trigger/monitor_workflows.py

@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+import os
+import json
+import requests
+import time
+import sys
+from datetime import datetime, timezone
+
+def monitor_workflows(github_token, repo, workflow_names, start_time):
+    """监控工作流运行"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    monitoring_results = []
+    
+    for workflow_name in workflow_names:
+        print(f"\n=== Monitoring {workflow_name} ===")
+        
+        try:
+            workflow_id = get_workflow_id(github_token, repo, workflow_name)
+            if not workflow_id:
+                monitoring_results.append({
+                    "name": workflow_name,
+                    "status": "error",
+                    "conclusion": "error", 
+                    "error": "Workflow not found"
+                })
+                continue
+            
+            # Look for runs created after start_time
+            runs = get_recent_runs(github_token, repo, workflow_id, start_time)
+            
+            if not runs:
+                print(f"No runs found for {workflow_name} after {start_time}")
+                # Fall back to the most recent existing run
+                all_runs = get_all_runs(github_token, repo, workflow_id, 10)
+                if all_runs:
+                    latest_run = all_runs[0]
+                    print(f"Using latest run instead: {latest_run['id']} created at {latest_run['created_at']}")
+                    result = monitor_single_run(github_token, repo, latest_run["id"], workflow_name)
+                    monitoring_results.append(result)
+                else:
+                    monitoring_results.append({
+                        "name": workflow_name,
+                        "status": "not_found",
+                        "conclusion": "not_found",
+                        "error": f"No runs found after {start_time}"
+                    })
+            else:
+                # Monitor the run we found
+                run_to_monitor = runs[0]  # take the most recent one
+                print(f"Monitoring run: {run_to_monitor['id']}")
+                result = monitor_single_run(github_token, repo, run_to_monitor["id"], workflow_name)
+                monitoring_results.append(result)
+                
+        except Exception as e:
+            print(f"Error monitoring {workflow_name}: {str(e)}")
+            monitoring_results.append({
+                "name": workflow_name,
+                "status": "error",
+                "conclusion": "error",
+                "error": str(e)
+            })
+    
+    return monitoring_results
+
+def get_all_runs(github_token, repo, workflow_id, per_page=10):
+    """获取所有运行"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    url = f"https://api.github.com/repos/{repo}/actions/workflows/{workflow_id}/runs"
+    params = {"per_page": per_page}
+    
+    response = requests.get(url, headers=headers, params=params)
+    if response.status_code == 200:
+        return response.json()["workflow_runs"]
+    return []
+
+def get_recent_runs(github_token, repo, workflow_id, start_time):
+    """获取开始时间后的运行"""
+    all_runs = get_all_runs(github_token, repo, workflow_id, 10)
+    start_time_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00'))
+    
+    recent_runs = []
+    for run in all_runs:
+        run_time = datetime.fromisoformat(run["created_at"].replace('Z', '+00:00'))
+        if run_time >= start_time_dt:
+            recent_runs.append(run)
+    
+    return recent_runs
+
+def monitor_single_run(github_token, repo, run_id, workflow_name):
+    """监控单个运行"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    max_wait_time = 1800  # 30 minutes
+    check_interval = 30
+    start_time = time.time()
+    
+    print(f"Monitoring {workflow_name} (run {run_id})")
+    
+    while time.time() - start_time < max_wait_time:
+        url = f"https://api.github.com/repos/{repo}/actions/runs/{run_id}"
+        response = requests.get(url, headers=headers)
+        
+        if response.status_code != 200:
+            print(f"Error getting run status: {response.status_code}")
+            time.sleep(check_interval)
+            continue
+        
+        run_data = response.json()
+        status = run_data["status"]
+        conclusion = run_data.get("conclusion")
+        
+        print(f"  {workflow_name}: status={status}, conclusion={conclusion}")
+        
+        if status == "completed":
+            result = {
+                "name": workflow_name,
+                "run_id": run_id,
+                "status": status,
+                "conclusion": conclusion,
+                "html_url": run_data["html_url"],
+                "created_at": run_data["created_at"],
+                "updated_at": run_data["updated_at"]
+            }
+            
+            if conclusion == "failure":
+                result["failure_details"] = get_failure_logs(github_token, repo, run_id)
+            
+            return result
+        
+        time.sleep(check_interval)
+    
+    # Timed out
+    return {
+        "name": workflow_name,
+        "run_id": run_id,
+        "status": "timed_out",
+        "conclusion": "timed_out",
+        "html_url": f"https://github.com/{repo}/actions/runs/{run_id}",
+        "error": "Monitoring timed out after 30 minutes"
+    }
+
+def get_failure_logs(github_token, repo, run_id):
+    """获取失败日志"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    try:
+        jobs_url = f"https://api.github.com/repos/{repo}/actions/runs/{run_id}/jobs"
+        jobs_response = requests.get(jobs_url, headers=headers)
+        
+        failure_details = []
+        
+        if jobs_response.status_code == 200:
+            jobs_data = jobs_response.json()["jobs"]
+            for job in jobs_data:
+                if job["conclusion"] == "failure":
+                    job_info = {
+                        "name": job["name"],
+                        "steps": []
+                    }
+                    
+                    for step in job["steps"]:
+                        if step["conclusion"] == "failure":
+                            job_info["steps"].append({
+                                "name": step["name"],
+                                "number": step["number"]
+                            })
+                    
+                    failure_details.append(job_info)
+        
+        return failure_details
+    except Exception as e:
+        print(f"Error getting failure logs: {e}")
+        return []
+
+def get_workflow_id(github_token, repo, workflow_name):
+    """获取工作流ID"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    url = f"https://api.github.com/repos/{repo}/actions/workflows"
+    response = requests.get(url, headers=headers)
+    
+    if response.status_code == 200:
+        workflows = response.json()["workflows"]
+        for workflow in workflows:
+            if workflow["name"] == workflow_name:
+                return workflow["id"]
+    return None
+
+def main():
+    github_token = os.getenv("GITHUB_TOKEN")
+    repo = os.getenv("GITHUB_REPOSITORY")
+    workflows_json = os.getenv("TARGET_WORKFLOWS")
+    start_time = sys.argv[1] if len(sys.argv) > 1 else datetime.now(timezone.utc).isoformat()
+    
+    if not all([github_token, repo, workflows_json]):
+        raise ValueError("Missing required environment variables")
+    
+    workflows = json.loads(workflows_json)
+    results = monitor_workflows(github_token, repo, workflows, start_time)
+    
+    with open("monitoring_results.json", "w") as f:
+        json.dump(results, f, indent=2)
+    
+    print(f"\n=== Monitoring Summary ===")
+    for result in results:
+        status_icon = "✅" if result.get("conclusion") == "success" else "❌" if result.get("conclusion") == "failure" else "⚠️"
+        print(f"{status_icon} {result['name']}: {result.get('conclusion', 'unknown')}")
+
+if __name__ == "__main__":
+    main()
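The JSON written here is exactly what the workflow's `Collect monitoring results` step reads back with inline `python -c`; the same counting logic as a standalone sketch:

```python
# Standalone version of the counting done inline in the workflow step.
import json

with open("monitoring_results.json", encoding="utf-8") as f:
    data = json.load(f)

failed = [w for w in data if w.get("conclusion") == "failure"]
print(f"failed_workflows={len(failed)}")
print(f"total_workflows={len(data)}")
```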

+ 1 - 0
tools/ci/scheduled-ci-trigger/requirements.txt

@@ -0,0 +1 @@
+requests>=2.25.1

+ 98 - 0
tools/ci/scheduled-ci-trigger/trigger_workflows_direct.py

@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+import os
+import json
+import requests
+import time
+
+def trigger_workflow_directly(workflow_name, github_token, repo):
+    """直接触发工作流"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    # First resolve the workflow ID
+    workflow_id = get_workflow_id(github_token, repo, workflow_name)
+    if not workflow_id:
+        print(f"✗ Workflow '{workflow_name}' not found")
+        return False
+    
+    # Dispatch via the workflow_dispatch endpoint
+    dispatch_url = f"https://api.github.com/repos/{repo}/actions/workflows/{workflow_id}/dispatches"
+    
+    # The inputs must match those declared by the target workflow
+    dispatch_data = {
+        "ref": "master",
+        "inputs": {
+            "trigger_type": "scheduled"  # 使用工作流实际定义的输入参数
+        }
+    }
+    
+    try:
+        print(f"Triggering workflow: {workflow_name} (ID: {workflow_id})")
+        response = requests.post(dispatch_url, headers=headers, json=dispatch_data)
+        
+        if response.status_code == 204:
+            print(f"✓ Successfully triggered workflow: {workflow_name}")
+            return True
+        else:
+            print(f"✗ Failed to trigger {workflow_name}: {response.status_code}")
+            print(f"Response: {response.text}")
+            return False
+            
+    except Exception as e:
+        print(f"✗ Error triggering {workflow_name}: {str(e)}")
+        return False
+
+def get_workflow_id(github_token, repo, workflow_name):
+    """获取工作流ID"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    url = f"https://api.github.com/repos/{repo}/actions/workflows"
+    response = requests.get(url, headers=headers)
+    
+    if response.status_code == 200:
+        workflows = response.json()["workflows"]
+        for workflow in workflows:
+            if workflow["name"] == workflow_name:
+                return workflow["id"]
+        print(f"Available workflows: {[w['name'] for w in workflows]}")
+    else:
+        print(f"Failed to get workflows: {response.status_code}")
+    
+    return None
+
+def main():
+    github_token = os.getenv("GITHUB_TOKEN")
+    repo = os.getenv("GITHUB_REPOSITORY")
+    workflows_json = os.getenv("TARGET_WORKFLOWS")
+    
+    if not all([github_token, repo, workflows_json]):
+        raise ValueError("Missing required environment variables")
+    
+    try:
+        workflows = json.loads(workflows_json)
+    except json.JSONDecodeError:
+        raise ValueError("Invalid TARGET_WORKFLOWS JSON format")
+    
+    print(f"Directly triggering {len(workflows)} workflows...")
+    
+    success_count = 0
+    for i, workflow in enumerate(workflows):
+        success = trigger_workflow_directly(workflow, github_token, repo)
+        if success:
+            success_count += 1
+        
+        # Pause between triggers
+        if i < len(workflows) - 1:
+            print("Waiting 10 seconds before next trigger...")
+            time.sleep(10)
+    
+    print(f"Triggering completed: {success_count}/{len(workflows)} successful")
+
+if __name__ == "__main__":
+    main()
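A hypothetical local invocation, setting the same environment variables the `Trigger CI workflows directly` step provides (token and repository values are placeholders):

```python
# Hypothetical local run of the trigger script; values are placeholders.
import os
import subprocess

os.environ["GITHUB_TOKEN"] = "<a token with workflow scope>"
os.environ["GITHUB_REPOSITORY"] = "<owner>/<repo>"
os.environ["TARGET_WORKFLOWS"] = '["RT-Thread BSP Static Build Check", "utest_auto_run"]'

subprocess.run(
    ["python", "tools/ci/scheduled-ci-trigger/trigger_workflows_direct.py"],
    check=True,
)
```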

+ 113 - 0
tools/ci/scheduled-ci-trigger/wait_for_workflows.py

@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+import os
+import json
+import requests
+import time
+import sys
+from datetime import datetime, timezone
+
+def wait_for_workflows_to_appear(github_token, repo, workflow_names, start_time, max_wait=300):
+    """等待工作流出现在API中"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    print(f"Waiting for {len(workflow_names)} workflows to appear...")
+    print(f"Start time: {start_time}")
+    print(f"Max wait time: {max_wait} seconds")
+    
+    found_workflows = set()
+    start_timestamp = time.time()
+    
+    while time.time() - start_timestamp < max_wait:
+        all_found = True
+        
+        for workflow_name in workflow_names:
+            if workflow_name in found_workflows:
+                continue
+                
+            workflow_id = get_workflow_id(github_token, repo, workflow_name)
+            if not workflow_id:
+                print(f"Workflow {workflow_name} not found, skipping")
+                found_workflows.add(workflow_name)
+                continue
+            
+            # Check whether a new run has appeared
+            runs = get_recent_runs(github_token, repo, workflow_id, start_time)
+            if runs:
+                print(f"✓ Found new run for {workflow_name}: {runs[0]['id']}")
+                found_workflows.add(workflow_name)
+            else:
+                print(f"⏳ Waiting for {workflow_name}...")
+                all_found = False
+        
+        if all_found:
+            print("✓ All workflows have started!")
+            return True
+        
+        time.sleep(10)  # poll every 10 seconds
+    
+    print("⚠️ Timeout waiting for workflows to appear")
+    print(f"Found {len(found_workflows)} out of {len(workflow_names)} workflows")
+    return False
+
+def get_workflow_id(github_token, repo, workflow_name):
+    """获取工作流ID"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    url = f"https://api.github.com/repos/{repo}/actions/workflows"
+    response = requests.get(url, headers=headers)
+    
+    if response.status_code == 200:
+        workflows = response.json()["workflows"]
+        for workflow in workflows:
+            if workflow["name"] == workflow_name:
+                return workflow["id"]
+    return None
+
+def get_recent_runs(github_token, repo, workflow_id, start_time):
+    """获取开始时间后的运行"""
+    headers = {
+        "Authorization": f"token {github_token}",
+        "Accept": "application/vnd.github.v3+json"
+    }
+    
+    url = f"https://api.github.com/repos/{repo}/actions/workflows/{workflow_id}/runs"
+    params = {"per_page": 5}
+    
+    response = requests.get(url, headers=headers, params=params)
+    if response.status_code != 200:
+        return []
+    
+    runs = response.json()["workflow_runs"]
+    start_time_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00'))
+    
+    recent_runs = []
+    for run in runs:
+        run_time = datetime.fromisoformat(run["created_at"].replace('Z', '+00:00'))
+        if run_time >= start_time_dt:
+            recent_runs.append(run)
+    
+    return recent_runs
+
+def main():
+    github_token = os.getenv("GITHUB_TOKEN")
+    repo = os.getenv("GITHUB_REPOSITORY")
+    workflows_json = os.getenv("TARGET_WORKFLOWS")
+    start_time = sys.argv[1] if len(sys.argv) > 1 else datetime.now(timezone.utc).isoformat()
+    
+    if not all([github_token, repo, workflows_json]):
+        raise ValueError("Missing required environment variables")
+    
+    workflows = json.loads(workflows_json)
+    success = wait_for_workflows_to_appear(github_token, repo, workflows, start_time)
+    
+    if not success:
+        print("Proceeding anyway, some workflows may not be detected...")
+
+if __name__ == "__main__":
+    main()
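One note on the start-time argument: the workflow records it as `%Y-%m-%dT%H:%M:%SZ`, and the `replace('Z', '+00:00')` calls above normalize it for `datetime.fromisoformat`, which only accepts the `Z` suffix from Python 3.11 on. A quick check of the equivalence:

```python
# The "Z" suffix written by the workflow parses the same as "+00:00".
from datetime import datetime

a = datetime.fromisoformat("2025-01-01T00:00:00Z".replace("Z", "+00:00"))
b = datetime.fromisoformat("2025-01-01T00:00:00+00:00")
assert a == b
```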