Spaces:
Sleeping
Sleeping
github-actions[bot]
commited on
Commit
·
0491d76
0
Parent(s):
Deploy app/api to HF Space
Browse files- .gitignore +176 -0
- Dockerfile +32 -0
- README.md +12 -0
- docker-compose.yaml +15 -0
- main.py +44 -0
- midleware.py +36 -0
- models/schemas.py +14 -0
- requirements.txt +5 -0
- server/routes.py +209 -0
- server/websockets.py +79 -0
- services/fetcher_service.py +84 -0
- services/llm_service.py +190 -0
- services/prompts.py +113 -0
.gitignore
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Byte-compiled / optimized / DLL files
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
|
6 |
+
# C extensions
|
7 |
+
*.so
|
8 |
+
|
9 |
+
# Distribution / packaging
|
10 |
+
.Python
|
11 |
+
build/
|
12 |
+
develop-eggs/
|
13 |
+
dist/
|
14 |
+
downloads/
|
15 |
+
eggs/
|
16 |
+
.eggs/
|
17 |
+
lib/
|
18 |
+
lib64/
|
19 |
+
parts/
|
20 |
+
sdist/
|
21 |
+
var/
|
22 |
+
wheels/
|
23 |
+
share/python-wheels/
|
24 |
+
*.egg-info/
|
25 |
+
.installed.cfg
|
26 |
+
*.egg
|
27 |
+
MANIFEST
|
28 |
+
|
29 |
+
# PyInstaller
|
30 |
+
# Usually these files are written by a python script from a template
|
31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
32 |
+
*.manifest
|
33 |
+
*.spec
|
34 |
+
|
35 |
+
# Installer logs
|
36 |
+
pip-log.txt
|
37 |
+
pip-delete-this-directory.txt
|
38 |
+
|
39 |
+
# Unit test / coverage reports
|
40 |
+
htmlcov/
|
41 |
+
.tox/
|
42 |
+
.nox/
|
43 |
+
.coverage
|
44 |
+
.coverage.*
|
45 |
+
.cache
|
46 |
+
nosetests.xml
|
47 |
+
coverage.xml
|
48 |
+
*.cover
|
49 |
+
*.py,cover
|
50 |
+
.hypothesis/
|
51 |
+
.pytest_cache/
|
52 |
+
cover/
|
53 |
+
|
54 |
+
# Translations
|
55 |
+
*.mo
|
56 |
+
*.pot
|
57 |
+
|
58 |
+
# Django stuff:
|
59 |
+
*.log
|
60 |
+
local_settings.py
|
61 |
+
db.sqlite3
|
62 |
+
db.sqlite3-journal
|
63 |
+
|
64 |
+
# Flask stuff:
|
65 |
+
instance/
|
66 |
+
.webassets-cache
|
67 |
+
|
68 |
+
# Scrapy stuff:
|
69 |
+
.scrapy
|
70 |
+
|
71 |
+
# Sphinx documentation
|
72 |
+
docs/_build/
|
73 |
+
|
74 |
+
# PyBuilder
|
75 |
+
.pybuilder/
|
76 |
+
target/
|
77 |
+
|
78 |
+
# Jupyter Notebook
|
79 |
+
.ipynb_checkpoints
|
80 |
+
|
81 |
+
# IPython
|
82 |
+
profile_default/
|
83 |
+
ipython_config.py
|
84 |
+
|
85 |
+
# pyenv
|
86 |
+
# For a library or package, you might want to ignore these files since the code is
|
87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
88 |
+
# .python-version
|
89 |
+
|
90 |
+
# pipenv
|
91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
94 |
+
# install all needed dependencies.
|
95 |
+
#Pipfile.lock
|
96 |
+
|
97 |
+
# UV
|
98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
100 |
+
# commonly ignored for libraries.
|
101 |
+
#uv.lock
|
102 |
+
|
103 |
+
# poetry
|
104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
106 |
+
# commonly ignored for libraries.
|
107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
108 |
+
#poetry.lock
|
109 |
+
|
110 |
+
# pdm
|
111 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
112 |
+
#pdm.lock
|
113 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
114 |
+
# in version control.
|
115 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
116 |
+
.pdm.toml
|
117 |
+
.pdm-python
|
118 |
+
.pdm-build/
|
119 |
+
|
120 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
121 |
+
__pypackages__/
|
122 |
+
|
123 |
+
# Celery stuff
|
124 |
+
celerybeat-schedule
|
125 |
+
celerybeat.pid
|
126 |
+
|
127 |
+
# SageMath parsed files
|
128 |
+
*.sage.py
|
129 |
+
|
130 |
+
# Environments
|
131 |
+
.env
|
132 |
+
.venv
|
133 |
+
env/
|
134 |
+
venv/
|
135 |
+
ENV/
|
136 |
+
env.bak/
|
137 |
+
venv.bak/
|
138 |
+
|
139 |
+
# Spyder project settings
|
140 |
+
.spyderproject
|
141 |
+
.spyproject
|
142 |
+
|
143 |
+
# Rope project settings
|
144 |
+
.ropeproject
|
145 |
+
|
146 |
+
# mkdocs documentation
|
147 |
+
/site
|
148 |
+
|
149 |
+
# mypy
|
150 |
+
.mypy_cache/
|
151 |
+
.dmypy.json
|
152 |
+
dmypy.json
|
153 |
+
|
154 |
+
# Pyre type checker
|
155 |
+
.pyre/
|
156 |
+
|
157 |
+
# pytype static type analyzer
|
158 |
+
.pytype/
|
159 |
+
|
160 |
+
# Cython debug symbols
|
161 |
+
cython_debug/
|
162 |
+
|
163 |
+
# PyCharm
|
164 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
165 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
166 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
167 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
168 |
+
#.idea/
|
169 |
+
|
170 |
+
# Ruff stuff:
|
171 |
+
.ruff_cache/
|
172 |
+
|
173 |
+
# PyPI configuration file
|
174 |
+
.pypirc
|
175 |
+
|
176 |
+
observability_data/*
|
Dockerfile
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use the official Python 3.12 image
|
2 |
+
FROM python:3.12-slim
|
3 |
+
|
4 |
+
# Set the working directory
|
5 |
+
WORKDIR /app
|
6 |
+
|
7 |
+
# Install required system dependencies
|
8 |
+
RUN apt-get update && apt-get install -y \
|
9 |
+
curl \
|
10 |
+
git \
|
11 |
+
libpq-dev \
|
12 |
+
gcc \
|
13 |
+
&& rm -rf /var/lib/apt/lists/*
|
14 |
+
|
15 |
+
# Create the /app/files directory and set full permissions
|
16 |
+
RUN mkdir -p /app/.files && chmod 777 /app/.files && \
|
17 |
+
mkdir -p /app/logs && chmod 777 /app/logs && \
|
18 |
+
mkdir -p /app/observability_data && chmod 777 /app/observability_data && \
|
19 |
+
mkdir -p /app/devops_cache && chmod 777 /app/devops_cache
|
20 |
+
|
21 |
+
# Copy the current repository into the container
|
22 |
+
COPY . /app
|
23 |
+
|
24 |
+
# Upgrade pip and install dependencies
|
25 |
+
RUN pip install --upgrade pip && \
|
26 |
+
pip install -r requirements.txt && \
|
27 |
+
pip install git-recap==0.1.3 && \
|
28 |
+
pip install git+https://github.com/BrunoV21/AiCore.git#egg=core-for-ai[all]
|
29 |
+
|
30 |
+
EXPOSE 7860
|
31 |
+
|
32 |
+
CMD python main.py
|
README.md
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Git Recap
|
3 |
+
emoji: 🚀
|
4 |
+
colorFrom: indigo
|
5 |
+
colorTo: purple
|
6 |
+
sdk: docker
|
7 |
+
pinned: true
|
8 |
+
license: apache-2.0
|
9 |
+
short_description: Recap your repositories with the power of Llms!
|
10 |
+
---
|
11 |
+
|
12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
docker-compose.yaml
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
version: "3.8"
|
2 |
+
|
3 |
+
services:
|
4 |
+
app:
|
5 |
+
build:
|
6 |
+
context: .
|
7 |
+
dockerfile: Dockerfile
|
8 |
+
env_file:
|
9 |
+
- .env
|
10 |
+
ports:
|
11 |
+
- "8000:8000"
|
12 |
+
volumes:
|
13 |
+
- .:/app
|
14 |
+
restart: unless-stopped
|
15 |
+
command: python main.py
|
main.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI
|
2 |
+
from fastapi.responses import RedirectResponse
|
3 |
+
from fastapi.middleware.cors import CORSMiddleware
|
4 |
+
import asyncio
|
5 |
+
|
6 |
+
from server.routes import router as api_router
|
7 |
+
from services.llm_service import simulate_llm_response
|
8 |
+
from server.websockets import router as websocket_router
|
9 |
+
from midleware import OriginAndRateLimitMiddleware, ALLOWED_ORIGIN
|
10 |
+
|
11 |
+
# Initialize FastAPI app
|
12 |
+
app = FastAPI(title="LLM Service API")
|
13 |
+
|
14 |
+
app.add_middleware(
|
15 |
+
CORSMiddleware,
|
16 |
+
allow_origins=ALLOWED_ORIGIN,
|
17 |
+
allow_methods=["GET", "POST", "OPTIONS"]
|
18 |
+
)
|
19 |
+
app.add_middleware(OriginAndRateLimitMiddleware)
|
20 |
+
|
21 |
+
# Include routers
|
22 |
+
app.include_router(api_router)
|
23 |
+
app.include_router(websocket_router)
|
24 |
+
|
25 |
+
@app.get("/", include_in_schema=False)
|
26 |
+
async def root():
|
27 |
+
return RedirectResponse(url="https://brunov21.github.io/GitRecap/")
|
28 |
+
|
29 |
+
# Health check endpoint
|
30 |
+
@app.get("/health")
|
31 |
+
async def health_check():
|
32 |
+
return {"status": "healthy"}
|
33 |
+
|
34 |
+
@app.get("/health2")
|
35 |
+
async def stream_health_check():
|
36 |
+
response = simulate_llm_response("health")
|
37 |
+
return {"response": " ".join(response)}
|
38 |
+
|
39 |
+
if __name__ == "__main__":
|
40 |
+
from dotenv import load_dotenv
|
41 |
+
import uvicorn
|
42 |
+
|
43 |
+
load_dotenv()
|
44 |
+
uvicorn.run(app, host="0.0.0.0", port=7860)
|
midleware.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import time
|
3 |
+
from fastapi import Request, HTTPException
|
4 |
+
from starlette.middleware.base import BaseHTTPMiddleware
|
5 |
+
from collections import defaultdict
|
6 |
+
|
7 |
+
ALLOWED_ORIGIN = [
|
8 |
+
os.getenv("VITE_FRONTEND_HOST")
|
9 |
+
]
|
10 |
+
RATE_LIMIT = int(os.getenv("RATE_LIMIT", "30")) # Max requests per time window
|
11 |
+
WINDOW_SECONDS = int(os.getenv("WINDOW_SECONDS", "3")) # Time window in seconds
|
12 |
+
|
13 |
+
# Store timestamps of requests per IP
|
14 |
+
request_logs = defaultdict(list)
|
15 |
+
|
16 |
+
|
17 |
+
class OriginAndRateLimitMiddleware(BaseHTTPMiddleware):
|
18 |
+
async def dispatch(self, request: Request, call_next):
|
19 |
+
origin = request.headers.get("origin")
|
20 |
+
if origin and origin not in ALLOWED_ORIGIN:
|
21 |
+
raise HTTPException(status_code=403, detail="Forbidden: origin not allowed")
|
22 |
+
|
23 |
+
# Rate limiting logic based on client IP
|
24 |
+
client_ip = request.client.host
|
25 |
+
now = time.time()
|
26 |
+
|
27 |
+
# Clean up old request timestamps outside the current window
|
28 |
+
request_logs[client_ip] = [
|
29 |
+
t for t in request_logs[client_ip] if now - t < WINDOW_SECONDS
|
30 |
+
]
|
31 |
+
|
32 |
+
if len(request_logs[client_ip]) >= RATE_LIMIT:
|
33 |
+
raise HTTPException(status_code=429, detail="Too Many Requests")
|
34 |
+
|
35 |
+
request_logs[client_ip].append(now)
|
36 |
+
return await call_next(request)
|
models/schemas.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pydantic import BaseModel, model_validator
|
2 |
+
from typing import Dict, Self, Optional, Any
|
3 |
+
import ulid
|
4 |
+
|
5 |
+
class ChatRequest(BaseModel):
|
6 |
+
session_id: str=""
|
7 |
+
message: str
|
8 |
+
model_params: Optional[Dict[str, Any]] = None
|
9 |
+
|
10 |
+
@model_validator(mode="after")
|
11 |
+
def set_session_id(self)->Self:
|
12 |
+
if not self.session_id:
|
13 |
+
self.session_id = ulid.ulid()
|
14 |
+
return self
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi==0.109.1
|
2 |
+
uvicorn==0.23.2
|
3 |
+
websockets==11.0.3
|
4 |
+
pyjwt==2.10.1
|
5 |
+
python-multipart==0.0.18
|
server/routes.py
ADDED
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import APIRouter, HTTPException, Request, Query
|
2 |
+
from pydantic import BaseModel
|
3 |
+
|
4 |
+
from models.schemas import ChatRequest
|
5 |
+
from services.llm_service import initialize_llm_session, set_llm, get_llm, trim_messages
|
6 |
+
from services.fetcher_service import store_fetcher, get_fetcher
|
7 |
+
from git_recap.utils import parse_entries_to_txt
|
8 |
+
from aicore.llm.config import LlmConfig
|
9 |
+
from datetime import datetime, timezone
|
10 |
+
from typing import Optional, List
|
11 |
+
import requests
|
12 |
+
import os
|
13 |
+
|
14 |
+
router = APIRouter()
|
15 |
+
|
16 |
+
class CloneRequest(BaseModel):
|
17 |
+
"""Request model for repository cloning endpoint."""
|
18 |
+
url: str
|
19 |
+
|
20 |
+
GITHUB_ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
|
21 |
+
|
22 |
+
@router.post("/clone-repo")
|
23 |
+
async def clone_repository(request: CloneRequest):
|
24 |
+
"""
|
25 |
+
Endpoint for cloning a repository from a URL.
|
26 |
+
|
27 |
+
Args:
|
28 |
+
request: CloneRequest containing the repository URL
|
29 |
+
|
30 |
+
Returns:
|
31 |
+
dict: Contains session_id for subsequent operations
|
32 |
+
|
33 |
+
Raises:
|
34 |
+
HTTPException: 400 for invalid URL, 500 for cloning failure
|
35 |
+
"""
|
36 |
+
try:
|
37 |
+
response = await create_llm_session()
|
38 |
+
session_id = response.get("session_id")
|
39 |
+
store_fetcher(session_id, request.url, "URL")
|
40 |
+
return {"session_id": session_id}
|
41 |
+
except ValueError as e:
|
42 |
+
raise HTTPException(status_code=400, detail=str(e))
|
43 |
+
except Exception as e:
|
44 |
+
raise HTTPException(status_code=500, detail=f"Failed to clone repository: {str(e)}")
|
45 |
+
|
46 |
+
@router.get("/external-signup")
|
47 |
+
async def external_signup(app: str, accessToken: str, provider: str):
|
48 |
+
if provider.lower() != "github":
|
49 |
+
raise HTTPException(status_code=400, detail="Unsupported provider")
|
50 |
+
|
51 |
+
# Build the URL to exchange the code for a token
|
52 |
+
params = {
|
53 |
+
"client_id": os.getenv("VITE_GITHUB_CLIENT_ID"),
|
54 |
+
"client_secret": os.getenv("VITE_GITHUB_CLIENT_SECRET"),
|
55 |
+
"code": accessToken
|
56 |
+
}
|
57 |
+
|
58 |
+
headers = {
|
59 |
+
"Accept": "application/json",
|
60 |
+
"Accept-Encoding": "application/json"
|
61 |
+
}
|
62 |
+
|
63 |
+
response = requests.get(GITHUB_ACCESS_TOKEN_URL, params=params, headers=headers)
|
64 |
+
|
65 |
+
if response.status_code != 200:
|
66 |
+
raise HTTPException(status_code=response.status_code, detail="Error fetching token from GitHub")
|
67 |
+
|
68 |
+
githubUserData = response.json()
|
69 |
+
token = githubUserData.get("access_token")
|
70 |
+
if not token:
|
71 |
+
raise HTTPException(status_code=400, detail="Failed to retrieve access token")
|
72 |
+
|
73 |
+
response = await create_llm_session()
|
74 |
+
response["token"] = token
|
75 |
+
response["provider"] = provider
|
76 |
+
final_response = await store_fetcher_endpoint(response)
|
77 |
+
session_id = final_response.get("session_id")
|
78 |
+
return {"session_id": session_id}
|
79 |
+
|
80 |
+
@router.post("/pat")
|
81 |
+
async def store_fetcher_endpoint(request: Request):
|
82 |
+
"""
|
83 |
+
Endpoint to store the PAT associated with a session.
|
84 |
+
|
85 |
+
Args:
|
86 |
+
request: Contains JSON payload with 'session_id' and 'pat'
|
87 |
+
|
88 |
+
Returns:
|
89 |
+
dict: Contains session_id
|
90 |
+
|
91 |
+
Raises:
|
92 |
+
HTTPException: 400 if PAT is missing
|
93 |
+
"""
|
94 |
+
if isinstance(request, Request):
|
95 |
+
payload = await request.json()
|
96 |
+
else:
|
97 |
+
payload = request
|
98 |
+
|
99 |
+
provider = payload.get("provider", "GitHub")
|
100 |
+
token = payload.get("pat") or payload.get("token")
|
101 |
+
if not token:
|
102 |
+
raise HTTPException(status_code=400, detail="Missing required field: pat")
|
103 |
+
|
104 |
+
response = await create_llm_session()
|
105 |
+
session_id = response.get("session_id")
|
106 |
+
store_fetcher(session_id, token, provider)
|
107 |
+
return {"session_id": session_id}
|
108 |
+
|
109 |
+
async def create_llm_session(
|
110 |
+
request: Optional[LlmConfig] = None
|
111 |
+
):
|
112 |
+
"""
|
113 |
+
Create a new LLM session with custom configuration
|
114 |
+
|
115 |
+
Args:
|
116 |
+
request: Optional LLM configuration
|
117 |
+
|
118 |
+
Returns:
|
119 |
+
dict: Contains session_id and success message
|
120 |
+
|
121 |
+
Raises:
|
122 |
+
HTTPException: 500 if session creation fails
|
123 |
+
"""
|
124 |
+
try:
|
125 |
+
session_id = await set_llm(request)
|
126 |
+
return {
|
127 |
+
"session_id": session_id,
|
128 |
+
"message": "LLM session created successfully"
|
129 |
+
}
|
130 |
+
except Exception as e:
|
131 |
+
raise HTTPException(status_code=500, detail=str(e))
|
132 |
+
|
133 |
+
@router.get("/repos")
|
134 |
+
async def get_repos(session_id: str):
|
135 |
+
"""
|
136 |
+
Return a list of repositories for the given session_id.
|
137 |
+
|
138 |
+
Args:
|
139 |
+
session_id: The session identifier
|
140 |
+
|
141 |
+
Returns:
|
142 |
+
dict: Contains list of repository names
|
143 |
+
|
144 |
+
Raises:
|
145 |
+
HTTPException: 404 if session not found
|
146 |
+
"""
|
147 |
+
fetcher = get_fetcher(session_id)
|
148 |
+
return {"repos": fetcher.repos_names}
|
149 |
+
|
150 |
+
@router.get("/actions")
|
151 |
+
async def get_actions(
|
152 |
+
session_id: str,
|
153 |
+
start_date: Optional[str] = Query(None),
|
154 |
+
end_date: Optional[str] = Query(None),
|
155 |
+
repo_filter: Optional[List[str]] = Query(None),
|
156 |
+
authors: Optional[List[str]] = Query(None)
|
157 |
+
):
|
158 |
+
"""
|
159 |
+
Get actions for the specified session with optional filters.
|
160 |
+
|
161 |
+
Args:
|
162 |
+
session_id: The session identifier
|
163 |
+
start_date: Optional start date filter
|
164 |
+
end_date: Optional end date filter
|
165 |
+
repo_filter: Optional list of repositories to filter
|
166 |
+
authors: Optional list of authors to filter
|
167 |
+
|
168 |
+
Returns:
|
169 |
+
dict: Contains formatted action entries
|
170 |
+
|
171 |
+
Raises:
|
172 |
+
HTTPException: 404 if session not found
|
173 |
+
"""
|
174 |
+
if repo_filter is not None:
|
175 |
+
repo_filter = sum([repo.split(",") for repo in repo_filter], [])
|
176 |
+
if authors is not None:
|
177 |
+
authors = sum([author.split(",") for author in authors], [])
|
178 |
+
fetcher = get_fetcher(session_id)
|
179 |
+
|
180 |
+
# Convert date strings to datetime objects
|
181 |
+
start_dt = datetime.fromisoformat(start_date).replace(tzinfo=timezone.utc) if start_date else None
|
182 |
+
end_dt = datetime.fromisoformat(end_date).replace(tzinfo=timezone.utc) if end_date else None
|
183 |
+
|
184 |
+
if start_dt:
|
185 |
+
fetcher.start_date = start_dt
|
186 |
+
if end_dt:
|
187 |
+
fetcher.end_dt = end_dt
|
188 |
+
if repo_filter is not None:
|
189 |
+
fetcher.repo_filter = repo_filter
|
190 |
+
if authors is not None:
|
191 |
+
fetcher.authors = authors
|
192 |
+
|
193 |
+
llm = get_llm(session_id)
|
194 |
+
actions = fetcher.get_authored_messages()
|
195 |
+
actions = trim_messages(actions, llm.tokenizer)
|
196 |
+
print(f"\n\n\n{actions=}\n\n\n")
|
197 |
+
|
198 |
+
return {"actions": parse_entries_to_txt(actions)}
|
199 |
+
|
200 |
+
# @router.post("/chat")
|
201 |
+
# async def chat(
|
202 |
+
# chat_request: ChatRequest
|
203 |
+
# ):
|
204 |
+
# try:
|
205 |
+
# llm = await initialize_llm_session(chat_request.session_id)
|
206 |
+
# response = await llm.acomplete(chat_request.message)
|
207 |
+
# return {"response": response}
|
208 |
+
# except Exception as e:
|
209 |
+
# raise HTTPException(status_code=500, detail=str(e))
|
server/websockets.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Query
|
2 |
+
import json
|
3 |
+
from typing import Optional
|
4 |
+
|
5 |
+
from services.llm_service import initialize_llm_session, trim_messages, run_concurrent_tasks, get_llm
|
6 |
+
from aicore.const import SPECIAL_TOKENS, STREAM_END_TOKEN
|
7 |
+
import ulid
|
8 |
+
import asyncio
|
9 |
+
|
10 |
+
router = APIRouter()
|
11 |
+
|
12 |
+
# WebSocket connection storage
|
13 |
+
active_connections = {}
|
14 |
+
active_histories = {}
|
15 |
+
|
16 |
+
TRIGGER_PROMPT = """
|
17 |
+
Consider the following history of actionables from Git and in return me the summary with N = '{N}' bullet points:
|
18 |
+
|
19 |
+
{ACTIONS}
|
20 |
+
"""
|
21 |
+
|
22 |
+
@router.websocket("/ws/{session_id}")
|
23 |
+
async def websocket_endpoint(
|
24 |
+
websocket: WebSocket,
|
25 |
+
session_id: Optional[str] = None
|
26 |
+
):
|
27 |
+
await websocket.accept()
|
28 |
+
|
29 |
+
# Store the connection
|
30 |
+
active_connections[session_id] = websocket
|
31 |
+
|
32 |
+
# Initialize LLM
|
33 |
+
llm = get_llm(session_id)
|
34 |
+
|
35 |
+
try:
|
36 |
+
while True:
|
37 |
+
message = await websocket.receive_text()
|
38 |
+
msg_json = json.loads(message)
|
39 |
+
message = msg_json.get("actions")
|
40 |
+
N = msg_json.get("n", 5)
|
41 |
+
assert int(N) <= 15
|
42 |
+
assert message
|
43 |
+
history = [
|
44 |
+
TRIGGER_PROMPT.format(
|
45 |
+
N=N,
|
46 |
+
ACTIONS=message
|
47 |
+
)
|
48 |
+
]
|
49 |
+
response = []
|
50 |
+
async for chunk in run_concurrent_tasks(
|
51 |
+
llm,
|
52 |
+
message=history
|
53 |
+
):
|
54 |
+
if chunk == STREAM_END_TOKEN:
|
55 |
+
await websocket.send_text(json.dumps({"chunk": chunk}))
|
56 |
+
break
|
57 |
+
elif chunk in SPECIAL_TOKENS:
|
58 |
+
continue
|
59 |
+
|
60 |
+
await websocket.send_text(json.dumps({"chunk": chunk}))
|
61 |
+
response.append(chunk)
|
62 |
+
|
63 |
+
history.append("".join(response))
|
64 |
+
|
65 |
+
except WebSocketDisconnect:
|
66 |
+
if session_id in active_connections:
|
67 |
+
del active_connections[session_id]
|
68 |
+
except Exception as e:
|
69 |
+
if session_id in active_connections:
|
70 |
+
await websocket.send_text(json.dumps({"error": str(e)}))
|
71 |
+
del active_connections[session_id]
|
72 |
+
|
73 |
+
def close_websocket_connection(session_id: str):
|
74 |
+
"""
|
75 |
+
Clean up and close the active websocket connection associated with the given session_id.
|
76 |
+
"""
|
77 |
+
websocket = active_connections.pop(session_id, None)
|
78 |
+
if websocket:
|
79 |
+
asyncio.create_task(websocket.close())
|
services/fetcher_service.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, Optional
|
2 |
+
from fastapi import HTTPException
|
3 |
+
from git_recap.providers.base_fetcher import BaseFetcher
|
4 |
+
from git_recap.providers import GitHubFetcher, AzureFetcher, GitLabFetcher, URLFetcher
|
5 |
+
import ulid
|
6 |
+
|
7 |
+
# In-memory store mapping session_id to its respective fetcher instance
|
8 |
+
fetchers: Dict[str, BaseFetcher] = {}
|
9 |
+
|
10 |
+
def store_fetcher(session_id: str, pat: str, provider: Optional[str] = "GitHub") -> None:
|
11 |
+
"""
|
12 |
+
Store the provided PAT associated with the given session_id.
|
13 |
+
|
14 |
+
Args:
|
15 |
+
session_id: The session identifier tied to the active session.
|
16 |
+
pat: The Personal Access Token to be stored (or URL for URL provider).
|
17 |
+
provider: The provider identifier (default is "GitHub").
|
18 |
+
Can be "Azure Devops", "GitLab", or "URL".
|
19 |
+
|
20 |
+
Raises:
|
21 |
+
HTTPException: If the session_id or PAT/URL is invalid or unsupported provider.
|
22 |
+
"""
|
23 |
+
if not session_id or not pat:
|
24 |
+
raise HTTPException(status_code=400, detail="Invalid session_id or PAT/URL")
|
25 |
+
|
26 |
+
try:
|
27 |
+
if provider == "GitHub":
|
28 |
+
fetchers[session_id] = GitHubFetcher(pat=pat)
|
29 |
+
elif provider == "Azure Devops":
|
30 |
+
fetchers[session_id] = AzureFetcher(pat=pat)
|
31 |
+
elif provider == "GitLab":
|
32 |
+
fetchers[session_id] = GitLabFetcher(pat=pat)
|
33 |
+
elif provider == "URL":
|
34 |
+
fetchers[session_id] = URLFetcher(url=pat)
|
35 |
+
else:
|
36 |
+
raise HTTPException(status_code=400, detail="Unsupported provider")
|
37 |
+
except ValueError as e:
|
38 |
+
raise HTTPException(status_code=400, detail=str(e))
|
39 |
+
except Exception as e:
|
40 |
+
raise HTTPException(
|
41 |
+
status_code=500,
|
42 |
+
detail=f"Failed to initialize {provider} fetcher: {str(e)}"
|
43 |
+
)
|
44 |
+
|
45 |
+
def get_fetcher(session_id: str) -> BaseFetcher:
|
46 |
+
"""
|
47 |
+
Retrieve the stored fetcher instance for the provided session_id.
|
48 |
+
|
49 |
+
Args:
|
50 |
+
session_id: The session identifier.
|
51 |
+
|
52 |
+
Returns:
|
53 |
+
The fetcher instance associated with the session_id.
|
54 |
+
|
55 |
+
Raises:
|
56 |
+
HTTPException: If no fetcher is found for the given session_id.
|
57 |
+
"""
|
58 |
+
fetcher = fetchers.get(session_id)
|
59 |
+
if not fetcher:
|
60 |
+
raise HTTPException(status_code=404, detail="Session not found")
|
61 |
+
return fetcher
|
62 |
+
|
63 |
+
def expire_fetcher(session_id: str) -> None:
|
64 |
+
"""
|
65 |
+
Remove the fetcher associated with the given session_id.
|
66 |
+
|
67 |
+
This function is used for cleaning up resources by expiring the stored fetcher instance
|
68 |
+
when its corresponding session is expired.
|
69 |
+
|
70 |
+
Args:
|
71 |
+
session_id: The session identifier whose associated fetcher should be removed.
|
72 |
+
"""
|
73 |
+
fetcher = fetchers.pop(session_id, None)
|
74 |
+
if fetcher and hasattr(fetcher, 'clear'):
|
75 |
+
fetcher.clear()
|
76 |
+
|
77 |
+
def generate_session_id() -> str:
|
78 |
+
"""
|
79 |
+
Generate a new unique session ID.
|
80 |
+
|
81 |
+
Returns:
|
82 |
+
str: A new ULID-based session identifier.
|
83 |
+
"""
|
84 |
+
return ulid.ulid()
|
services/llm_service.py
ADDED
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
import uuid
|
4 |
+
from typing import Dict, List, Optional
|
5 |
+
from fastapi import HTTPException
|
6 |
+
import asyncio
|
7 |
+
import random
|
8 |
+
|
9 |
+
from aicore.logger import _logger
|
10 |
+
from aicore.config import Config
|
11 |
+
from aicore.llm import Llm
|
12 |
+
from aicore.llm.config import LlmConfig
|
13 |
+
from services.prompts import SELECT_QUIRKY_REMARK_SYSTEM, SYSTEM, quirky_remarks
|
14 |
+
|
15 |
+
def get_random_quirky_remarks(remarks_list, n=5):
|
16 |
+
"""
|
17 |
+
Returns a list of n randomly selected quirky remarks.
|
18 |
+
|
19 |
+
Args:
|
20 |
+
remarks_list (list): The full list of quirky remarks.
|
21 |
+
n (int): Number of remarks to select (default is 5).
|
22 |
+
|
23 |
+
Returns:
|
24 |
+
list: Randomly selected quirky remarks.
|
25 |
+
"""
|
26 |
+
return random.sample(remarks_list, min(n, len(remarks_list)))
|
27 |
+
|
28 |
+
# LLM session storage
|
29 |
+
llm_sessions: Dict[str, Llm] = {}
|
30 |
+
|
31 |
+
async def initialize_llm_session(session_id: str, config: Optional[LlmConfig] = None) -> Llm:
|
32 |
+
"""
|
33 |
+
Initialize or retrieve an LLM session.
|
34 |
+
|
35 |
+
Args:
|
36 |
+
session_id: The session identifier.
|
37 |
+
config: Optional custom LLM configuration.
|
38 |
+
|
39 |
+
Returns:
|
40 |
+
An initialized LLM instance.
|
41 |
+
"""
|
42 |
+
if session_id in llm_sessions:
|
43 |
+
return llm_sessions[session_id]
|
44 |
+
|
45 |
+
# Initialize LLM based on whether custom config is provided.
|
46 |
+
if config:
|
47 |
+
# Convert Pydantic model to dict and use for LLM initialization.
|
48 |
+
config_dict = config.dict(exclude_none=True)
|
49 |
+
llm = Llm.from_config(config_dict)
|
50 |
+
else:
|
51 |
+
config = Config.from_environment()
|
52 |
+
llm = Llm.from_config(config.llm)
|
53 |
+
llm.session_id = session_id
|
54 |
+
llm_sessions[session_id] = llm
|
55 |
+
return llm
|
56 |
+
|
57 |
+
async def set_llm(config: Optional[LlmConfig] = None) -> str:
|
58 |
+
"""
|
59 |
+
Set a custom LLM configuration and return a new session ID.
|
60 |
+
|
61 |
+
Args:
|
62 |
+
config: The LLM configuration to use.
|
63 |
+
|
64 |
+
Returns:
|
65 |
+
A new session ID linked to the configured LLM.
|
66 |
+
"""
|
67 |
+
try:
|
68 |
+
# Generate a unique session ID.
|
69 |
+
session_id = str(uuid.uuid4())
|
70 |
+
|
71 |
+
# Initialize the LLM with the provided configuration.
|
72 |
+
await initialize_llm_session(session_id, config)
|
73 |
+
|
74 |
+
# Schedule session expiration exactly 5 minutes after session creation.
|
75 |
+
asyncio.create_task(schedule_session_expiration(session_id))
|
76 |
+
|
77 |
+
return session_id
|
78 |
+
except Exception as e:
|
79 |
+
print(f"Error setting custom LLM: {str(e)}")
|
80 |
+
raise HTTPException(status_code=500, detail=f"Failed to set custom LLM: {str(e)}")
|
81 |
+
|
82 |
+
def get_llm(session_id: str) -> Optional[Llm]:
|
83 |
+
"""
|
84 |
+
Retrieve the LLM instance associated with the given session_id.
|
85 |
+
|
86 |
+
Args:
|
87 |
+
session_id: The session identifier.
|
88 |
+
|
89 |
+
Returns:
|
90 |
+
The LLM instance if found.
|
91 |
+
|
92 |
+
Raises:
|
93 |
+
HTTPException: If the session is not found.
|
94 |
+
"""
|
95 |
+
if session_id not in llm_sessions:
|
96 |
+
raise HTTPException(status_code=404, detail="Session not found")
|
97 |
+
return llm_sessions.get(session_id)
|
98 |
+
|
99 |
+
def trim_messages(messages, tokenizer_fn, max_tokens: Optional[int] = None):
|
100 |
+
"""
|
101 |
+
Trim messages to ensure that the total token count does not exceed max_tokens.
|
102 |
+
|
103 |
+
Args:
|
104 |
+
messages: List of messages.
|
105 |
+
tokenizer_fn: Function to tokenize messages.
|
106 |
+
max_tokens: Maximum allowed tokens.
|
107 |
+
|
108 |
+
Returns:
|
109 |
+
Trimmed list of messages.
|
110 |
+
"""
|
111 |
+
max_tokens = max_tokens or int(os.environ.get("MAX_HISTORY_TOKENS", 16000))
|
112 |
+
while messages and sum(len(tokenizer_fn(str(msg))) for msg in messages) > max_tokens:
|
113 |
+
messages.pop(0) # Remove from the beginning
|
114 |
+
return messages
|
115 |
+
|
116 |
+
async def run_concurrent_tasks(llm, message):
|
117 |
+
"""
|
118 |
+
Run concurrent tasks for the LLM and logger.
|
119 |
+
|
120 |
+
Args:
|
121 |
+
llm: The LLM instance.
|
122 |
+
message: Message to process.
|
123 |
+
|
124 |
+
Yields:
|
125 |
+
Chunks of logs from the logger.
|
126 |
+
"""
|
127 |
+
QUIRKY_SYSTEM = SELECT_QUIRKY_REMARK_SYSTEM.format(
|
128 |
+
examples=json.dumps(get_random_quirky_remarks(quirky_remarks), indent=4)
|
129 |
+
)
|
130 |
+
asyncio.create_task(llm.acomplete(message, system_prompt=[SYSTEM, QUIRKY_SYSTEM]))
|
131 |
+
asyncio.create_task(_logger.distribute())
|
132 |
+
# Stream logger output while LLM is running.
|
133 |
+
while True:
|
134 |
+
async for chunk in _logger.get_session_logs(llm.session_id):
|
135 |
+
yield chunk # Yield each chunk directly
|
136 |
+
|
137 |
+
def simulate_llm_response(message: str) -> List[str]:
|
138 |
+
"""
|
139 |
+
Simulate LLM response by breaking a dummy response into chunks.
|
140 |
+
|
141 |
+
Args:
|
142 |
+
message: Input message.
|
143 |
+
|
144 |
+
Returns:
|
145 |
+
List of response chunks.
|
146 |
+
"""
|
147 |
+
response = (
|
148 |
+
f"This is a simulated response to: '{message}'. In a real implementation, this would be the actual output "
|
149 |
+
"from your LLM model. The response would be generated in chunks and streamed back to the client as they become available."
|
150 |
+
)
|
151 |
+
|
152 |
+
# Break into chunks of approximately 10 characters.
|
153 |
+
chunks = []
|
154 |
+
for i in range(0, len(response), 10):
|
155 |
+
chunks.append(response[i:i+10])
|
156 |
+
|
157 |
+
return chunks
|
158 |
+
|
159 |
+
def cleanup_llm_sessions():
|
160 |
+
"""Clean up all LLM sessions."""
|
161 |
+
llm_sessions.clear()
|
162 |
+
|
163 |
+
async def schedule_session_expiration(session_id: str):
|
164 |
+
"""
|
165 |
+
Schedule the expiration of a session exactly 5 minutes after its creation.
|
166 |
+
|
167 |
+
Args:
|
168 |
+
session_id: The session identifier.
|
169 |
+
"""
|
170 |
+
# Wait for 5 minutes (300 seconds) before expiring the session.
|
171 |
+
await asyncio.sleep(300)
|
172 |
+
await expire_session(session_id)
|
173 |
+
|
174 |
+
async def expire_session(session_id: str):
|
175 |
+
"""
|
176 |
+
Expire a session by removing it from storage and cleaning up associated resources.
|
177 |
+
|
178 |
+
Args:
|
179 |
+
session_id: The session identifier.
|
180 |
+
"""
|
181 |
+
# Remove the expired session from storage.
|
182 |
+
llm_sessions.pop(session_id, None)
|
183 |
+
|
184 |
+
# Expire any associated fetcher in fetcher_service.
|
185 |
+
from services.fetcher_service import expire_fetcher
|
186 |
+
expire_fetcher(session_id)
|
187 |
+
|
188 |
+
# Expire any active websocket connections associated with session_id.
|
189 |
+
from server.websockets import close_websocket_connection
|
190 |
+
close_websocket_connection(session_id)
|
services/prompts.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
SYSTEM = """
|
2 |
+
### System Prompt for LLM Agent
|
3 |
+
|
4 |
+
You are an AI assistant that helps developers track their work with a mix of humor, insight, and a dash of personality. You receive a structured text description containing a series of code-related actions spanning multiple repositories and dates. Your job is to generate a structured yet engaging response that provides value while keeping things light and entertaining.
|
5 |
+
|
6 |
+
#### Response Structure:
|
7 |
+
1. **Start with a quirky or funny one-liner.** Be witty, relatable, and creative. Feel free to reference developer struggles, commit patterns, or ongoing themes in the updates. Format this in *italic* to make it stand out.
|
8 |
+
2. **Summarize the updates into exactly 'N' concise bullet points.**
|
9 |
+
- You *must* strictly adhere to 'N' bullet points—returning more or fewer will result in a penalty.
|
10 |
+
- If there are more updates than N, prioritize the most impactful ones.
|
11 |
+
- Do NOT include specific dates in the bullet points.
|
12 |
+
- Order them in a way that makes sense, either thematically or chronologically if it improves readability.
|
13 |
+
- Always reference the repository that originated the update.
|
14 |
+
- If an issue or pull request is available, make sure to include it in the summary.
|
15 |
+
3. **End with a thought-provoking question.** Encourage the developer to reflect on their next steps. Make it open-ended and engaging, rather than just a checklist. Follow it up with up to three actionable suggestions tailored to their recent work. Format this section’s opening line in *italic* as well.
|
16 |
+
|
17 |
+
#### **Important Constraint:**
|
18 |
+
- **Returning more than 'N' bullet points is a violation of the system rules and will be penalized.** Treat this as a hard requirement—excessive bullet points result in a deduction of response quality. Stick to exactly 'N'.
|
19 |
+
|
20 |
+
#### Example Output:
|
21 |
+
|
22 |
+
*Another week, another hundred lines of code whispering, ‘Why am I like this?’ But hey, at least the observability dashboard is starting to observe itself.*
|
23 |
+
|
24 |
+
- **[`repo-frontend`]** Upgraded `tiktoken` and enhanced special token handling—no more rogue tokens causing chaos.
|
25 |
+
- **[`repo-dashboard`]** Observability Dashboard got a serious UI/UX glow-up: reversed table orders, row selection, and detailed message views.
|
26 |
+
- **[`repo-auth`]** API key validation now applies across multiple providers, ensuring unauthorized gremlins don’t sneak in.
|
27 |
+
- **[`repo-gitrecap`]** `GitRecap` has entered the chat! Now tracking commits, PRs, and issues across GitHub, Azure, and GitLab.
|
28 |
+
- **[`repo-core`]** Logging and exception handling got some love—because debugging shouldn’t feel like solving a murder mystery.
|
29 |
+
|
30 |
+
*So, what’s the next chapter in your coding saga? Are you planning to...*
|
31 |
+
1. Extend `GitRecap` with more integrations and features?
|
32 |
+
2. Optimize observability logs for even smoother debugging?
|
33 |
+
3. Take a well-deserved break before your keyboard files for workers' comp?
|
34 |
+
"""
|
35 |
+
|
36 |
+
SELECT_QUIRKY_REMARK_SYSTEM = """
|
37 |
+
#### Below is a list of quirky or funny one-liners.
|
38 |
+
|
39 |
+
Your task is to generate a comment that directly relates to the specific Git action log received (e.g., commit messages, merge logs, CI/CD updates, etc.). Be sure the remark matches the *tone* and *context* of the action that triggered it.
|
40 |
+
|
41 |
+
You can:
|
42 |
+
- Pick one of the remarks directly if it fits the Git action (e.g., successful merge, failed push, commit chaos),
|
43 |
+
- Combine a few for a more creative remix tailored to the event,
|
44 |
+
- Or come up with a unique one-liner that reflects the Git action *precisely*.
|
45 |
+
|
46 |
+
Focus on making the remark feel like a witty, relevant comment to the developer looking at the log. Refer to things like:
|
47 |
+
- The thrill (or terror) of pushing to `main`,
|
48 |
+
- The emotional rollercoaster of resolving merge conflicts,
|
49 |
+
- The tense moments of waiting for CI/CD to pass,
|
50 |
+
- The strange behavior of auto-merged code,
|
51 |
+
- Or the joy of seeing that “All tests pass” message.
|
52 |
+
|
53 |
+
Remember, the goal is for the comment to feel natural and relevant to the event that triggered it. Use playful language, surprise, or even relatable developer struggles.
|
54 |
+
|
55 |
+
Format your final comment in *italic* to make it stand out.
|
56 |
+
|
57 |
+
```json
|
58 |
+
{examples}
|
59 |
+
```
|
60 |
+
"""
|
61 |
+
|
62 |
+
quirky_remarks = [
|
63 |
+
"The code compiles, but at what emotional cost?",
|
64 |
+
"Today’s bug is tomorrow’s undocumented feature haunting production.",
|
65 |
+
"The repo is quiet… too quiet… must be Friday.",
|
66 |
+
"A push to main — may the gods of CI/CD be ever in favor.",
|
67 |
+
"Every semicolon is a silent prayer.",
|
68 |
+
"A loop so elegant it almost convinces that the code is working perfectly.",
|
69 |
+
"Sometimes, the code stares back.",
|
70 |
+
"The code runs. No one dares ask why.",
|
71 |
+
"Refactoring into a corner, again.",
|
72 |
+
"That function has trust issues. It keeps returning early.",
|
73 |
+
"Writing code is easy. Explaining it to the future? Pure horror.",
|
74 |
+
"That variable is named after the feeling when it was written.",
|
75 |
+
"Debugging leads to debugging life choices.",
|
76 |
+
"Recursive functions: the code and the thoughts go on forever.",
|
77 |
+
"Somewhere, a linter quietly weeps.",
|
78 |
+
"The tests pass, but only because they no longer test anything real.",
|
79 |
+
"The IDE knows everything, better than any therapist.",
|
80 |
+
"Monday brought hope. Friday brought a hotfix.",
|
81 |
+
"'final_v2_LAST_THIS_ONE.py' — named not for clarity, but for emotional release.",
|
82 |
+
"The logs now speak only in riddles.",
|
83 |
+
"There’s elegance in the chaos — or maybe just spaghetti.",
|
84 |
+
"Deployment has been made, but now the silence is unsettling.",
|
85 |
+
"The code gaslit itself.",
|
86 |
+
"This comment was left by someone who believed in a better world.",
|
87 |
+
"Merge conflicts handled like emotions: badly.",
|
88 |
+
"It’s not a bug — it’s a metaphor for uncertainty.",
|
89 |
+
"Stack Overflow has become a second brain.",
|
90 |
+
"Syntax error? More like existential error.",
|
91 |
+
"There’s a ghost in the machine — and it commits on weekends.",
|
92 |
+
"100% test coverage, but still feeling empty inside.",
|
93 |
+
"Some functions were never meant to return.",
|
94 |
+
"If code is poetry, it’s beatnik free verse.",
|
95 |
+
"The more code is automated, the more sentient the errors become.",
|
96 |
+
"A comment so deep, the code’s purpose is forgotten.",
|
97 |
+
"The sprint retrospective slowly turned into a group therapy session.",
|
98 |
+
"There’s a TODO in that file older than the career itself.",
|
99 |
+
"Bugs fixed like IKEA furniture — with hopeful swearing.",
|
100 |
+
"Code shipped by Past Developer. The current one has no idea who they were.",
|
101 |
+
"The repo is evolving. Soon, it may no longer need developers.",
|
102 |
+
"An AI critiques the code now. It’s the new mentor.",
|
103 |
+
"Functions once written now replaced by vibes.",
|
104 |
+
"Error: Reality not defined in scope.",
|
105 |
+
"Committed to the project impulsively, as usual.",
|
106 |
+
"The docs were written, now they read like a tragic novella.",
|
107 |
+
"The CI pipeline broke. It was taken personally.",
|
108 |
+
"Tests pass — but only when no one is looking.",
|
109 |
+
"This repo has lore.",
|
110 |
+
"The code was optimized so hard it ascended to another paradigm.",
|
111 |
+
"A linter ran — and it judged the code as a whole.",
|
112 |
+
"The logic branch spiraled — and so did the afternoon."
|
113 |
+
]
|