vijayvizag committed
Commit be94910 · 1 Parent(s): a3e70bc

initial code commit

.gitattributes CHANGED
@@ -23,9 +23,7 @@
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.rar filter=lfs diff=lfs merge=lfs -text
  *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
  *.wasm filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,10 @@
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ .streamlit/
+ .DS_Store
Procfile ADDED
@@ -0,0 +1 @@
+ web: streamlit run app.py
README.md CHANGED
@@ -1,36 +1,44 @@
- ---
- title: Code To Doc Streamlit
- emoji: 🚀
- colorFrom: gray
- colorTo: red
- sdk: streamlit
- sdk_version: 1.44.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- short_description: doc gen
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
- # Code to Project Document Generator
-
- This is a Streamlit app that takes code files and a heading structure, and generates:
- - A DOCX project report
- - A PPTX presentation
-
- It uses lightweight models like T5 and tools like python-docx and pptx. Deployable on Hugging Face Spaces!
-
- ## Usage
- 1. Upload your code files (Python or React)
- 2. Upload a `headings.txt` file with your report headings
- 3. Click Generate
-
- ## Output
- - `project_report.docx`
- - `project_presentation.pptx`
-
- ## Run Locally
- ```bash
- pip install -r requirements.txt
- streamlit run app.py
- ```
+ # Code Analyzer
+
+ [![Streamlit App](https://static.streamlit.io/badges/streamlit_badge_black_white.svg)](https://huggingface.co/spaces/YOUR_USERNAME/code-analyzer)
+
+ This tool analyzes code projects and generates descriptive summaries along with answers to specific questions about the codebase.
+
+ ## Features
+ - Analyzes Python, Java, and React code files
+ - Detects technology stack and dependencies
+ - Measures code complexity metrics
+ - Generates project summaries using transformer models
+ - Interactive Streamlit interface with visualizations
+ - Provides targeted answers to specific questions about the codebase
+
+ ## Demo
+ You can try the live demo on [Hugging Face Spaces](https://huggingface.co/spaces/YOUR_USERNAME/code-analyzer)
+
+ ## Local Setup
+ 1. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ 2. Run the Streamlit app:
+ ```bash
+ streamlit run app.py
+ ```
+
+ ## Usage
+ 1. Upload your code files (supported: .py, .java, .js, .jsx, .ts, .tsx)
+ 2. Enter your analysis questions (or use the default ones)
+ 3. Click "Analyze Code" to get insights about your project
+
+ ## Analysis Capabilities
+ - Technology stack detection (languages, frameworks, dependencies)
+ - Code metrics (lines of code, class/function count, complexity)
+ - Project objective identification from documentation
+ - Customizable question-answering system
+ - Interactive visualizations of code metrics
+
+ ## Requirements
+ - Python 3.8+
+ - 4GB+ RAM
+ - CUDA-capable GPU (optional, for faster processing)
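For reference, the `CodeAnalyzer` class introduced in this commit can also be driven without the Streamlit UI. A minimal sketch of the result structure it returns (the directory and questions paths are placeholders taken from the `__main__` block in code_analyzer.py; the first call downloads the `facebook/bart-large-cnn` summarization model):

```python
from code_analyzer import CodeAnalyzer

# Analyze a folder of .py/.java/.js/.jsx/.ts/.tsx files against a plain-text
# file with one question per line (questions.txt in this commit is an example).
analyzer = CodeAnalyzer()
results = analyzer.analyze_project("./example_project", "./questions.txt")

print(results["objective"])      # docstring-based project summary
print(results["tech_stack"])     # {"languages": [...], "frameworks": [...], "dependencies": [...]}
print(results["metrics"])        # line counts, class/function counts, complexity_score
for question, answer in results["answers"].items():
    print(f"{question}\n{answer}\n")
```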
WELCOME.md ADDED
@@ -0,0 +1,30 @@
+ # 🔍 Code Project Analyzer
+
+ Welcome to the Code Project Analyzer! This tool helps you analyze your code projects using AI-powered insights.
+
+ ## 🚀 Features
+ - Analyze Python, Java, and React code files
+ - Detect technology stack and dependencies
+ - Measure code complexity metrics
+ - Generate project summaries
+ - Get answers to specific questions about your code
+
+ ## 📝 How to Use
+ 1. Upload your code files (supported: .py, .java, .js, .jsx, .ts, .tsx)
+ 2. Enter your analysis questions or use the default ones
+ 3. Click "Analyze Code" to get insights about your project
+
+ ## 🎯 Example Questions
+ - What is the project's abstract?
+ - What is the system architecture?
+ - What are the software requirements?
+ - What are the hardware requirements?
+
+ ## 🔄 Results
+ You'll get:
+ - Project objective and tech stack overview
+ - Interactive code metrics visualization
+ - Detailed answers to your questions
+ - Complexity assessment
+
+ Made with ❤️ using Streamlit and Hugging Face
app.py CHANGED
@@ -1,29 +1,136 @@
  import streamlit as st
- from utils.summarizer import summarize_code
- from utils.doc_generator import generate_document, generate_pptx
  import os
-
- st.title("📄 Code to Project Document Generator")
- st.write("Upload your code files and a headings.txt file.")
-
- uploaded_files = st.file_uploader("Upload code files (Python/React)", accept_multiple_files=True)
- headings_file = st.file_uploader("Upload headings.txt", type="txt")
-
- if st.button("Generate Document") and uploaded_files and headings_file:
-     with open("headings.txt", "wb") as f:
-         f.write(headings_file.read())
-
-     code_dir = "uploaded_code"
-     os.makedirs(code_dir, exist_ok=True)
-
-     for file in uploaded_files:
-         with open(os.path.join(code_dir, file.name), "wb") as f:
-             f.write(file.read())
-
-     sections = summarize_code(code_dir, "headings.txt")
-     generate_document(sections)
-     generate_pptx(sections)
-
-     st.success("Documents generated!")
-     st.download_button("Download DOCX", data=open("project_report.docx", "rb"), file_name="project_report.docx")
-     st.download_button("Download PPTX", data=open("project_presentation.pptx", "rb"), file_name="project_presentation.pptx")
+ import tempfile
+ import shutil
+ from code_analyzer import CodeAnalyzer
+ import plotly.express as px
+ import pandas as pd
+
+ st.set_page_config(
+     page_title="Code Analyzer",
+     page_icon="🔍",
+     layout="wide"
+ )
+
+ st.title("🔍 Code Project Analyzer")
+ st.write("Upload your code files and analyze them with AI-powered insights")
+
+ def create_metrics_chart(metrics):
+     """Create a bar chart for code metrics"""
+     df = pd.DataFrame({
+         'Metric': list(metrics.keys()),
+         'Value': list(metrics.values())
+     })
+     fig = px.bar(df, x='Metric', y='Value', title='Code Metrics')
+     return fig
+
+ def display_tech_stack(tech_stack):
+     """Display technology stack in an organized way"""
+     st.subheader("🛠️ Technology Stack")
+     cols = st.columns(3)
+
+     with cols[0]:
+         st.write("**Languages**")
+         if tech_stack["languages"]:
+             for lang in tech_stack["languages"]:
+                 st.write(f"- {lang}")
+         else:
+             st.write("No languages detected")
+
+     with cols[1]:
+         st.write("**Frameworks**")
+         if tech_stack["frameworks"]:
+             for framework in tech_stack["frameworks"]:
+                 st.write(f"- {framework}")
+         else:
+             st.write("No frameworks detected")
+
+     with cols[2]:
+         st.write("**Dependencies**")
+         if tech_stack["dependencies"]:
+             for dep in tech_stack["dependencies"]:
+                 st.write(f"- {dep}")
+         else:
+             st.write("No dependencies detected")
+
+ def save_uploaded_files(uploaded_files):
+     """Save uploaded files to a temporary directory"""
+     temp_dir = tempfile.mkdtemp()
+     for uploaded_file in uploaded_files:
+         file_path = os.path.join(temp_dir, uploaded_file.name)
+         os.makedirs(os.path.dirname(file_path), exist_ok=True)
+         with open(file_path, "wb") as f:
+             f.write(uploaded_file.getbuffer())
+     return temp_dir
+
+ # File upload section
+ uploaded_files = st.file_uploader(
+     "Upload your code files",
+     accept_multiple_files=True,
+     type=['py', 'java', 'js', 'jsx', 'ts', 'tsx']
+ )
+
+ # Questions input
+ st.subheader("📝 Analysis Questions")
+ default_questions = """What is the project's abstract?
+ What is the system architecture?
+ What are the software requirements?
+ What are the hardware requirements?"""
+
+ questions = st.text_area(
+     "Enter your questions (one per line)",
+     value=default_questions,
+     height=150
+ )
+
+ analyze_button = st.button("🔍 Analyze Code")
+
+ if analyze_button and uploaded_files:
+     with st.spinner("Analyzing your code..."):
+         # Save uploaded files
+         temp_dir = save_uploaded_files(uploaded_files)
+
+         # Save questions to a temporary file
+         questions_file = os.path.join(temp_dir, "questions.txt")
+         with open(questions_file, "w") as f:
+             f.write(questions)
+
+         try:
+             # Run analysis
+             analyzer = CodeAnalyzer()
+             results = analyzer.analyze_project(temp_dir, questions_file)
+
+             # Display results in tabs
+             tab1, tab2, tab3 = st.tabs(["📊 Overview", "💻 Code Metrics", "❓ Q&A"])
+
+             with tab1:
+                 st.subheader("🎯 Project Objective")
+                 st.write(results["objective"])
+
+                 display_tech_stack(results["tech_stack"])
+
+             with tab2:
+                 st.subheader("📊 Code Metrics")
+                 metrics_chart = create_metrics_chart(results["metrics"])
+                 st.plotly_chart(metrics_chart, use_container_width=True)
+
+                 # Complexity assessment
+                 complexity = "Low" if results["metrics"]["complexity_score"] < 10 else \
+                     "Medium" if results["metrics"]["complexity_score"] < 30 else "High"
+                 st.info(f"Project Complexity: {complexity}")
+
+             with tab3:
+                 st.subheader("❓ Analysis Results")
+                 for question, answer in results["answers"].items():
+                     with st.expander(question):
+                         st.write(answer)
+
+         except Exception as e:
+             st.error(f"An error occurred during analysis: {str(e)}")
+
+         finally:
+             # Cleanup
+             shutil.rmtree(temp_dir)
+ else:
+     if analyze_button:
+         st.warning("Please upload some code files first!")
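The complexity badge in the Code Metrics tab comes from `complexity_score`, which code_analyzer.py increments once for every `if`/`for`/`while`/`try` node it finds while walking each Python file's AST. The banding used above (and reused for the hardware-requirements answer) is a simple threshold check; a sketch of the same logic for reference:

```python
def complexity_band(complexity_score: int) -> str:
    """Mirror the thresholds used in app.py and code_analyzer.py."""
    if complexity_score < 10:
        return "Low"
    if complexity_score < 30:
        return "Medium"
    return "High"

assert complexity_band(3) == "Low"
assert complexity_band(15) == "Medium"
assert complexity_band(40) == "High"
```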
code_analyzer.py ADDED
@@ -0,0 +1,231 @@
+ from transformers import pipeline
+ import os
+ import glob
+ import ast
+ import re
+ from typing import List, Dict, Set, Any
+ import pkg_resources
+ import importlib.util
+ from collections import defaultdict
+
+ class CodeAnalyzer:
+     def __init__(self):
+         # Using different models for different types of analysis
+         self.summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
+
+     def detect_technologies(self, code_files: Dict[str, str]) -> Dict[str, Any]:
+         """Detect technologies used in the project"""
+         tech_stack = {
+             "languages": set(),
+             "frameworks": set(),
+             "dependencies": set()
+         }
+
+         # Detect languages
+         extensions_map = {
+             '.py': 'Python',
+             '.js': 'JavaScript',
+             '.jsx': 'React/JavaScript',
+             '.ts': 'TypeScript',
+             '.tsx': 'React/TypeScript',
+             '.java': 'Java'
+         }
+
+         for file_path in code_files.keys():
+             ext = os.path.splitext(file_path)[1]
+             if ext in extensions_map:
+                 tech_stack["languages"].add(extensions_map[ext])
+
+         # Analyze Python dependencies
+         for file_path, content in code_files.items():
+             if file_path.endswith('.py'):
+                 try:
+                     tree = ast.parse(content)
+                     for node in ast.walk(tree):
+                         if isinstance(node, ast.Import):
+                             for name in node.names:
+                                 tech_stack["dependencies"].add(name.name.split('.')[0])
+                         elif isinstance(node, ast.ImportFrom):
+                             if node.module:
+                                 tech_stack["dependencies"].add(node.module.split('.')[0])
+                 except:
+                     continue
+
+         # Check if common frameworks are used
+         framework_indicators = {
+             'django': 'Django',
+             'flask': 'Flask',
+             'fastapi': 'FastAPI',
+             'react': 'React',
+             'angular': 'Angular',
+             'vue': 'Vue.js',
+             'spring': 'Spring',
+             'tensorflow': 'TensorFlow',
+             'torch': 'PyTorch',
+             'pandas': 'Pandas',
+             'numpy': 'NumPy'
+         }
+
+         for dep in tech_stack["dependencies"]:
+             if dep.lower() in framework_indicators:
+                 tech_stack["frameworks"].add(framework_indicators[dep.lower()])
+
+         return {k: list(v) for k, v in tech_stack.items()}
+
+     def analyze_code_complexity(self, code_files: Dict[str, str]) -> Dict[str, Any]:
+         """Analyze code complexity metrics"""
+         metrics = {
+             "total_lines": 0,
+             "code_lines": 0,
+             "class_count": 0,
+             "function_count": 0,
+             "complexity_score": 0
+         }
+
+         for file_path, content in code_files.items():
+             if file_path.endswith('.py'):
+                 try:
+                     tree = ast.parse(content)
+                     metrics["class_count"] += sum(1 for node in ast.walk(tree) if isinstance(node, ast.ClassDef))
+                     metrics["function_count"] += sum(1 for node in ast.walk(tree) if isinstance(node, ast.FunctionDef))
+
+                     lines = content.split('\n')
+                     metrics["total_lines"] += len(lines)
+                     metrics["code_lines"] += sum(1 for line in lines if line.strip() and not line.strip().startswith('#'))
+
+                     # Simple complexity score based on nesting depth and branches
+                     complexity = 0
+                     for node in ast.walk(tree):
+                         if isinstance(node, (ast.If, ast.For, ast.While, ast.Try)):
+                             complexity += 1
+                     metrics["complexity_score"] += complexity
+                 except:
+                     continue
+
+         return metrics
+
+     def identify_objective(self, code_files: Dict[str, str]) -> str:
+         """Identify the main objective of the project"""
+         # Combine all Python docstrings and comments
+         all_docs = []
+         for file_path, content in code_files.items():
+             if file_path.endswith('.py'):
+                 try:
+                     tree = ast.parse(content)
+                     for node in ast.walk(tree):
+                         if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.Module)):
+                             if ast.get_docstring(node):
+                                 all_docs.append(ast.get_docstring(node))
+                 except:
+                     continue
+
+         combined_docs = " ".join(all_docs)
+         if combined_docs:
+             return self.summarizer(combined_docs, max_length=100, min_length=30, do_sample=False)[0]['summary_text']
+         return "Unable to determine project objective from available documentation"
+
+     def read_code_files(self, directory: str) -> Dict[str, str]:
+         """Read all code files from the given directory"""
+         code_files = {}
+         extensions = ['.py', '.java', '.jsx', '.js', '.ts', '.tsx']
+
+         for ext in extensions:
+             for file_path in glob.glob(f"{directory}/**/*{ext}", recursive=True):
+                 try:
+                     with open(file_path, 'r', encoding='utf-8') as f:
+                         code_files[file_path] = f.read()
+                 except Exception as e:
+                     print(f"Error reading {file_path}: {e}")
+
+         return code_files
+
+     def generate_summary(self, code: str, context: str = "") -> str:
+         """Generate a summary for the given code with optional context"""
+         if not code.strip():
+             return "No code provided"
+
+         # Truncate input if too long
+         code = code[:4000]
+         prompt = f"{context}\n{code}" if context else code
+
+         summary = self.summarizer(prompt, max_length=150, min_length=40, do_sample=False)[0]['summary_text']
+         return summary
+
+     def analyze_project(self, project_dir: str, questions_file: str) -> Dict[str, Any]:
+         """Analyze project and answer questions"""
+         # Read code files
+         code_files = self.read_code_files(project_dir)
+
+         if not code_files:
+             return {
+                 "project_summary": "No code files found",
+                 "tech_stack": {},
+                 "metrics": {},
+                 "objective": "No code files to analyze",
+                 "answers": {}
+             }
+
+         # Perform various analyses
+         tech_stack = self.detect_technologies(code_files)
+         metrics = self.analyze_code_complexity(code_files)
+         objective = self.identify_objective(code_files)
+
+         # Generate overall summary
+         combined_code = "\n\n".join(code_files.values())
+         summary = self.generate_summary(combined_code)
+
+         # Read questions
+         with open(questions_file, 'r') as f:
+             questions = [line.strip() for line in f.readlines() if line.strip()]
+
+         # Generate targeted answers based on analysis results
+         answers = {}
+         for question in questions:
+             question_lower = question.lower()
+             if 'abstract' in question_lower:
+                 answers[question] = objective
+             elif 'architecture' in question_lower:
+                 arch_summary = f"Project Architecture:\n- Languages: {', '.join(tech_stack['languages'])}\n"
+                 if tech_stack['frameworks']:
+                     arch_summary += f"- Frameworks: {', '.join(tech_stack['frameworks'])}\n"
+                 arch_summary += f"- Components: {metrics['class_count']} classes, {metrics['function_count']} functions"
+                 answers[question] = arch_summary
+             elif 'software' in question_lower and 'requirement' in question_lower:
+                 deps = tech_stack['dependencies']
+                 frameworks = tech_stack['frameworks']
+                 req_list = list(set(deps) | set(frameworks))
+                 answers[question] = f"Software Requirements:\n- Python environment\n- Dependencies: {', '.join(req_list)}"
+             elif 'hardware' in question_lower and 'requirement' in question_lower:
+                 complexity = "Low" if metrics['complexity_score'] < 10 else "Medium" if metrics['complexity_score'] < 30 else "High"
+                 answers[question] = f"Hardware Requirements:\n- Complexity: {complexity}\n- Minimum RAM: {2 if complexity == 'Low' else 4 if complexity == 'Medium' else 8}GB\n- CPU: {1 if complexity == 'Low' else 2 if complexity == 'Medium' else 4}+ cores recommended"
+             else:
+                 # For other questions, generate a contextual summary
+                 answers[question] = self.generate_summary(combined_code, f"Context: {question}")
+
+         return {
+             "project_summary": summary,
+             "tech_stack": tech_stack,
+             "metrics": metrics,
+             "objective": objective,
+             "answers": answers
+         }
+
+ if __name__ == "__main__":
+     analyzer = CodeAnalyzer()
+     # Example usage
+     results = analyzer.analyze_project(
+         "./example_project",
+         "./questions.txt"
+     )
+     print("\nProject Objective:", results["objective"])
+     print("\nTechnology Stack:")
+     for category, items in results["tech_stack"].items():
+         print(f"- {category.title()}: {', '.join(items)}")
+
+     print("\nCode Metrics:")
+     for metric, value in results["metrics"].items():
+         print(f"- {metric.replace('_', ' ').title()}: {value}")
+
+     print("\nAnswers to Questions:")
+     for q, a in results["answers"].items():
+         print(f"\n{q}:\n{a}")
example_project/calculator.py ADDED
@@ -0,0 +1,20 @@
+ class Calculator:
+     """A simple calculator implementation with basic arithmetic operations."""
+
+     def add(self, x: float, y: float) -> float:
+         """Add two numbers."""
+         return x + y
+
+     def subtract(self, x: float, y: float) -> float:
+         """Subtract y from x."""
+         return x - y
+
+     def multiply(self, x: float, y: float) -> float:
+         """Multiply two numbers."""
+         return x * y
+
+     def divide(self, x: float, y: float) -> float:
+         """Divide x by y."""
+         if y == 0:
+             raise ValueError("Cannot divide by zero")
+         return x / y
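This bundled sample mainly exercises the analyzer's docstring scan: when pointed at example_project, `identify_objective` feeds exactly these docstrings to the summarizer. A quick sketch of what it would collect (it assumes the file sits at the path added in this commit, and skips the summarization step):

```python
import ast

# Collect module/class/function docstrings the same way
# CodeAnalyzer.identify_objective does, minus the summarization step.
with open("example_project/calculator.py", encoding="utf-8") as f:
    tree = ast.parse(f.read())

docs = [
    ast.get_docstring(node)
    for node in ast.walk(tree)
    if isinstance(node, (ast.Module, ast.ClassDef, ast.FunctionDef)) and ast.get_docstring(node)
]
print(docs[0])  # "A simple calculator implementation with basic arithmetic operations."
```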
headings.txt DELETED
@@ -1,27 +0,0 @@
- ABSTRACT
- Introduction
- Python
- Machine Learning
- Deep Learning
- Image Classification
- Working of Image Classification
- Applications of Image Classification
- Architecture
- Data Flow Diagram
- Image Classification Techniques
- Aim of the Project
- Scope
- System Requirements
- Hardware Requirements
- Software Requirements
- Setup Instructions
- Algorithms
- Performance Evaluation
- Comparison of Base Line Models
- Error Analysis
- Methodology
- Results
- Discussion
- CONCLUSION
- REFERENCES
- FUTURE WORK
packages.txt ADDED
@@ -0,0 +1 @@
+ python-dev
project_presentation.pptx DELETED
Binary file (51.7 kB)
 
project_report.docx DELETED
Binary file (37 kB)
 
questions.txt ADDED
@@ -0,0 +1,4 @@
+ What is the project's abstract?
+ What is the system architecture?
+ What are the software requirements?
+ What are the hardware requirements?
requirements.txt CHANGED
@@ -1,8 +1,8 @@
- streamlit
- transformers
- sentencepiece
- python-docx
- python-pptx
- plantuml
- huggingface-hub
- torch
+ transformers[torch]==4.35.0
+ --extra-index-url https://download.pytorch.org/whl/cpu
+ torch>=2.0.0
+ numpy>=1.24.0
+ pandas>=2.0.0
+ streamlit>=1.30.0
+ plotly>=5.18.0
+ altair>=5.2.0
sample_code/sample.py DELETED
@@ -1,3 +0,0 @@
- # Sample Python file
- def greet(name):
-     return f"Hello, {name}!"
uploaded_code/sample.py DELETED
@@ -1,3 +0,0 @@
- # Sample Python file
- def greet(name):
-     return f"Hello, {name}!"
utils/__pycache__/doc_generator.cpython-311.pyc DELETED
Binary file (1.64 kB)
 
utils/__pycache__/summarizer.cpython-311.pyc DELETED
Binary file (1.94 kB)
 
utils/doc_generator.py DELETED
@@ -1,19 +0,0 @@
- from docx import Document
- from pptx import Presentation
- from pptx.util import Inches
-
- def generate_document(sections):
-     doc = Document()
-     doc.add_heading("Project Report", 0)
-     for title, content in sections.items():
-         doc.add_heading(title, level=1)
-         doc.add_paragraph(content)
-     doc.save("project_report.docx")
-
- def generate_pptx(sections):
-     prs = Presentation()
-     for title, content in sections.items():
-         slide = prs.slides.add_slide(prs.slide_layouts[1])
-         slide.shapes.title.text = title
-         slide.placeholders[1].text = content
-     prs.save("project_presentation.pptx")
utils/summarizer.py DELETED
@@ -1,20 +0,0 @@
- from transformers import pipeline
- import os
- summarizer = pipeline("summarization", model="t5-small")
-
- def summarize_code(code_dir, headings_path):
-     sections = {}
-     with open(headings_path, "r") as hfile:
-         headings = [line.strip() for line in hfile if line.strip()]
-
-     for heading in headings:
-         combined_code = ""
-         for root, _, files in os.walk(code_dir):
-             for file in files:
-                 with open(os.path.join(root, file), "r", encoding="utf-8", errors="ignore") as f:
-                     combined_code += f.read() + "\n"
-
-         summary = summarizer(combined_code[:1000], max_length=120, min_length=30, do_sample=False)[0]["summary_text"]
-         sections[heading] = summary
-
-     return sections