import os

import gradio as gr
import javalang
from typing import Dict, List, Tuple


class JavaPOOEvaluator:
    """Evaluator for Object-Oriented Programming (OOP) concepts in Java code."""

    def __init__(self):
        # Intended rubric weights per criterion. Only a subset of these criteria
        # (classes/objects, methods, encapsulation, inheritance) is checked
        # automatically by the analyzers below.
        self.rubric = {
            "classes": 20,
            "objetos": 20,
            "metodos": 20,
            "atributos": 20,
            "encapsulamento": 10,
            "heranca": 10,
            "polimorfismo": 10,
            "abstracao": 10,
        }

    def analyze_classes_and_objects(self, code: str) -> Tuple[float, List[str]]:
        """Evaluates the definition and use of classes and objects."""
        score = 0
        feedback = []
        try:
            tree = javalang.parse.parse(code)
            classes = list(tree.filter(javalang.tree.ClassDeclaration))
            if classes:
                score += 15
                feedback.append(f"✓ {len(classes)} class(es) declared correctly.")
            else:
                feedback.append("⚠ No class declaration found.")
            # An object instantiation shows up as a variable whose initializer is a
            # ClassCreator node (i.e. a `new X(...)` expression).
            objects = [
                node
                for _, node in tree.filter(javalang.tree.VariableDeclarator)
                if isinstance(node.initializer, javalang.tree.ClassCreator)
            ]
            if objects:
                score += 5
                feedback.append(f"✓ {len(objects)} object(s) instantiated correctly.")
            else:
                feedback.append("⚠ No object instantiation found.")
        except Exception as e:
            feedback.append(f"⚠ Error while analyzing classes and objects: {e}")
        return score, feedback

    def analyze_methods(self, code: str) -> Tuple[float, List[str]]:
        """Evaluates the definition and organization of methods."""
        score = 0
        feedback = []
        try:
            tree = javalang.parse.parse(code)
            methods = list(tree.filter(javalang.tree.MethodDeclaration))
            if methods:
                score += 15
                feedback.append(f"✓ {len(methods)} method(s) declared correctly.")
            else:
                feedback.append("⚠ No method declaration found.")
        except Exception as e:
            feedback.append(f"⚠ Error while analyzing methods: {e}")
        return score, feedback

    def analyze_inheritance(self, code: str) -> Tuple[float, List[str]]:
        """Evaluates the use of inheritance."""
        score = 0
        feedback = []
        try:
            tree = javalang.parse.parse(code)
            subclasses = [
                node
                for _, node in tree.filter(javalang.tree.ClassDeclaration)
                if node.extends
            ]
            if subclasses:
                score += 10
                feedback.append(f"✓ {len(subclasses)} class(es) extending another class.")
            else:
                feedback.append("⚠ No use of inheritance found.")
        except Exception as e:
            feedback.append(f"⚠ Error while analyzing inheritance: {e}")
        return score, feedback

    def analyze_encapsulation(self, code: str) -> Tuple[float, List[str]]:
        """Evaluates encapsulation via access modifiers."""
        score = 0
        feedback = []
        try:
            tree = javalang.parse.parse(code)
            fields = list(tree.filter(javalang.tree.FieldDeclaration))
            private_count = sum(1 for _, node in fields if 'private' in node.modifiers)
            if private_count:
                score += 5
                feedback.append(f"✓ {private_count} attribute(s) encapsulated with 'private'.")
            else:
                feedback.append("⚠ No private attribute found.")
        except Exception as e:
            feedback.append(f"⚠ Error while analyzing encapsulation: {e}")
        return score, feedback

    def evaluate_code(self, code: str) -> Dict:
        """Evaluates the Java code and returns the total score, proficiency level, and feedback."""
        cls_score, cls_feedback = self.analyze_classes_and_objects(code)
        meth_score, meth_feedback = self.analyze_methods(code)
        enc_score, enc_feedback = self.analyze_encapsulation(code)
        inh_score, inh_feedback = self.analyze_inheritance(code)
        # The four automated checks award at most 20 + 15 + 5 + 10 = 50 raw points;
        # scale the result to 0-100 so the proficiency thresholds below are reachable.
        raw_score = cls_score + meth_score + enc_score + inh_score
        total_score = raw_score / 50 * 100
        proficiency = "Needs Improvement"
        if total_score >= 90:
            proficiency = "Excellent"
        elif total_score >= 75:
            proficiency = "Good"
        elif total_score >= 60:
            proficiency = "Satisfactory"
        return {
            "total_score": total_score,
            "proficiency": proficiency,
            "feedback": {
                "classes_objects": cls_feedback,
                "methods": meth_feedback,
                "encapsulation": enc_feedback,
                "inheritance": inh_feedback,
            },
        }
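

# A minimal usage sketch, independent of the Gradio interface below: it runs the
# evaluator directly on an inline Java snippet and prints the resulting report.
# The Java code and the helper name `_example_usage` are illustrative additions,
# not part of the original evaluator, and the function is never called automatically.
def _example_usage() -> None:
    sample_java = """
    class Animal {
        private String name;
        public String getName() { return name; }
    }

    class Dog extends Animal {
        public void bark() { System.out.println("Woof"); }
    }

    public class Main {
        public static void main(String[] args) {
            Dog dog = new Dog();
            dog.bark();
        }
    }
    """
    evaluation = JavaPOOEvaluator().evaluate_code(sample_java)
    print(f"Score: {evaluation['total_score']:.1f}/100 ({evaluation['proficiency']})")
    for category, comments in evaluation["feedback"].items():
        print(f"{category.replace('_', ' ').title()}:")
        for comment in comments:
            print(f"  {comment}")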


# Gradio interface
with gr.Blocks(
    title="Java OOP Evaluator",
    css="#rubric_image img {display: block; max-width: 50%; height: auto; margin: 0 auto;}",
) as demo:
    gr.Markdown("# Java OOP Evaluator")
    gr.Markdown("### [View the rubric as a PDF](rubric.pdf)")
    gr.Image("rubric_table.png", label="Rubric summary table", elem_id="rubric_image")
    upload = gr.File(label="Upload Java files for evaluation", file_types=[".java"], file_count="multiple")
    evaluate_button = gr.Button("Evaluate Code")
    output = gr.Textbox(label="Evaluation Results", lines=25)

    def evaluate_code_files(files) -> str:
        """Evaluates multiple uploaded Java files and builds a combined report."""
        if not files:
            return "No files uploaded."
        evaluator = JavaPOOEvaluator()
        results = []
        for file in files:
            # gr.File may yield plain path strings or tempfile-like objects
            # depending on the Gradio version; handle both.
            path = file if isinstance(file, str) else file.name
            try:
                with open(path, 'r', encoding='utf-8') as f:
                    code = f.read()
                evaluation = evaluator.evaluate_code(code)
                result = f"\nFile: {os.path.basename(path)}\n"
                result += f"Total Score: {evaluation['total_score']:.1f}/100\n"
                result += f"Level: {evaluation['proficiency']}\n\n"
                result += "Detailed Feedback:\n"
                for category, comments in evaluation['feedback'].items():
                    result += f"\n{category.replace('_', ' ').title()}:\n"
                    for comment in comments:
                        result += f"  {comment}\n"
                results.append(result)
            except Exception as e:
                results.append(f"Error while processing file {os.path.basename(path)}: {e}")
        return "\n".join(results)

    evaluate_button.click(fn=evaluate_code_files, inputs=upload, outputs=output)

if __name__ == "__main__":
    demo.launch(share=True)
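
# Assumed runtime dependencies (normally pinned in the Space's requirements.txt,
# which is not shown here): gradio and javalang, both installable from PyPI.
# `share=True` additionally requests a temporary public Gradio link alongside the local URL.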