# Import necessary packages
import os

import gradio as gr
from ibm_watsonx_ai.foundation_models import ModelInference
from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
# Model and project settings
model_id = "meta-llama/llama-3-2-11b-vision-instruct"  # Llama 3.2 11B Vision Instruct model
# Read the API key from the environment (set WATSONX_APIKEY) rather than hardcoding a secret
watsonx_API = os.getenv("WATSONX_APIKEY")
project_id = "ed8f7a2c-e597-4a09-a98f-dbdcef57a0d0"
# Set credentials to use the model
credentials = {
    "url": "https://au-syd.ml.cloud.ibm.com",
    "apikey": watsonx_API
}
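# Alternatively, a Credentials object can replace the plain dict (a sketch;
# the SDK's ModelInference accepts either form):
# from ibm_watsonx_ai import Credentials
# credentials = Credentials(url="https://au-syd.ml.cloud.ibm.com", api_key=watsonx_API)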
# Generation parameters
params = TextChatParameters(
    temperature=0.7,
    max_tokens=512
)
# Initialize the model
model = ModelInference(
    model_id=model_id,
    credentials=credentials,
    project_id=project_id,
    params=params
)
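# Optional smoke test (a sketch with a throwaway prompt; uncomment to verify
# that the credentials and model ID resolve before wiring up the UI):
# print(model.chat(messages=[{"role": "user", "content": [{"type": "text", "text": "Say hello."}]}]))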
# Function to polish the resume using the model, making polish_prompt optional
def polish_resume(position_name, resume_content, polish_prompt=""):
    # Check if polish_prompt is provided and adjust the prompt accordingly
    if polish_prompt and polish_prompt.strip():
        prompt_use = f"Given the resume content: '{resume_content}', polish it based on the following instructions: {polish_prompt} for the {position_name} position."
    else:
        prompt_use = f"Suggest improvements for the following resume content: '{resume_content}' to better align with the requirements and expectations of a {position_name} position. Return the polished version, highlighting necessary adjustments for clarity, relevance, and impact in relation to the targeted role."
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": prompt_use
                },
            ]
        }
    ]
    # Generate a response using the model with the parameters set above
    generated_response = model.chat(messages=messages)
    # Extract and return the generated text
    generated_text = generated_response['choices'][0]['message']['content']
    return generated_text
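# Example call with hypothetical inputs (uncomment to test without the UI):
# print(polish_resume("Data Scientist", "Analyst with 5 years of Python and SQL experience."))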
# Create a Gradio interface for the resume polish application, marking polish_prompt as optional
resume_polish_application = gr.Interface(
    fn=polish_resume,
    flagging_mode="never",  # Disable Gradio's flagging feature; it is not needed here
    inputs=[
        gr.Textbox(label="Position Name", placeholder="Enter the name of the position..."),
        gr.Textbox(label="Resume Content", placeholder="Paste your resume content here...", lines=20),
        gr.Textbox(label="Polish Instruction (Optional)", placeholder="Enter specific instructions or areas for improvement (optional)...", lines=2),
    ],
    outputs=gr.Textbox(label="Polished Content"),
    title="Resume Polish Application",
    description="This application helps you polish your resume. Enter the position you want to apply for, your resume content, and specific instructions or areas for improvement (optional), then get a polished version of your content."
)
# Launch the application (share=True also serves a temporary public link)
resume_polish_application.launch(share=True)