import io
import base64
import streamlit as st
import os
from together import Together
from PIL import Image
import requests
from io import BytesIO
import json
from streamlit_lottie import st_lottie
from streamlit_option_menu import option_menu
import time

# Set page config
st.set_page_config(
    page_title="TeleGuide | AI Telecom Assistant",
    page_icon="🛰️",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Load Lottie animation
def load_lottie(url: str):
    try:
        r = requests.get(url)
        if r.status_code != 200:
            return None
        return r.json()
    except (requests.RequestException, ValueError):
        return None

# Custom CSS with animations and improved styling
st.markdown("""
""", unsafe_allow_html=True)

# Initialize the Together client with error handling
@st.cache_resource
def get_together_client():
    try:
        return Together(api_key=st.secrets["api_key"])
    except Exception as e:
        st.error(f"Error initializing API client: {str(e)}")
        return None

client = get_together_client()
if client is None:
    st.stop()  # Halt early; get_together_client() has already displayed the error

# Load animations
lottie_telecom = load_lottie("https://assets4.lottiefiles.com/packages/lf20_qz3tpn4w.json")
lottie_analysis = load_lottie("https://assets4.lottiefiles.com/packages/lf20_xh83pj1k.json")

def process_text_query(query, model="meta-llama/Llama-3.2-3B-Instruct-Turbo"):
    try:
        with st.spinner("Processing your query..."):
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {
                        "role": "system",
                        "content": (
                            "You are TeleGuide, an expert AI assistant specialized in "
                            "telecommunication tasks. Provide detailed, practical, and "
                            "accurate information."
                        ),
                    },
                    {"role": "user", "content": query},
                ],
                max_tokens=500,
                temperature=0.7
            )
        return response.choices[0].message.content
    except Exception as e:
        st.error(f"Error processing query: {str(e)}")
        return None

def process_image_query(image_base64, query, model="meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo"):
    try:
        with st.spinner("Analyzing image..."):
            system_message = (
                "You are TeleGuide, an expert AI assistant in telecommunications "
                "infrastructure analysis."
            )
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_message},
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": query},
                            {
                                "type": "image_url",
                                "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"},
                            },
                        ],
                    },
                ],
                max_tokens=500,
                temperature=0.7
            )
        return response.choices[0].message.content
    except Exception as e:
        st.error(f"Error analyzing image: {str(e)}")
        return None

def image_to_base64(image):
    try:
        buffered = io.BytesIO()
        image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode('utf-8')
    except Exception as e:
        st.error(f"Error converting image: {str(e)}")
        return None

# Sidebar
with st.sidebar:
    st.title("🛰️ TeleGuide")
    if lottie_telecom:  # Skip the animation if the Lottie JSON could not be fetched
        st_lottie(lottie_telecom, height=200)
    st.markdown("---")
    st.info("Your AI-powered telecommunication assistant, providing expert analysis and insights.")
    st.markdown("### Features")
    st.markdown("""
    - 📝 Text Analysis
    - 📄 Document Processing
    - 🖼️ Image Analysis
    - 📡 Infrastructure Planning
    """)
    st.markdown("---")
    st.markdown("#### Powered by Advanced AI")
    st.caption("Using Llama 3.2 Models")

# Main content
st.markdown("# TeleGuide - Your AI-Powered Telecommunications Assistant")
st.caption("Made with ❤️ for telecom professionals")
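
# Running the app locally (a minimal sketch; the filename "app.py" and the exact secrets
# layout are assumptions, only st.secrets["api_key"] above comes from this code):
#
#   # .streamlit/secrets.toml
#   # api_key = "<your Together API key>"
#
#   pip install streamlit together pillow requests streamlit-lottie streamlit-option-menu
#   streamlit run app.py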