import nltk
import streamlit as st
from graphviz import Digraph


def extract_context_words(text, high_information_words):
    # For each high-information word, capture the words immediately before and after it.
    words = nltk.word_tokenize(text)
    context_words = []
    for index, word in enumerate(words):
        if word.lower() in high_information_words:
            before_word = words[index - 1] if index > 0 else None
            after_word = words[index + 1] if index < len(words) - 1 else None
            context_words.append((before_word, word, after_word))
    return context_words


def create_context_graph(context_words):
    # Build a directed graph: before word (box) -> high-info word (ellipse) -> after word (diamond).
    graph = Digraph()
    for index, (before_word, high_info_word, after_word) in enumerate(context_words):
        if before_word:
            graph.node(f'before{index}', before_word, shape='box')
        graph.node(f'high{index}', high_info_word, shape='ellipse')
        if after_word:
            graph.node(f'after{index}', after_word, shape='diamond')
        if before_word:
            graph.edge(f'before{index}', f'high{index}')
        if after_word:
            graph.edge(f'high{index}', f'after{index}')
    return graph


def display_context_graph(context_words):
    graph = create_context_graph(context_words)
    st.graphviz_chart(graph)


def display_context_table(context_words):
    # Render the (before, high, after) triples as a markdown table.
    table = "| Before | High Info Word | After |\n|--------|----------------|-------|\n"
    for before, high, after in context_words:
        table += f"| {before if before else ''} | {high} | {after if after else ''} |\n"
    st.markdown(table)

# ...

if uploaded_file:
    file_text = uploaded_file.read().decode("utf-8")
    text_without_timestamps = remove_timestamps(file_text)
    top_words = extract_high_information_words(text_without_timestamps, 10)
    st.markdown("**Top 10 High Information Words:**")
    st.write(top_words)
    context_words = extract_context_words(text_without_timestamps, top_words)
    st.markdown("**Relationship Graph:**")
    display_relationship_graph(top_words)
    st.markdown("**Context Graph:**")
    display_context_graph(context_words)
    st.markdown("**Context Table:**")
    display_context_table(context_words)
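For reference, here is a minimal usage sketch (not part of the app above) showing the (before, high-information word, after) tuples that extract_context_words produces; the sentence and word set are made up for illustration, and it assumes NLTK's punkt tokenizer data has already been downloaded:

    # Illustrative only: sample sentence and high-information word set are hypothetical.
    # nltk.download('punkt')  # needed once so nltk.word_tokenize can run
    sample = extract_context_words("The cat sat on the mat", {"cat", "mat"})
    print(sample)  # [('The', 'cat', 'sat'), ('the', 'mat', None)]

The second tuple ends in None because "mat" is the final token, so it has no following context word; create_context_graph and display_context_table both guard against these None entries.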