# pmkhanh7890 — 1st version of demo (commit 1ce1659, 579 bytes)
from nltk.tokenize import sent_tokenize
def split_into_sentences(input_text):
    """
    Split input text into sentences.

    The text is first broken into paragraphs at newline boundaries; each
    non-blank paragraph is then segmented into sentences with NLTK's
    ``sent_tokenize``. (The original docstring said "by newlines", which
    described only the paragraph split, not the sentence tokenization.)

    Args:
        input_text: The input text as a string.

    Returns:
        A flat list of sentence strings, in document order. Returns an
        empty list if ``input_text`` is not a string or contains no
        non-blank lines.
    """
    if not isinstance(input_text, str):
        return []

    # Strip once per line, then tokenize only the non-blank paragraphs.
    paragraphs = (line.strip() for line in input_text.splitlines())
    return [
        sentence
        for paragraph in paragraphs
        if paragraph
        for sentence in sent_tokenize(paragraph)
    ]