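"""Recover cleaned sentences from previously hashed paragraphs.

Reads tab-separated ``metadata<TAB>paragraph`` lines from stdin, re-splits and
re-normalizes each paragraph, and tries a set of candidate text modifiers until
the xxhash digest of a candidate matches the sentence digest stored in the
metadata. Matching lines are echoed to stdout with the recovered sentence
appended; unmatched ones are reported on stderr.
"""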
import argparse
import sys
import typing as tp
import unicodedata
import xxhash
from sacremoses import MosesPunctNormalizer
from .demojizer import Demojizer, legacy_demojizer
from .remove_non_printing_char import (
    get_replacer as non_printing_char_replacer,
)
from .sentence_split import get_split_algo
demojizer = Demojizer()


class SentenceSplitClean:
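    """Split a paragraph into sentences and apply moses-style cleaning to each one."""
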
def __init__(self, splitter_lang: str, split_algo: str):
# setup sentence splitter
self.splitter = get_split_algo(splitter_lang, split_algo=split_algo)
# setup "moses" normalization
self.mpn = MosesPunctNormalizer(lang="en", perl_parity=True) # TODO
self.replace_nonprint = non_printing_char_replacer(" ")

    def __call__(self, line):
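        """Yield ``(paragraph_hash, raw_sentence, cleaned_sentence)`` for each sentence in ``line``."""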
sentence_splits = self.splitter(line)
line_hash = xxhash.xxh3_64_intdigest(line)
for sent in sentence_splits:
# normalize -- moses equivalent
clean = self.mpn.normalize(sent)
clean = self.replace_nonprint(clean)
            # replace styled Unicode letters by their plain form (e.g. 𝓕𝓻𝓪𝓷𝓬𝓮𝓼𝓬𝓪 -> Francesca)
clean = unicodedata.normalize("NFKC", clean)
yield (line_hash, sent, clean)


def remove_on_unicode_category(x: str) -> str:
    """Drop every character whose Unicode category is "So" (Symbol, other)."""
    return "".join(ch for ch in x if unicodedata.category(ch) not in {"So"})


def get_replacer_unicode_category(
    skip_min: int, max_num: int, replace_by: str = " "
) -> tp.Callable[[str], str]:
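    """Return a function that masks characters in Unicode category "So".

    The first ``skip_min`` matching characters are kept as-is; each subsequent
    one is replaced by ``replace_by``, up to ``max_num`` replacements in total
    (``max_num == 0`` means no limit).
    """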
def replace_by_unicode_category(x: str) -> str:
total_counter = 0
skip_counter = 0

        def flt(ch):
nonlocal total_counter
nonlocal skip_counter
if max_num == 0 or total_counter < max_num:
if unicodedata.category(ch) in {"So"}:
if skip_counter < skip_min:
skip_counter += 1
return ch
total_counter += 1
return replace_by
return ch
return "".join(map(flt, x))
return replace_by_unicode_category


# candidate modifiers matching the behaviour of previous versions of the pipeline
def get_sentence_candidate_modifiers() -> tp.List[tp.Callable]:
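    """Return the whitespace/emoji/symbol transformations tried when re-matching a sentence digest."""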
return [
lambda x: x,
lambda x: x + " ",
lambda x: " " + x,
lambda x: " " + x + " ",
lambda x: " " + x,
lambda x: x.rstrip(),
lambda x: x.lstrip(),
lambda x: " " + x.rstrip(),
lambda x: x.strip(),
lambda x: demojizer(x, ""),
lambda x: demojizer(x, "").strip(),
lambda x: " " + demojizer(x, ""),
legacy_demojizer,
remove_on_unicode_category,
get_replacer_unicode_category(1, 1),
get_replacer_unicode_category(0, 0),
]


def reach_sentence_from_paragraph(
paragraph: str,
expected_paragraph_digest: int,
expected_sentence_digest: int,
lang: str,
sentence_splitters: tp.Dict[str, "SentenceSplitClean"],
debug_candidates: bool,
):
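    """Return the cleaned sentence whose digest equals ``expected_sentence_digest``.

    Every combination of splitter and candidate modifier is tried; ``None`` is
    returned when no candidate reproduces the expected digest.
    """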
if lang not in sentence_splitters:
sentence_splitters[lang] = SentenceSplitClean(lang, "default")

    def no_splitter(paragraph):
        # fallback splitter: treat the whole paragraph as a single sentence
        line_h = xxhash.xxh3_64_intdigest(paragraph)
        return [(line_h, paragraph, paragraph)]

    sentence_splitter = sentence_splitters[lang]
splitter_candidates = [sentence_splitter, no_splitter]
    for modifier_candidate in get_sentence_candidate_modifiers():
        for split_cand in splitter_candidates:
            for line_hash, sent, clean in split_cand(paragraph):
                assert line_hash == expected_paragraph_digest
                clean_cand = modifier_candidate(clean)
reached_sentence_digest = xxhash.xxh3_64_intdigest(clean_cand)
if debug_candidates:
print(f"{reached_sentence_digest}::\t::{clean_cand}::")
if reached_sentence_digest == expected_sentence_digest:
return clean_cand
return None


def split_clean():
    """Read ``metadata<TAB>paragraph`` lines from stdin and emit matched sentences on stdout."""
    split_algo = "default"
    sentence_splitters: tp.Dict[str, SentenceSplitClean] = {}
for line in sys.stdin:
line_stripped = line.rstrip("\n")
metadata, paragraph = line_stripped.split("\t")
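        # metadata is expected to contain 11 whitespace-separated fields:
        # fields 5 and 6 are the paragraph/sentence xxhash digests, field 10 is the language code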
(
_,
_,
_,
_,
paragraph_digest,
sentence_digest,
_,
_,
_,
lang,
_,
) = metadata.split()
paragraph_digest = int(paragraph_digest)
sentence_digest = int(sentence_digest)
sentence = reach_sentence_from_paragraph(
paragraph,
paragraph_digest,
sentence_digest,
lang,
sentence_splitters,
False,
)
if sentence is not None:
print(f"{line_stripped}\t{sentence}")
else:
print(
f"Couldn't match sentence for paragraph: {paragraph_digest} sentence: {sentence_digest} lang: {lang}",
file=sys.stderr,
)


def main():
    split_clean()


if __name__ == "__main__":
    main()