# contextSPARKfull.py - SPARK implementation, full parallelization
######################
#
# To run, execute spark-submit contextSPARKfull.py at the prompt.
#
# The default files are:
# - dictionary: testdata/big.txt
# - document to check: yelp100reviews.txt
#
# These files can be overridden by adding -d 'dictionary.txt'
# and -c 'checkfile.txt' when executing the code.
#
# Suggested corrections are logged to spell-log.txt in the
# working directory.
#
# WARNING: This code was developed on a Python 2.7 and spark-1.5.0
# build and may not run as expected on other configurations.
#
######################
import re
import math
from scipy.stats import poisson
import time
import sys, getopt
import os
# Initialize Spark
from pyspark import SparkContext
sc = SparkContext()
sc.setLogLevel('ERROR')
######################
#
# Submission by Gioia Dominedo (Harvard ID: 40966234) for
# CS 205 - Computing Foundations for Computational Science
#
# This is part of a joint project with Kendrick Lo that includes a
# separate component for word-level checking. This script includes
# one of three SPARK implementations for context-level spell-checking
# adapted from third party algorithms (Symspell and Viterbi algorithms).
#
# The following were also used as references:
# Peter Norvig, How to Write a Spelling Corrector
# (http://norvig.com/spell-correct.html)
# Peter Norvig, Natural Language Corpus Data: Beautiful Data
# (http://norvig.com/ngrams/ch14.pdf)
#
######################
######################
#
# SUMMARY OF CONTEXT-LEVEL CORRECTION LOGIC - VITERBI ALGORITHM
#
# v 1.0 last revised 6 Dec 2015
#
# Each sentence is modeled as a hidden Markov model. Prior
# probabilities (for first words in the sentences) and transition
# probabilities (for all subsequent words) are calculated when
# generating the main dictionary, using the same corpus. Emission
# probabilities are generated on the fly by parameterizing a Poisson
# distribution with the edit distance between words and suggested
# corrections.
#
# The state space of possible corrections for each word is generated
# using logic based on the Symspell spell-checker (see below for more
# detail on Symspell). Valid suggestions must: (a) be 'real' words;
# (b) appear at least 100 times in the corpus used to generate the
# dictionary; (c) be one of the top 10 suggestions, based on frequency
# and edit distance. This simplification ensures that the state space
# remains manageable.
#
# All probabilities are stored in log-space to avoid underflow. Pre-
# defined minimum values are used for words that are not present in
# the dictionary and/or probability tables.
#
# The pre-processing steps are the same across all three SPARK
# implementations.
#
# More detail on the specific implementation is included below.
#
######################
######################
#
# SPARK IMPLEMENTATION DETAILS - FULL PARALLELIZATION
#
# This is the third attempt at parallelizing the Viterbi algorithm.
#
# Unlike previous implementations that relied heavily on helper
# functions to assess individual sentences, in this version we
# implement all the steps of the Viterbi algorithm in SPARK.
#
# We break each sentence to be corrected into its word components,
# and then loop through each word position exactly as in the
# original serial algorithm.
#
# Because different sentences in the document may have different
# lengths but are all being processed in parallel, we check at
# each iteration for any sentences that are "done" and store
# their results for later use. Once the maximum sentence length is
# reached, the results for all of the sentences are processed to
# extract and display/print suggested corrections.
#
# This approach takes advantage of parallelization by splitting
# all the words at a given position among the workers.
#
######################
######################
#
# PRE-PROCESSING STEPS
#
# The pre-processing steps have been adapted from the dictionary
# creation of the word-level spellchecker, which in turn was based on
# SymSpell, a Symmetric Delete spelling correction algorithm
# developed by Wolf Garbe and originally written in C#. More detail
# on SymSpell is included in the word-level spellcheck documentation.
#
# The main modifications to the word-level spellchecker pre-
# processing stages are to create the additional outputs that are
# required for the context-level checking, and to eliminate redundant
# outputs that are not necessary.
#
# The outputs of the pre-processing stage are:
#
# - dictionary: A dictionary that combines both words present in the
# corpus and other words that are within a given 'delete distance'.
# The format of the dictionary is:
# {word: ([list of words within the given 'delete distance'],
# word count in corpus)}
#
# - start_prob: A dictionary with key, value pairs that correspond to
# (word, probability of the word being the first word in a sentence)
#
# - transition_prob: A dictionary of dictionaries that stores the
# probability of a given word following another. The format of the
# dictionary is:
# {previous word: {word1 : P(word1|previous word), word2 :
# P(word2|previous word), ...}}
#
# - default_start_prob: A benchmark probability of a word being at
# the start of a sentence, set to 1 / # of words at the beginning of
# sentences. This ensures that all previously unseen words at the
# beginning of sentences are not corrected unnecessarily.
#
# - default_transition_prob: A benchmark probability of a word being
# seen, given the previous word in the sentence, also set to 1 / # of
# transitions in corpus. This ensures that all previously unseen
# transitions are not corrected unnecessarily.
#
######################
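######################
#
# For illustration, the pre-processing outputs have roughly the
# following shapes (the words and numbers below are hypothetical):
#
# dictionary      = {'test': (['tests', 'tested'], 52),
#                    'tst':  (['test'], 0), ...}
# start_prob      = {'the': -2.1, 'it': -3.4, ...}   # log-probabilities
# transition_prob = {'this': {'is': -0.9, 'was': -2.3, ...}, ...}
# default_start_prob      = log(1 / total start words)
# default_transition_prob = log(1 / total transitions)
#
######################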
def get_deletes_list(w, max_edit_distance):
'''
Given a word, derive strings with up to max_edit_distance
characters deleted.
The list is small (at most a few dozen strings for typical word
lengths and edit distances), so it does not make sense to parallelize
this function. Instead, we use plain Python to create the list.
'''
deletes = []
queue = [w]
for d in range(max_edit_distance):
temp_queue = []
for word in queue:
if len(word)>1:
for c in range(len(word)): # character index
word_minus_c = word[:c] + word[c+1:]
if word_minus_c not in deletes:
deletes.append(word_minus_c)
if word_minus_c not in temp_queue:
temp_queue.append(word_minus_c)
queue = temp_queue
return deletes
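# A small worked example of get_deletes_list (for illustration only):
'''
get_deletes_list('this', 1)
['his', 'tis', 'ths', 'thi']
'''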
def get_transitions(sentence):
'''
Helper function: converts a sentence into all two-word pairs.
Output format is a list of tuples.
e.g. 'This is a test' >> ('this', 'is'), ('is', 'a'), ('a', 'test')
'''
if len(sentence)<2:
return None
else:
return [((sentence[i], sentence[i+1]), 1)
for i in range(len(sentence)-1)]
def map_transition_prob(vals):
'''
Helper function: calculates conditional probabilities for all word
pairs, i.e. P(word|previous word)
'''
total = float(sum(vals.values()))
return {k: math.log(v/total) for k, v in vals.items()}
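# A small worked example of map_transition_prob (for illustration only;
# the returned values are natural logs of the conditional probabilities):
'''
map_transition_prob({'is': 3, 'was': 1})
{'is': log(0.75), 'was': log(0.25)}
'''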
def parallel_create_dictionary(fname, max_edit_distance=3,
num_partitions=6):
'''
Load a text file and use it to create a dictionary and
to calculate start probabilities and transition probabilities.
'''
# Note: this function makes use of multiple accumulators to keep
# track of the words that are being processed. An alternative
# implementation that wraps accumulators in helper functions was
# also tested, but did not yield any noticeable improvements.
############
#
# load file & initial processing
#
############
# http://stackoverflow.com/questions/22520932/python-remove-all-non-alphabet-chars-from-string
regex = re.compile('[^a-z ]')
# load file contents, convert to lowercase, and drop empty lines
# RDD format: 'line 1', 'line 2', 'line 3', ...
# cache because this RDD is used in multiple operations
make_all_lower = sc.textFile(fname) \
.map(lambda line: line.lower()) \
.filter(lambda x: x!='').cache()
# split into individual sentences and remove other punctuation
# RDD format: [words of sentence 1], [words of sentence 2], ...
# cache because this RDD is used in multiple operations
split_sentence = make_all_lower.flatMap(lambda
line: line.replace('?','.').replace('!','.').split('.')) \
.map(lambda sentence: regex.sub(' ', sentence)) \
.map(lambda sentence: sentence.split()) \
.filter(lambda x: x!=[]).cache()
############
#
# generate start probabilities
#
############
# extract all words that are at the beginning of sentences
# RDD format: 'word1', 'word2', 'word3', ...
start_words = split_sentence.map(lambda sentence: sentence[0]
if len(sentence)>0 else None) \
.filter(lambda word: word!=None)
# add a count to each word
# RDD format: ('word1', 1), ('word2', 1), ('word3', 1), ...
# note: partition here because we are using words as keys for
# the first time - yields a small but consistent improvement in
# runtime (~2-3 sec for big.txt)
# cache because this RDD is used in multiple operations
count_start_words_once = start_words.map(lambda word: (word, 1)) \
.partitionBy(num_partitions).cache()
# use accumulator to count the number of start words processed
accum_total_start_words = sc.accumulator(0)
count_start_words_once.foreach(lambda x: accum_total_start_words.add(1))
total_start_words = float(accum_total_start_words.value)
# reduce into count of unique words at the start of sentences
# RDD format: ('word1', frequency), ('word2', frequency), ...
unique_start_words = count_start_words_once.reduceByKey(lambda a, b: a + b)
# convert counts to log-probabilities
# RDD format: ('word1', log-prob of word1),
# ('word2', log-prob of word2), ...
start_prob_calc = unique_start_words.mapValues(lambda v:
math.log(v/total_start_words))
# get default start probabilities (for words not in corpus)
default_start_prob = math.log(1/total_start_words)
# store start probabilities as a dictionary (i.e. a lookup table)
# note: given the spell-checking algorithm, this cannot be maintained
# as an RDD as it is not possible to map within a map
start_prob = start_prob_calc.collectAsMap()
############
#
# generate transition probabilities
#
############
# note: various partitioning strategies were attempted for this
# portion of the function, but they failed to yield significant
# improvements in performance.
# focus on continuous word pairs within the sentence
# e.g. "this is a test" -> "this is", "is a", "a test"
# note: as the relevant probability is P(word|previous word)
# the tuples are ordered as (previous word, word)
# extract all word pairs within a sentence and add a count
# RDD format: (('word1', 'word2'), 1), (('word2', 'word3'), 1), ...
# cache because this RDD is used in multiple operations
other_words = split_sentence.map(lambda sentence:
get_transitions(sentence)) \
.filter(lambda x: x!=None) \
.flatMap(lambda x: x).cache()
# use accumulator to count the number of transitions (word pairs)
accum_total_other_words = sc.accumulator(0)
other_words.foreach(lambda x: accum_total_other_words.add(1))
total_other_words = float(accum_total_other_words.value)
# reduce into count of unique word pairs
# RDD format: (('word1', 'word2'), frequency),
# (('word2', 'word3'), frequency), ...
unique_other_words = other_words.reduceByKey(lambda a, b: a + b)
# aggregate by (and change key to) previous word
# RDD format: ('previous word', {'word1': word pair count,
# 'word2': word pair count}), ...
other_words_collapsed = unique_other_words.map(lambda x:
(x[0][0], (x[0][1], x[1]))) \
.groupByKey().mapValues(dict)
# note: the above line of code is the slowest in the function
# (8.6 MB shuffle read and 4.5 MB shuffle write for big.txt)
# An alternative approach that aggregates lists with reduceByKey was
# attempted, but did not yield noticeable improvements in runtime.
# convert counts to log-probabilities
# RDD format: ('previous word', {'word1': log-prob of pair,
# 'word2': log-prob of pair}), ...
transition_prob_calc = other_words_collapsed.mapValues(lambda v:
map_transition_prob(v))
# get default transition probabilities (for word pairs not in corpus)
default_transition_prob = math.log(1/total_other_words)
# store transition probabilities as a dictionary (i.e. a lookup table)
# note: given the spell-checking algorithm, this cannot be maintained
# as an RDD as it is not possible to map within a map
transition_prob = transition_prob_calc.collectAsMap()
############
#
# generate dictionary
#
############
# note: this approach is slightly different from the original SymSpell
# algorithm, but is more appropriate for a SPARK implementation
# split into individual words (all)
# RDD format: 'word1', 'word2', 'word3', ...
# cache because this RDD is used in multiple operations
all_words = make_all_lower.map(lambda line: regex.sub(' ', line)) \
.flatMap(lambda line: line.split()).cache()
# use accumulator to count the number of words processed
accum_words_processed = sc.accumulator(0)
all_words.foreach(lambda x: accum_words_processed.add(1))
# add a count to each word
# RDD format: ('word1', 1), ('word2', 1), ('word3', 1), ...
count_once = all_words.map(lambda word: (word, 1))
# reduce into counts of unique words - this is the core corpus dictionary
# (i.e. only words appearing in the file, without 'deletes')
# RDD format: ('word1', frequency), ('word2', frequency), ...
# cache because this RDD is used in multiple operations
# note: imposing partitioning at this step yields a small
# improvement in runtime (~1 sec for big.txt) by equally
# balancing elements among workers for subsequent operations
unique_words_with_count = count_once.reduceByKey(lambda a, b: a + b,
numPartitions = num_partitions).cache()
# use accumulator to count the number of unique words
accum_unique_words = sc.accumulator(0)
unique_words_with_count.foreach(lambda x: accum_unique_words.add(1))
# generate list of "deletes" for each word in the corpus
# RDD format: (word1, [deletes for word1]), (word2, [deletes for word2]), ...
generate_deletes = unique_words_with_count.map(lambda (parent, count):
(parent, get_deletes_list(parent, max_edit_distance)))
# split into all key-value pairs
# RDD format: (word1, delete1), (word1, delete2), ...
expand_deletes = generate_deletes.flatMapValues(lambda x: x)
# swap word order and add a zero count (because "deletes" were not
# present in the dictionary)
swap = expand_deletes.map(lambda (orig, delete): (delete, ([orig], 0)))
# create a placeholder for each real word
# RDD format: ('word1', ([], frequency)), ('word2', ([], frequency)), ...
corpus = unique_words_with_count.mapValues(lambda count: ([], count))
# combine main dictionary and "deletes" (and eliminate duplicates)
# RDD format: ('word1', ([deletes for word1], frequency)),
# ('word2', ([deletes for word2], frequency)), ...
combine = swap.union(corpus)
# store dictionary items and deletes as a dictionary (i.e. a lookup table)
# note: given the spell-checking algorithm, this cannot be maintained
# as an RDD as it is not possible to map within a map
# note: use reduceByKeyLocally to avoid an extra shuffle from reduceByKey
dictionary = combine.reduceByKeyLocally(lambda a, b: (a[0]+b[0], a[1]+b[1]))
# output stats
print 'Total words processed: %i' % accum_words_processed.value
print 'Total unique words in corpus: %i' % accum_unique_words.value
print 'Total items in dictionary (corpus words and deletions): %i' \
% len(dictionary)
print ' Edit distance for deletions: %i' % max_edit_distance
print 'Total unique words at the start of a sentence: %i' \
% len(start_prob)
print 'Total unique word transitions: %i' % len(transition_prob)
return dictionary, start_prob, default_start_prob, \
transition_prob, default_transition_prob
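# Illustrative invocation of the pre-processing stage (the driver code
# that actually calls this function appears later in the file; the file
# name below is just the default dictionary corpus):
'''
dictionary, start_prob, default_start_prob, \
    transition_prob, default_transition_prob = \
    parallel_create_dictionary('testdata/big.txt')
'''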
######################
#
# SPELL-CHECKING - VITERBI ALGORITHM
#
# The below functions are used to read in a text file, break it down
# into individual sentences, and then carry out context-based spell-
# checking on each sentence in turn. In cases where the 'suggested'
# word does not match the actual word in the text, both the original
# and the suggested sentences are printed or written to the log file.
#
# Probabilistic model:
#
# Each sentence is modeled as a hidden Markov model, where the
# hidden states are the words that the user intended to type, and
# the emissions are the words that were actually typed.
#
# For each word in a sentence, we can define:
#
# - emission probabilities: P(observed word|intended word)
#
# - prior probabilities (for first words in sentences only):
# P(being the first word in a sentence)
#
# - transition probabilities (for all subsequent words):
# P(intended word|previous intended word)
#
# Prior and transition probabilities were calculated in the pre-
# processing steps above, using the same corpus as the dictionary.
#
# Emission probabilities are calculated on the fly using a Poisson
# distribution as follows:
# P(observed word|intended word) = PMF of Poisson(k, l), where
# k = edit distance between word typed and word intended, and l=0.01.
# Both the overall approach and the parameter of l=0.01 are based on
# the 2015 lecture notes from AM207 Stochastic Optimization.
# Various parameters for lambda between 0 and 1 were tested, which
# confirmed that 0.01 yields the most accurate word suggestions.
#
# All probabilities are stored in log-space to avoid underflow. Pre-
# defined minimum values (also defined at the pre-processing stage)
# are used for words that are not present in the dictionary and/or
# probability tables.
#
# Algorithm:
#
# The spell-checking itself is carried out using a modified version
# of the Viterbi algorithm, which yields the most likely sequence of
# hidden states, i.e. the most likely sequence of words that form a
# sentence. The main difference to the 'standard' Viterbi algorithm
# is that the state space (i.e. the list of possible corrections) is
# generated (and therefore varies) for each word. This is in contrast
# to the alternative of considering the state space of all possible
# words in the dictionary for every word that is checked, which would
# be intractable for larger dictionaries.
#
# Example:
#
# The algorithm is best illustrated by way of an example.
#
# Suppose that we are checking the sentence 'This is ax test.'
# The emissions for the entire sentence are 'This is ax test.' and
# the hidden states for the entire sentence are 'This is a test.'
#
# As a pre-processing step, we convert everything to lowercase,
# eliminate punctuation, and break the sentence up into a list of
# words: ['this', 'is', 'ax', 'test']
# This list is passed as a parameter to the viterbi function.
#
# The algorithm tackles each word in turn, starting with 'this'.
#
# We first use get_suggestions to obtain a list of all words that
# may have been intended instead of 'this', i.e. all possible hidden
# states (intended words) for the emission (word typed).
#
# get_suggestions returns the 10 most likely corrections:
# - 1 word with an edit distance of 0
# ['this']
# - 3 words with an edit distance of 1
# ['his', 'thus', 'thin']
# - 6 words with an edit distance of 2
# ['the', 'that', 'is', 'him', 'they', 'their']
#
# These 10 words represent our state space, i.e. possible words that
# may have been intended, and are referred to below as the list of
# possible corrections. They each have an emission probability equal
# to the PMF of Poisson(edit distance, 0.01).
#
# For each word in the list of possible corrections, we calculate:
# P(word starting a sentence) * P(observed 'this'|intended word)
# This is a simple application of Bayes' rule: by normalizing the
# probabilities we obtain P(intended word|observed 'this') for
# each of the 10 words.
#
# We store the word-probability pairs for future use, and move on to
# the next word.
#
# After the first word, all subsequent words are treated as follows.
#
# The second word in our test sentence is 'is'. Once again, we use
# get_suggestions to obtain a list of all words that may have been
# intended. get_suggestions returns the 10 most likely suggestions:
# - 1 word with an edit distance of 0
# ['is']
# - 9 words with an edit distance of 1
# ['in', 'it', 'his', 'as', 'i', 's', 'if', 'its', 'us']
# These 10 words represent our state space for the second word.
#
# For each word in the current list of possible corrections, we loop
# through all the words in the previous list of possible corrections,
# and calculate:
# probability(previous suggested word)
# * P(current suggested word|previous suggested word)
# * P(typing 'is'|meaning to type current suggested word)
# We determine which previous word maximizes this calculation and
# store that 'path' and probability for each current suggested word.
#
# For example, suppose that we are considering the possibility that
# 'is' was indeed intended to be 'is'. We then calculate:
# probability(previous suggested word)
# * P('is'|previous suggested word) * P('is'|'is')
# for all previous suggested words, and discover that the previous
# suggested word 'this' maximizes the above calculation. We therefore
# store 'this is' as the optimal path for the suggested correction
# 'is' and the above (normalized) probability associated with this
# path.
#
# If the sentence had been only 2 words long, then at this point we
# would return the path with the highest probability at the
# most recent step (word).
#
# As it is not, we repeat the previous steps for 'ax' and 'test',
# and then return the path that is associated with the highest
# probability at the last step.
#
######################
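######################
#
# For reference, the recurrence described above can be sketched in a
# few lines of serial Python. The function below is illustrative only:
# it is never called and is not the SPARK implementation; it assumes
# the helper functions defined further down in this file
# (get_suggestions, get_emission_prob, get_start_prob,
# get_transition_prob) and the pre-processing outputs described above.
#
######################
def viterbi_serial_sketch(words, dictionary, start_prob, default_start_prob,
                          transition_prob, default_transition_prob,
                          max_edit_distance=3):
    '''
    Serial sketch of the per-sentence correction: returns the most
    likely sequence of intended words for a list of observed words.
    '''
    # paths: list of ([intended words so far], probability) pairs
    paths = None
    for i, word in enumerate(words):
        suggestions = get_suggestions(word, dictionary, max_edit_distance)
        if i == 0:
            # first word: prior probability x emission probability
            paths = [([sug], math.exp(
                        get_start_prob(sug, start_prob, default_start_prob)
                        + get_emission_prob(dist)))
                     for sug, dist in suggestions]
        else:
            # subsequent words: for each suggestion, keep the previous
            # path that maximizes belief x transition x emission
            new_paths = []
            for sug, dist in suggestions:
                prob, path = max(
                    (p * math.exp(
                        get_transition_prob(sug, prev[-1], transition_prob,
                                            default_transition_prob)
                        + get_emission_prob(dist)), prev)
                    for prev, p in paths)
                new_paths.append((path + [sug], prob))
            paths = new_paths
        # normalize at each step for numerical stability
        total = sum(p for _, p in paths)
        paths = [(path, p / total) for path, p in paths]
    # return the path with the highest probability at the last word
    return max(paths, key=lambda x: x[1])[0]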
def dameraulevenshtein(seq1, seq2):
'''
Calculate the Damerau-Levenshtein distance between sequences.
codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F
Conceptually, this is based on a (len(seq1) + 1) * (len(seq2) + 1)
matrix. However, only the current and two previous rows are
needed at once, so we only store those.
Same code as word-level checking.
'''
oneago = None
thisrow = range(1, len(seq2) + 1) + [0]
for x in xrange(len(seq1)):
twoago, oneago, thisrow = \
oneago, thisrow, [0] * len(seq2) + [x + 1]
for y in xrange(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]
and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):
thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)
return thisrow[len(seq2) - 1]
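# Small worked examples of the distance function (for illustration only):
'''
dameraulevenshtein('ax', 'a')       # 1 (one deletion)
dameraulevenshtein('test', 'text')  # 1 (one substitution)
dameraulevenshtein('ca', 'ac')      # 1 (one transposition)
'''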
def get_suggestions(string, dictionary, max_edit_distance,
longest_word_length=20, min_count=100, max_sug=10):
'''
Return list of suggested corrections for potentially incorrectly
spelled word.
Code based on get_suggestions function from word-level checking,
with the addition of the min_count and max_sug parameters.
- min_count: minimum number of times a word must have appeared
in the dictionary corpus to be considered a valid suggestion
- max_sug: number of suggestions that are returned (ranked by
frequency of appearance in dictionary corpus and edit distance
from word being checked)
These changes were imposed in order to ensure that the problem
remains tractable when checking very large documents. In practice,
the "correct" suggestion is almost always amongst the top ten.
'''
if (len(string) - longest_word_length) > max_edit_distance:
# to ensure Viterbi can keep running -- use the word itself
return [(string, 0)]
suggest_dict = {}
queue = [string]
q_dictionary = {} # items other than string that we've checked
while len(queue)>0:
q_item = queue[0] # pop
queue = queue[1:]
# process queue item
if (q_item in dictionary) and (q_item not in suggest_dict):
if (dictionary[q_item][1]>0):
# word is in dictionary, and is a word from the corpus,
# and not already in suggestion list so add to suggestion
# dictionary, indexed by the word with value (frequency
# in corpus, edit distance)
# note: q_items that are not the input string are shorter
# than input string since only deletes are added (unless
# manual dictionary corrections are added)
assert len(string)>=len(q_item)
suggest_dict[q_item] = \
(dictionary[q_item][1], len(string) - len(q_item))
# the suggested corrections for q_item as stored in
# dictionary (whether or not q_item itself is a valid
# word or merely a delete) can be valid corrections
for sc_item in dictionary[q_item][0]:
if (sc_item not in suggest_dict):
# compute edit distance
# suggested items should always be longer (unless
# manual corrections are added)
assert len(sc_item)>len(q_item)
# q_items that are not input should be shorter
# than original string
# (unless manual corrections added)
assert len(q_item)<=len(string)
if len(q_item)==len(string):
assert q_item==string
item_dist = len(sc_item) - len(q_item)
# item in suggestions list should not be the same
# as the string itself
assert sc_item!=string
# calculate edit distance using Damerau-
# Levenshtein distance
item_dist = dameraulevenshtein(sc_item, string)
if item_dist<=max_edit_distance:
# should already be in dictionary if in
# suggestion list
assert sc_item in dictionary
# trim list to contain state space
if (dictionary[sc_item][1]>0):
suggest_dict[sc_item] = \
(dictionary[sc_item][1], item_dist)
# now generate deletes (e.g. a substring of string or of a
# delete) from the queue item as additional items to check
# -- add to end of queue
assert len(string)>=len(q_item)
if (len(string)-len(q_item))<max_edit_distance \
and len(q_item)>1:
for c in range(len(q_item)): # character index
word_minus_c = q_item[:c] + q_item[c+1:]
if word_minus_c not in q_dictionary:
queue.append(word_minus_c)
# arbitrary value to identify we checked this
q_dictionary[word_minus_c] = None
# return list of suggestions: (correction, edit distance)
# only include words that have appeared a minimum number of times
# note: make sure that we do not lose the original word
as_list = [i for i in suggest_dict.items()
if (i[1][0]>min_count or i[0]==string)]
# only include the most likely suggestions (based on frequency
# and edit distance from original word)
trunc_as_list = sorted(as_list,
key = lambda (term, (freq, dist)): (dist, -freq))[:max_sug]
if len(trunc_as_list)==0:
# to ensure Viterbi can keep running
# -- use the word itself if no corrections are found
return [(string, 0)]
else:
# drop the word frequency - not needed beyond this point
return [(i[0], i[1][1]) for i in trunc_as_list]
'''
Output format:
get_suggestions('file', dictionary)
[('file', 0), ('five', 1), ('fire', 1), ('fine', 1), ('will', 2),
('time', 2), ('face', 2), ('like', 2), ('life', 2), ('while', 2)]
'''
def get_emission_prob(edit_dist, poisson_lambda=0.01):
'''
The emission probability, i.e. P(observed word|intended word)
is approximated by a Poisson(k, l) distribution, where
k=edit distance between the observed word and the intended
word and l=0.01.
Both the overall approach and the parameter of l=0.01 are based on
the 2015 lecture notes from AM207 Stochastic Optimization.
Various parameters for lambda between 0 and 1 were tested, which
confirmed that 0.01 yields the most accurate word suggestions.
'''
return math.log(poisson.pmf(edit_dist, poisson_lambda))
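# Approximate values of the emission model (for illustration only):
# log(poisson.pmf(0, 0.01)) ~= -0.01   (suggestion identical to the word)
# log(poisson.pmf(1, 0.01)) ~= -4.62   (one edit away)
# log(poisson.pmf(2, 0.01)) ~= -9.91   (two edits away)
# i.e. each additional edit reduces the emission probability by roughly
# two orders of magnitude.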
######################
# Multiple helper functions are used to avoid KeyErrors when
# attempting to access values that are not present in dictionaries,
# in which case the previously specified default value is returned.
######################
def get_start_prob(word, start_prob, default_start_prob):
'''
P(word being at the beginning of a sentence)
'''
try:
return start_prob[word]
except KeyError:
return default_start_prob
def get_transition_prob(cur_word, prev_word,
transition_prob, default_transition_prob):
'''
P(word|previous word)
'''
try:
return transition_prob[prev_word][cur_word]
except KeyError:
return default_transition_prob
def get_sentence_word_id(words):
'''
Helper function: numbers each word according to its position
in the sentence.
'''
return [(i, w) for i, w in enumerate(words)]
def start_word_prob(words, tmp_sp, d_sp):
'''
Helper function: calculates the probability of all word
suggestions being at the beginning of the sentence, based on
the pre-processed start probabilities and the emission model.
i.e. start probability x emission probability
'''
orig_word, sug_words = words
probs = [(w[0], math.exp(
get_start_prob(w[0], tmp_sp, d_sp)
+ get_emission_prob(w[1])
))
for w in sug_words]
sum_probs = sum([p[1] for p in probs])
probs = [([p[0]], math.log(p[1]/sum_probs)) for p in probs]
return probs
def split_suggestions(sentence):
'''
Helper function: splits a (word, [suggestions]) value into one
element per suggestion, retaining the sentence id and original word.
'''
sent_id, (word, word_sug) = sentence
return [[sent_id, (word, w)] for w in word_sug]
def normalize(probs):
'''
Helper function: normalizes probabilities so they sum to 1.
Note: this is especially necessary given the small
probabilities that apply to this problem.
'''
sum_probs = sum([p[1] for p in probs])
return [(p[0], math.log(p[1]/sum_probs)) for p in probs]
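# A small worked example of normalize (for illustration only; inputs are
# raw probabilities, outputs are log-probabilities):
'''
normalize([(['this', 'is'], 0.03), (['his', 'is'], 0.01)])
[(['this', 'is'], log(0.75)), (['his', 'is'], log(0.25))]
'''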
def get_max_prev_path(words, tmp_tp, d_tp):
'''
Helper function: Calculates the previous path that maximizes
the probability of the current word suggestion.
'''
# unpack values
cur_word = words[0][0]
cur_sug = words[0][1][0]
cur_sug_ed = words[0][1][1]
prev_sug = words[1]
# belief + transition probability + emission probability
(prob, word) = max((p[1]
+ get_transition_prob(cur_sug, p[0][-1], tmp_tp, d_tp)
+ get_emission_prob(cur_sug_ed), p[0])
for p in prev_sug)
return word + [cur_sug], math.exp(prob)
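# Illustrative input/output shapes for get_max_prev_path (hypothetical
# words and log-probabilities):
'''
Input value (after joining a suggestion with the previous paths):
(('is', ('is', 0)), [(['this'], -0.3), (['his'], -1.5)])
Output (assuming 'this' maximizes the combined probability):
(['this', 'is'], probability of that path in raw, non-log space)
'''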
def get_max_path(final_paths):
'''
Helper function: at the final step, identifies the full path
(i.e. sentence correction) with the highest probability.
'''
return max((p[1], p[0]) for p in final_paths)[1]
def get_count_mismatches(sentences):
'''
Helper function: compares the original sentence with the sentence
that has been suggested by the Viterbi algorithm, and calculates
the number of words that do not match.
'''
orig_sentence, sug_sentence = sentences
count_mismatches = len([(orig_sentence[i], sug_sentence[i])
for i in range(len(orig_sentence))
if orig_sentence[i]!=sug_sentence[i]])
return count_mismatches, orig_sentence, sug_sentence
def correct_document_context_parallel_full(fname, dictionary,
start_prob, default_start_prob,
transition_prob, default_transition_prob,
max_edit_distance=3, num_partitions=6,
display_results=False):
'''
Load a text file and spell-check each sentence using the
dictionary and probability tables that were created in the
pre-processing stage.
Suggested corrections are either printed to the screen or
saved in a log file, depending on the settings.
'''
############
#
# load file & initial processing
#
############
# http://stackoverflow.com/questions/22520932/python-remove-all-non-alphabet-chars-from-string
regex = re.compile('[^a-z ]')
# broadcast Python dictionaries to workers (from pre-processing)
bc_dictionary = sc.broadcast(dictionary)
bc_start_prob = sc.broadcast(start_prob)
bc_transition_prob = sc.broadcast(transition_prob)
# load file contents, convert to lowercase, and drop empty lines
# RDD format: 'line 1', 'line 2', 'line 3', ...
make_all_lower = sc.textFile(fname) \
.map(lambda line: line.lower()) \
.filter(lambda x: x!='')
# split into individual sentences and remove other punctuation
# RDD format: [words of sentence1], [words of sentence2], ...
# cache because this RDD is used in multiple operations
split_sentence = make_all_lower.flatMap(lambda
line: line.replace('?','.').replace('!','.').split('.')) \
.map(lambda sentence: regex.sub(' ', sentence)) \
.map(lambda sentence: sentence.split()) \
.filter(lambda x: x!=[]).cache()
# use accumulator to count the number of words checked
accum_total_words = sc.accumulator(0)
split_sentence.flatMap(lambda x: x) \
.foreach(lambda x: accum_total_words.add(1))
# assign a unique id to each sentence
# RDD format: (0, [words of sentence1]), (1, [words of sentence2]), ...
# partition and cache here after completing transformations - this
# RDD is used in multiple operations and the sentence id will
# remain the key from this point forward
sentence_id = split_sentence.zipWithIndex().map(
lambda (k, v): (v, k)).partitionBy(num_partitions).cache()
# count the number of words in each sentence - this is used to
# determine when each sentence is done processing
# RDD format: (0, words in sentence1), (1, words in sentence2), ...
# cache as this RDD is called at every iteration
sentence_word_count = sentence_id.mapValues(lambda v: len(v)).cache()
############
#
# spell-checking
#
############
# number each word in a sentence, and split into individual words
# RDD format: (sentence1 id, (word1 id, word1)),
# (sentence1 id, (word2 id, word2), ...
sentence_word_id = sentence_id.mapValues(lambda v: get_sentence_word_id(v)) \
.flatMapValues(lambda x: x)
# get suggestions for each word
# RDD format: (sentence1 id, (word1 id, word1, [suggestions for word1])),
# (sentence1 id, (word2 id, word2, [suggestions for word2]), ...
# cache as this RDD is called at each iteration
sentence_word_suggestions = sentence_word_id.mapValues(
lambda v: (v[0], v[1], get_suggestions(v[1], bc_dictionary.value,
max_edit_distance))).cache()
# filter for all the first words in sentences
# RDD format: (sentence id, (0, word, [suggestions for word])),
# (sentence id, (0, word, [suggestions for word]), ...
sentence_word_1 = sentence_word_suggestions.filter(lambda (k, v): v[0]==0) \
.mapValues(lambda v: (v[1], v[2]))
# calculate probability for each suggestion
# RDD format: (sentence id, [([word], P(word)), ([word], P(word)), ...]),
# (sentence id, [([word], P(word)), ([word], P(word)), ...]), ...
sentence_path = sentence_word_1.mapValues(lambda v:
start_word_prob(v, bc_start_prob.value, default_start_prob))
# start loop from second word (zero-indexed)
word_num = 1
# extract any sentences that have been fully processed
# RDD format: (sentence id, [([path], P(path)), ([path], P(path)), ...]),
# (sentence id, [([path], P(path)), ([path], P(path)), ...]), ...
completed = sentence_word_count.filter(lambda (k, v): v==word_num) \
.join(sentence_path).mapValues(lambda v: v[1]) \
.partitionBy(num_partitions).cache()
# filter for the next words in sentences
# RDD format: (sentence id, (word, [suggestions for word])),
# (sentence id, (word, [suggestions for word]), ...
sentence_word_next = sentence_word_suggestions.filter(lambda
(k,v): v[0]==word_num) \
.mapValues(lambda v: (v[1], v[2])).cache()
# check whether there are any words left to process
while not sentence_word_next.isEmpty():
# split into one element per suggestion for the current word
# RDD format: (sentence id, (word, (suggested word, edit distance))),
# (sentence id, (word, (suggested word, edit distance))), ...
# use preservesPartitioning to signal that the sentence id
# continues to be the key
sentence_word_next_split = sentence_word_next.flatMap(lambda x:
split_suggestions(x), preservesPartitioning=True)
# join each suggestion with the previous path
# RDD format:
# (sentence id, ((current word,
# (current word suggestion, edit distance)),
# [(previous path-probability pairs)])), ...
sentence_word_next_path = sentence_word_next_split.join(sentence_path)
# identify previous path that maximizes the probability
# of each suggested word correction
# RDD format: (sentence id, ([path], path probability)),
# (sentence id, ([path], path probability)), ...
sentence_word_next_path_prob = sentence_word_next_path \
.mapValues(lambda v: get_max_prev_path(v,
bc_transition_prob.value, default_transition_prob))
# group all the new paths for each sentence and normalize
# for numerical stability
# RDD format: (sentence id, [([path], P(path)), ([path], P(path)), ...]),
# (sentence id, [([path], P(path)), ([path], P(path)), ...]), ...
# cache as this is used in multiple operations
sentence_path = sentence_word_next_path_prob.groupByKey() \
.mapValues(lambda v: normalize(v)).cache()
# move on to next word
word_num += 1
# extract any sentences that have been fully processed
# RDD format: (sentence id, [([path], P(path)), ([path], P(path)), ...]),
# (sentence id, [([path], P(path)), ([path], P(path)), ...]), ...
# cache as this is carried over to the next iteration
# note: we confirmed that the RDDs being joined/unioned are
# co-partitioned during the development phase
completed = completed \
.union(sentence_word_count.filter(lambda (k, v): v==word_num) \
.join(sentence_path) \
.mapValues(lambda v: v[1])).cache()
# filter for the next words in sentences
# RDD format: (sentence id, (word, [suggestions for word])),
# (sentence id, (word, [suggestions for word]), ...
# cache as this is carried over to the next iteration
sentence_word_next = sentence_word_suggestions.filter(
lambda (k, v): v[0]==word_num) \
.mapValues(lambda v: (v[1], v[2])).cache()
# this is necessary for stability - otherwise too many threads
# are spawned if we collect everything directly below
completed.cache()
# get most likely path (sentence)