mirror of
https://github.com/LBRYFoundation/LBRY-Vault.git
synced 2025-08-31 01:11:35 +00:00
fix: cache wordlists
This commit is contained in:
parent
b3f913340c
commit
af86c7e3fd
1 changed files with 21 additions and 14 deletions
|
@ -89,20 +89,29 @@ def normalize_text(seed: str) -> str:
|
|||
seed = u''.join([seed[i] for i in range(len(seed)) if not (seed[i] in string.whitespace and is_CJK(seed[i-1]) and is_CJK(seed[i+1]))])
|
||||
return seed
|
||||
|
||||
|
||||
# Cache of parsed wordlists, keyed by resolved file path.
# Values are tuples so cached wordlists cannot be mutated by callers.
_WORDLIST_CACHE = {}


def load_wordlist(filename):
    """Load and parse a wordlist file, caching the result per path.

    The file is read from disk at most once per path; subsequent calls
    return the cached, immutable tuple. Parsing: the whole file is
    NFKD-normalized, ``#`` comments are stripped from each line,
    trailing ``' '``/``'\r'`` are trimmed, and blank lines are skipped.

    Args:
        filename: basename of the wordlist file inside the 'wordlist'
            resource directory (resolved via ``resource_path``).

    Returns:
        A tuple of words in file order.

    Raises:
        ValueError: if any entry contains an embedded space — entries
            must be single words or seed-phrase splitting would break.
            (Was an ``assert`` before; asserts vanish under ``python -O``.)
    """
    path = resource_path('wordlist', filename)
    if path not in _WORDLIST_CACHE:
        with open(path, 'r', encoding='utf-8') as f:
            s = f.read().strip()
        s = unicodedata.normalize('NFKD', s)
        lines = s.split('\n')
        wordlist = []
        for line in lines:
            line = line.split('#')[0]
            line = line.strip(' \r')
            if ' ' in line:
                raise ValueError('wordlist entry contains a space: {!r}'.format(line))
            if line:
                wordlist.append(line)

        # wordlists shouldn't be mutated, but just in case,
        # convert it to a tuple
        _WORDLIST_CACHE[path] = tuple(wordlist)
    return _WORDLIST_CACHE[path]
|
||||
|
||||
|
||||
filenames = {
|
||||
|
@ -114,8 +123,6 @@ filenames = {
|
|||
}
|
||||
|
||||
|
||||
# NOTE: load_wordlist() caches parsed wordlists per path (_WORDLIST_CACHE),
# so instantiating this class repeatedly no longer re-reads from disk,
# and all instances share one immutable tuple per wordlist.
|
||||
class Mnemonic(Logger):
|
||||
# Seed derivation does not follow BIP39
|
||||
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
|
||||
|
|
Loading…
Add table
Reference in a new issue