something working

Evgeniy A. Dushistov
2016-07-03 16:56:30 +03:00
parent 4921f2837a
commit 5b26d06493
15 changed files with 7060732 additions and 0 deletions

Cargo.toml Normal file

@@ -0,0 +1,10 @@
[package]
name = "sdcv"
version = "0.1.0"
authors = ["Evgeniy A. Dushistov <dushistov@mail.ru>"]

[dependencies]
getopts = "0.2"
gettext = "0.2.0"
rust-ini = "0.9.5"
byteorder = "0.5"

src/core.rs Normal file

@@ -0,0 +1,119 @@
use ini::Ini;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::path::PathBuf;
use std::error::Error;
use index::DictionaryIndex;
use index::MemoryDictionaryIndex;
use data::DictionaryData;
use data::SimpleDictionaryData;
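/// A loaded StarDict dictionary: metadata parsed from the .ifo file plus the
/// index (.idx) and article data (.dict) backends behind trait objects.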
pub struct Dictionary {
ifo_file_name: PathBuf,
wordcount : usize,
bookname: String,
sametypesequence: String,
index: Box<DictionaryIndex>,
data: Box<DictionaryData>
}
impl Dictionary {
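/// Parse the .ifo metadata file and open the matching .idx and .dict files
/// that live next to it in the same directory.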
pub fn new(ifo_file_name: &Path) -> Result<Dictionary, String> {
let basename = try!(ifo_file_name.file_stem().ok_or("bad ifo file name"));
let basename = try!(basename.to_str().ok_or("cannot convert file name to unicode"));
let dict_dir = try!(ifo_file_name.parent().ok_or("bad ifo directory name"));
let mut file = try!(File::open(ifo_file_name).map_err(|err| err.to_string()));
let mut ifo_content = String::new();
match file.read_to_string(&mut ifo_content) {
Ok(read_bytes) => { if read_bytes < 3 { return Result::Err("ifo file too small".to_string()) } },
Result::Err(err) => return Result::Err(err.to_string())
};
let content =
if ifo_content.starts_with("\u{feff}") {
// the UTF-8 BOM is 3 bytes long; slicing at byte 1 would split the character and panic
&ifo_content[3..]
} else {
&ifo_content[0..]
};
const DICT_MAGIC_DATA: &'static str = "StarDict's dict ifo file";
if !content.starts_with(DICT_MAGIC_DATA) {
return Result::Err("ifo magic not match".to_string());
}
let ifo_cfg = try!(Ini::load_from_str(content).map_err(|err| err.msg));
let section = try!(ifo_cfg.section(None::<String>).ok_or("ifo: parse none section error".to_string()));
let wordcount = try!(section.get("wordcount").ok_or("no wordcount".to_string()));
let wordcount = try!(wordcount.parse::<usize>().map_err(|err| err.description().to_string()));
let idxfilesize = try!(section.get("idxfilesize").ok_or("no idxfilesize".to_string()));
let bookname = try!(section.get("bookname").ok_or("no bookname".to_string()));
let sametypesequence = try!(section.get("sametypesequence").ok_or("no sametypesequence".to_string()));
let idx_path = dict_dir.join(basename.to_string() + ".idx");
let idx_path_gz = dict_dir.join(basename.to_string() + ".idx.gz");
let index: Box<DictionaryIndex>;
if idx_path.exists() && idx_path.is_file() {
let mut idx_file = try!(File::open(idx_path).map_err(|err| err.description().to_string()));
let mut idx_content = Vec::<u8>::new();
if let Result::Err(err) = idx_file.read_to_end(&mut idx_content) {
return Result::Err(format!("Can not read index file: {}", err.description()));
}
let mem_dict = try!(MemoryDictionaryIndex::new(wordcount, idx_content));
index = Box::new(mem_dict);
} else if idx_path_gz.exists() && idx_path_gz.is_file() {
return Result::Err("reading idx.gz not implemented".to_string());
} else {
return Result::Err(format!("no index file for {}", ifo_file_name.to_str().unwrap()));
}
let data_path = dict_dir.join(basename.to_string() + ".dict");
let data_path_dz = dict_dir.join(basename.to_string() + ".dict.dz");
let data: Box<DictionaryData>;
if data_path.exists() && data_path.is_file() {
let simple_data = try!(SimpleDictionaryData::new(data_path.as_path()));
data = Box::new(simple_data);
} else if data_path_dz.exists() && data_path_dz.is_file() {
return Result::Err("reading dict.dz not implemented".to_string());
} else {
return Result::Err(format!("no data file for {}", ifo_file_name.to_str().unwrap()));
}
Result::Ok(Dictionary{ifo_file_name : ifo_file_name.to_path_buf(), wordcount : wordcount,
bookname : bookname.clone(), sametypesequence: sametypesequence.clone(),
index : index, data: data})
}
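/// Look up `key` in the index; on a hit, read the article bytes from the data file.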
pub fn find(&mut self, key: &str) -> Option<String> {
match self.index.find(key) {
Err(_) => None,
Ok(idx) => {
let (_, offset, length) = self.index.get_key(idx);
Some(self.data.get_data(offset, length).unwrap())
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::Path;
#[test]
fn open_dict() {
let dic = Dictionary::new(Path::new("tests/stardict-test_dict-2.4.2/test_dict.ifo")).unwrap();
assert_eq!(dic.wordcount, 1_usize);
assert_eq!(dic.bookname, "test_dict");
assert_eq!(dic.sametypesequence, "x");
}
#[test]
fn find_in_dict() {
let mut dic = Dictionary::new(Path::new("tests/stardict-test_dict-2.4.2/test_dict.ifo")).unwrap();
assert_eq!(dic.find("test").unwrap(), "<k>test</k>\ntest passed");
}
}

src/data.rs Normal file

@@ -0,0 +1,33 @@
use std::fs::File;
use std::io::Read;
use std::io::Seek;
use std::path::Path;
use std::io::SeekFrom;
pub trait DictionaryData {
fn get_data(&mut self, offset: u64, length: usize) -> Result<String, String>;
}
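// Backend for an uncompressed .dict file: articles are read directly at the
// (offset, size) positions taken from the index.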
pub struct SimpleDictionaryData {
data_file: File,
}
impl DictionaryData for SimpleDictionaryData {
fn get_data(&mut self, offset: u64, length: usize) -> Result<String, String> {
try!(self.data_file.seek(SeekFrom::Start(offset)).map_err(|err| err.to_string()));
let mut buffer = vec![0_u8; length];
try!(self.data_file.read_exact(&mut buffer).map_err(|err| err.to_string()));
let utf8_s = try!(String::from_utf8(buffer).map_err(|_| "invalid utf-8 data in dict file".to_string()));
Result::Ok(utf8_s)
}
}
impl SimpleDictionaryData {
pub fn new(data_file_name: &Path) -> Result<SimpleDictionaryData, String> {
let file = try!(File::open(data_file_name).map_err(|err| err.to_string()));
Result::Ok(SimpleDictionaryData{data_file: file})
}
}

src/index.rs Normal file

@@ -0,0 +1,249 @@
use std::str::from_utf8;
use std::cmp::Ordering;
use std::io::Cursor;
use byteorder::{NetworkEndian, ReadBytesExt};
pub trait DictionaryIndex {
fn get_key(&self, idx: usize) -> (&str, u64, usize);
fn count(&self) -> usize;
fn find(&self, key: &str) -> Result<usize, usize>;
}
pub struct MemoryDictionaryIndex {
idx_content: Vec<u8>,
wordlist: Vec<usize>,
}
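// ASCII-only case-insensitive comparison mirroring glib's g_ascii_strcasecmp
// (quoted in the C reference inside stardict_strcmp below).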
fn g_ascii_strcasecmp(s1: &str, s2: &str) -> isize {
fn is_upper(c: u8) -> bool { c >= b'A' && c <= b'Z' }
fn to_lower(c: u8) -> u8 { if is_upper(c) { c - b'A' + b'a' } else { c } }
let mut it1 = s1.bytes();
let mut it2 = s2.bytes();
loop {
match (it1.next(), it2.next()) {
(None, None) => return 0,
(Some(b1), None) => return b1 as isize,
(None, Some(b2)) => return -(b2 as isize),
(Some(b1), Some(b2)) => {
let c1 = to_lower(b1) as isize;
let c2 = to_lower(b2) as isize;
if c1 != c2 {
return c1 - c2;
}
},
}
}
}
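// StarDict ordering: compare case-insensitively first, then fall back to a plain
// byte-wise comparison to break ties, exactly like the original C code quoted below.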
fn stardict_strcmp(s1: &str, s2: &str) -> isize {
/*
#define ISUPPER(c) ((c) >= 'A' && (c) <= 'Z')
#define TOLOWER(c) (ISUPPER (c) ? (c) - 'A' + 'a' : (c))
gint
g_ascii_strcasecmp (const gchar *s1,
const gchar *s2)
{
gint c1, c2;
g_return_val_if_fail (s1 != NULL, 0);
g_return_val_if_fail (s2 != NULL, 0);
while (*s1 && *s2)
{
c1 = (gint)(guchar) TOLOWER (*s1);
c2 = (gint)(guchar) TOLOWER (*s2);
if (c1 != c2)
return (c1 - c2);
s1++; s2++;
}
return (((gint)(guchar) *s1) - ((gint)(guchar) *s2));
}
const gint a = g_ascii_strcasecmp(s1, s2);
if (a == 0)
return strcmp(s1, s2);
else
return a;
*/
let res = g_ascii_strcasecmp(s1, s2);
if res == 0 {
assert_eq!(s1.len(), s2.len());
for it in s1.bytes().zip(s2.bytes()) {
let (b1, b2) = it;
if b1 != b2 {
return (b1 as isize) - (b2 as isize);
}
}
0_isize
} else {
res
}
}
fn stardict_str_order(s1: &str, s2: &str) -> Ordering {
let cmp_res = stardict_strcmp(s1, s2);
if cmp_res < 0 {
Ordering::Less
} else if cmp_res > 0 {
Ordering::Greater
} else {
Ordering::Equal
}
}
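// Each .idx entry is a NUL-terminated UTF-8 key followed by two big-endian u32
// values: the article's offset in the .dict file and its size in bytes.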
fn get_key_by_offset(content: &Vec<u8>, key_pos: usize) -> (&str, u64, usize) {
let key = &content[key_pos..];
let end_of_key = key.iter().position(|&x| x == b'\0').unwrap() + key_pos;
let mut reader = Cursor::new(&content[end_of_key + 1..]);
let offset = reader.read_u32::<NetworkEndian>().unwrap() as u64;
let length = reader.read_u32::<NetworkEndian>().unwrap() as usize;
(from_utf8(&content[key_pos..end_of_key]).unwrap(), offset, length)
}
impl DictionaryIndex for MemoryDictionaryIndex {
fn get_key(&self, idx: usize) -> (&str, u64, usize) {
let key_pos = self.wordlist[idx];
get_key_by_offset(&self.idx_content, key_pos)
}
fn count(&self) -> usize {
self.wordlist.len()
}
fn find(&self, key: &str) -> Result<usize, usize> {
self.wordlist.binary_search_by(
|probe| stardict_str_order(get_key_by_offset(&self.idx_content, *probe).0, key)
)
}
}
impl MemoryDictionaryIndex {
pub fn new(expected_wordcount : usize, idx_content : Vec<u8>) -> Result<MemoryDictionaryIndex, String> {
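// The 8 bytes after each key's NUL terminator hold the offset/size pair; they
// are skipped here and decoded later by get_key_by_offset().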
const PADDING_LENGTH: usize = 8;
let mut wordlist = Vec::with_capacity(expected_wordcount);
{
let mut slice = &idx_content[0..];
let mut pos : usize = 0;
while let Some(idx) = slice.iter().position(|&x| x == b'\0') {
let (head, tail) = slice.split_at(idx);
try!(from_utf8(head).map_err(|_| "invalid utf-8 in key".to_string()));
wordlist.push(pos);
pos += head.len() + PADDING_LENGTH + 1;
// +1 to skip over the NUL
if tail.len() > PADDING_LENGTH + 1 {
slice = &tail[PADDING_LENGTH + 1..];
} else {
break;
}
}
}
if wordlist.len() != expected_wordcount {
Result::Err(format!("Expect words in index {}, got {} words", expected_wordcount, wordlist.len()))
} else {
Result::Ok(MemoryDictionaryIndex {idx_content: idx_content, wordlist : wordlist})
}
}
}
#[cfg(test)]
mod test {
use super::*;
use super::stardict_str_order;
use super::stardict_strcmp;
use std::path::Path;
use std::fs::File;
use std::io::Read;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashSet;
#[test]
fn index_memory_open() {
let idx_file_name = Path::new("tests/stardict-test_dict-2.4.2/test_dict.idx");
let mut file = File::open(idx_file_name).unwrap();
let mut idx_content = Vec::<u8>::new();
file.read_to_end(&mut idx_content).unwrap();
let index = MemoryDictionaryIndex::new(1, idx_content).unwrap();
assert_eq!(1_usize, index.count());
assert_eq!("test", index.get_key(0).0);
let idx_file_name = Path::new("tests/words_dic/stardict-words-2.4.2/words.idx");
let mut file = File::open(idx_file_name).unwrap();
let mut idx_content = Vec::<u8>::new();
file.read_to_end(&mut idx_content).unwrap();
let index_size = 1671704_usize;
let index = MemoryDictionaryIndex::new(index_size, idx_content).unwrap();
assert_eq!(index_size, index.count());
{
let f = File::open("tests/words_dic/words.dummy").unwrap();
let file = BufReader::new(&f);
let mut dic_words = HashSet::new();
for line in file.lines() {
let l = line.unwrap();
dic_words.insert(l);
}
for i in 0..index.count() {
let (key, _, _) = index.get_key(i);
if !dic_words.contains(key) {
panic!("no '{}'(len {}) idx {} in dic_words", key, key.len(), i);
}
match index.find(key) {
Err(idx) => panic!("we search '{}', and not found err({})", key, idx),
Ok(idx) => assert_eq!(idx, i),
}
}
}
}
#[test]
fn index_stardict_strcmp_small() {
let arr_s = ["a", "b", "c", "d", "z"];
let seek = "c";
assert_eq!(arr_s.binary_search_by(|probe| stardict_str_order(probe, seek)), Ok(2));
let seek = "e";
assert_eq!(arr_s.binary_search_by(|probe| stardict_str_order(probe, seek)), Err(4));
}
#[test]
fn index_stardict_strcmp_big() {
let mut exp_res_list = vec![];
{
let f = File::open("tests/stardict_strcmp_test_data_cmp_exp.txt").unwrap();
let file = BufReader::new(&f);
for line in file.lines() {
let line = line.unwrap();
let exp_res = line.parse::<isize>().unwrap();
exp_res_list.push(exp_res);
}
}
let f = File::open("tests/stardict_strcmp_test_data.txt").unwrap();
let file = BufReader::new(&f);
let mut prev_line : String = "".to_string();
let mut counter : u8 = 0;
let mut exp_it = exp_res_list.iter();
for line in file.lines() {
if counter == 1 {
let cur_line = line.unwrap();
let res = stardict_strcmp(&prev_line, &cur_line);
let exp_res = exp_it.next().unwrap();
if res != *exp_res {
panic!("we expect {}, got {} for '{}' vs '{}'", exp_res, res, prev_line, cur_line);
}
} else {
prev_line = line.unwrap();
}
counter = (counter + 1) % 2;
}
}
}

src/main.rs Normal file

@@ -0,0 +1,64 @@
extern crate getopts;
extern crate gettext;
extern crate ini;
extern crate byteorder;
mod core;
mod index;
mod data;
use core::Dictionary;
use std::env;
use std::result::Result;
use getopts::Options;
use gettext::Catalog;
use std::fs::File;
use std::path::Path;
struct Library {
dicts : Vec<Dictionary>
}
impl Library {
fn new(dict_dirs: &Vec<String>, dict_order_names: &Vec<String>, dict_disable_names: &Vec<String>) -> Result<Library, String> {
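// NOTE: the dict_dirs/dict_order_names/dict_disable_names arguments are not used yet;
// for now a single dictionary is loaded from a hardcoded path.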
let dicts = vec![Dictionary::new(Path::new("/home/evgeniy/.stardict/dic/stardict-Mueller7accentGPL-2.4.2/Mueller7accentGPL.ifo")).unwrap()];
Ok(Library{dicts: dicts})
}
pub fn simple_lookup(&mut self, phrase: &str) /*-> Option<String>*/ {
for dict in self.dicts.iter_mut() {
if let Some(translation) = dict.find(phrase) {
println!("{}", translation);
}
}
}
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} [options] [list of words]", program);
print!("{}", opts.usage(&brief));
}
fn main() {
let translation_f = File::open("/usr/share/locale/ru/LC_MESSAGES/sdcv.mo").expect("could not open the catalog");
let catalog = Catalog::parse(translation_f).expect("could not parse the catalog");
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
opts.optflag("v", "version", catalog.gettext("display version information and exit"));
opts.optflag("h", "help", catalog.gettext("Show help options"));
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let dict_dirs: Vec<String> = vec![];
let dict_order_names: Vec<String> = vec![];
let dict_disable_names: Vec<String> = vec![];
let mut library = Library::new(&dict_dirs, &dict_order_names, &dict_disable_names).unwrap();
library.simple_lookup("man");
}


@@ -0,0 +1,27 @@
//g++ `pkg-config --cflags glib-2.0` call_stardict_strcmp.cpp `pkg-config --libs glib-2.0`
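// Reads pairs of lines from stdin and prints stardict_strcmp() for each pair;
// apparently used to produce glib-based reference results for the Rust stardict_strcmp tests.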
#include <glib.h>
#include <locale.h>
#include <cstdlib>
#include <cstring>
#include <iostream>
static inline gint stardict_strcmp(const gchar *s1, const gchar *s2)
{
const gint a = g_ascii_strcasecmp(s1, s2);
if (a == 0)
return strcmp(s1, s2);
else
return a;
}
int main()
{
setlocale(LC_ALL, "");
std::cin.sync_with_stdio(false);
std::string line1, line2;
while (std::getline(std::cin, line1) &&
std::getline(std::cin, line2)) {
std::cout << stardict_strcmp(line1.c_str(), line2.c_str()) << "\n";
}
return EXIT_SUCCESS;
}

tests/generate_strings_pairs.py Executable file

@@ -0,0 +1,42 @@
#!/usr/bin/env python3
import random, sys
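# Emits word pairs, two lines per pair: real dictionary words plus randomly generated
# strings; presumably piped through call_stardict_strcmp.cpp to record glib's
# reference comparison results for the Rust tests.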
fname = "/home/evgeniy/projects/competitions/words/data/words.txt"
with open(fname, "r") as fin:
words = sorted(set([word.strip() for word in fin.readlines()]))
res = []
for i in range(0, len(words)):
max_idx = len(words) - 1
idx1 = random.randint(0, max_idx)
idx2 = random.randint(0, max_idx)
res.append((words[idx1], words[idx2]))
letters = "abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфкцчщьъэюя"
letters += letters.upper()
letters += " \t!@#$%^&*()[]"
def gen_word(req_word_len):
max_idx = len(letters) - 1
res = ""
for i in range(0, req_word_len):
res += letters[random.randint(0, max_idx)]
return res
for i in range(0, 10000):
l1 = random.randint(1, 100)
l2 = random.randint(1, 100)
res.append((gen_word(l1), gen_word(l2)))
for i in range(0, 10000):
l1 = random.randint(1, 100)
res.append((gen_word(l1), gen_word(l1)))
for item in res:
print(item[0])
print(item[1])

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,10 @@
#!/usr/bin/env python3
import sys
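# Deduplicates the words from argv[1]; note that each unique word is written
# to argv[2] twice (two identical lines per word).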
with open(sys.argv[1], "r") as f:
with open(sys.argv[2], "w") as out:
words = set([word.strip() for word in f.readlines()])
for word in words:
out.write(word + "\n")
out.write(word + "\n")

File diff suppressed because it is too large

Binary file not shown.

Binary file not shown.


@@ -0,0 +1,7 @@
StarDict's dict ifo file
version=2.4.2
wordcount=1671704
idxfilesize=30235592
bookname=
date=2016.06.18
sametypesequence=x

tests/words_dic/words.dummy Normal file

File diff suppressed because it is too large