3 Commits

Author                SHA1        Message                                   Date
Evgeniy A. Dushistov  21a0678cba  implement support of gziped index files   2016-08-06 00:31:37 +03:00
Evgeniy A. Dushistov  3aaae57139  add cargo lock                            2016-07-03 19:43:43 +03:00
Evgeniy A. Dushistov  5b26d06493  something working                         2016-07-03 16:56:30 +03:00
18 changed files with 7061106 additions and 0 deletions

Cargo.lock (generated, new file, 134 lines)

@@ -0,0 +1,134 @@
[root]
name = "sdcv"
version = "0.1.0"
dependencies = [
"byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
"getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
"gettext 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rust-ini 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "byteorder"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "byteorder"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "encoding"
version = "0.2.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"encoding-index-japanese 1.20141219.5 (registry+https://github.com/rust-lang/crates.io-index)",
"encoding-index-korean 1.20141219.5 (registry+https://github.com/rust-lang/crates.io-index)",
"encoding-index-simpchinese 1.20141219.5 (registry+https://github.com/rust-lang/crates.io-index)",
"encoding-index-singlebyte 1.20141219.5 (registry+https://github.com/rust-lang/crates.io-index)",
"encoding-index-tradchinese 1.20141219.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "encoding-index-japanese"
version = "1.20141219.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"encoding_index_tests 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "encoding-index-korean"
version = "1.20141219.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"encoding_index_tests 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "encoding-index-simpchinese"
version = "1.20141219.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"encoding_index_tests 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "encoding-index-singlebyte"
version = "1.20141219.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"encoding_index_tests 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "encoding-index-tradchinese"
version = "1.20141219.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"encoding_index_tests 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "encoding_index_tests"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "flate2"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
"miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "gcc"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "getopts"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "gettext"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"encoding 0.2.32 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "libc"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "log"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "miniz-sys"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rust-ini"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]

Cargo.toml (new file, 12 lines)

@@ -0,0 +1,12 @@
[package]
name = "sdcv"
version = "0.1.0"
authors = ["Evgeniy A. Dushistov <dushistov@mail.ru>"]
[dependencies]
getopts = "0.2"
gettext = "0.2.0"
rust-ini = "0.9.5"
byteorder = "0.5"
libc = "0.2.14"
flate2 = "0.2"

src/core.rs (new file, 116 lines)

@@ -0,0 +1,116 @@
use ini::Ini;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::path::PathBuf;
use std::error::Error;
use index::DictionaryIndex;
use index::MemoryDictionaryIndex;
use index::DiskDictionaryIndex;
use data::DictionaryData;
use data::SimpleDictionaryData;
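/// A StarDict dictionary is described by an .ifo metadata file; the word index
/// comes from <name>.idx (or the gzip-compressed <name>.idx.gz) and the article
/// bodies from <name>.dict (<name>.dict.dz is detected but not handled yet).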
pub struct Dictionary {
ifo_file_name: PathBuf,
wordcount : usize,
bookname: String,
sametypesequence: String,
index: Box<DictionaryIndex>,
data: Box<DictionaryData>
}
impl Dictionary {
pub fn new(ifo_file_name: &Path) -> Result<Dictionary, String> {
let basename = try!(try!(ifo_file_name.file_stem().ok_or("bad ifo file name")).to_str().ok_or("can not convert file name to unicode"));
let dict_dir = try!(ifo_file_name.parent().ok_or("bad ifo directory name"));
let mut file = try!(File::open(ifo_file_name).map_err(|err| err.to_string()));
let mut ifo_content = String::new();
match file.read_to_string(&mut ifo_content) {
Ok(read_bytes) => { if read_bytes < 3 { return Result::Err("ifo file too small".to_string()) } },
Result::Err(err) => return Result::Err(err.to_string())
};
let content =
if ifo_content.starts_with("\u{feff}") {
&ifo_content[1..]
} else {
&ifo_content[0..]
};
static DICT_MAGIC_DATA: &'static str = "StarDict's dict ifo file";
if !content.starts_with(DICT_MAGIC_DATA) {
return Result::Err("ifo magic not match".to_string());
}
let ifo_cfg = try!(Ini::load_from_str(content).map_err(|err| err.msg));
let section = try!(ifo_cfg.section(None::<String>).ok_or("ifo: parse none section error".to_string()));
let wordcount = try!(section.get("wordcount").ok_or("no wordcount".to_string()));
let wordcount = try!(wordcount.parse::<usize>().map_err(|err| err.description().to_string()));
let idxfilesize = try!(section.get("idxfilesize").ok_or("no idxfilesize".to_string()));
let bookname = try!(section.get("bookname").ok_or("no bookname".to_string()));
let sametypesequence = try!(section.get("sametypesequence").ok_or("no sametypesequence".to_string()));
let idx_path = dict_dir.join(basename.to_string() + ".idx");
let idx_path_gz = dict_dir.join(basename.to_string() + ".idx.gz");
let mut index: Box<DictionaryIndex>;
if idx_path.exists() && idx_path.is_file() {
let disk_dict = try!(DiskDictionaryIndex::new(wordcount, &idx_path));
index = Box::new(disk_dict);
} else if idx_path_gz.exists() && idx_path_gz.is_file() {
let mem_dict = try!(MemoryDictionaryIndex::new_from_gzip(wordcount, &idx_path_gz));
index = Box::new(mem_dict);
} else {
return Result::Err(format!("no index file for {}", ifo_file_name.to_str().unwrap()));
}
let data_path = dict_dir.join(basename.to_string() + ".dict");
let data_path_dz = dict_dir.join(basename.to_string() + ".dict.dz");
let mut data: Box<DictionaryData>;
if data_path.exists() && data_path.is_file() {
let simple_data = try!(SimpleDictionaryData::new(data_path.as_path()));
data = Box::new(simple_data);
} else if data_path_dz.exists() && data_path_dz.is_file() {
return Result::Err("reading dict.dz not implemented".to_string());
} else {
return Result::Err(format!("no data file for {}", ifo_file_name.to_str().unwrap()));
}
Result::Ok(Dictionary{ifo_file_name : ifo_file_name.to_path_buf(), wordcount : wordcount,
bookname : bookname.clone(), sametypesequence: sametypesequence.clone(),
index : index, data: data})
}
pub fn find(&mut self, key: &str) -> Option<String> {
match self.index.find(key) {
Err(_) => None,
Ok(idx) => {
let key_pos = self.index.get_key(idx);
Some(self.data.get_data(key_pos.1, key_pos.2).unwrap())
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::Path;
#[test]
fn open_dict() {
let dic = Dictionary::new(Path::new("tests/stardict-test_dict-2.4.2/test_dict.ifo")).unwrap();
assert_eq!(dic.wordcount, 1_usize);
assert_eq!(dic.bookname, "test_dict");
assert_eq!(dic.sametypesequence, "x");
}
#[test]
fn find_in_dict() {
let mut dic = Dictionary::new(Path::new("tests/stardict-test_dict-2.4.2/test_dict.ifo")).unwrap();
assert_eq!(dic.find("test").unwrap(), "<k>test</k>\ntest passed");
}
}

src/data.rs (new file, 33 lines)

@@ -0,0 +1,33 @@
use std::fs::File;
use std::io::Read;
use std::io::Seek;
use std::path::Path;
use std::io::SeekFrom;
pub trait DictionaryData {
fn get_data(& mut self, offset: u64, length: usize) -> Result<String, String>;
}
pub struct SimpleDictionaryData {
data_file: File,
}
impl DictionaryData for SimpleDictionaryData {
fn get_data(& mut self, offset: u64, length: usize) -> Result<String, String> {
try!(self.data_file.seek(SeekFrom::Start(offset)).map_err(|err| err.to_string()));
let mut buffer = Vec::<u8>::with_capacity(length);
buffer.resize(length, 0_u8);
try!(self.data_file.read_exact(& mut buffer).map_err(|err| err.to_string()));
let utf8_s = try!(String::from_utf8(buffer).map_err(|_| "invalid utf-8 in dict data".to_string()));
Result::Ok(utf8_s)
}
}
impl SimpleDictionaryData {
pub fn new(data_file_name: &Path) ->Result<SimpleDictionaryData, String> {
let file = try!(File::open(data_file_name).map_err(|err| err.to_string()));
Result::Ok(SimpleDictionaryData{data_file : file})
}
}

src/index.rs (new file, 308 lines)

@@ -0,0 +1,308 @@
use std::str::from_utf8;
use std::cmp::Ordering;
use std::io::Cursor;
use byteorder::{NetworkEndian, ReadBytesExt};
use mem_mapped_file::MemMappedFile;
use std::path::Path;
use std::fs::File;
use std::io::Read;
use flate2::read::GzDecoder;
use std::error::Error;
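/// A StarDict .idx file is a sequence of entries: a NUL-terminated key followed
/// by two big-endian u32 values, the offset and the size of the corresponding
/// article in the .dict file. `DictionaryIndex` abstracts over the in-memory and
/// mmap-backed representations of that index.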
pub trait DictionaryIndex {
fn get_key(&self, idx: usize) -> (&str, u64, usize);
fn count(&self) -> usize;
fn find(&self, key: &str) -> Result<usize, usize>;
}
pub struct MemoryDictionaryIndex {
idx_content: Vec<u8>,
wordlist: Vec<usize>,
}
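// Byte-wise, ASCII-only case-insensitive comparison mirroring glib's
// g_ascii_strcasecmp (quoted below in stardict_strcmp); .idx entries are sorted
// with this ordering, so the binary search has to use it as well.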
fn g_ascii_strcasecmp(s1: &str, s2: &str) -> isize {
fn is_upper(c: u8) -> bool { c >= b'A' && c <= b'Z' }
fn to_lower(c: u8) -> u8 { if is_upper(c) { c - b'A' + b'a' } else { c } }
let mut it1 = s1.bytes();
let mut it2 = s2.bytes();
loop {
match (it1.next(), it2.next()) {
(None, None) => return 0,
(Some(b1), None) => return b1 as isize,
(None, Some(b2)) => return -(b2 as isize),
(Some(b1), Some(b2)) => {
let c1 = to_lower(b1) as isize;
let c2 = to_lower(b2) as isize;
if c1 != c2 {
return c1 - c2;
}
},
}
}
}
fn stardict_strcmp(s1: &str, s2: &str) -> isize {
/*
#define ISUPPER(c) ((c) >= 'A' && (c) <= 'Z')
#define TOLOWER(c) (ISUPPER (c) ? (c) - 'A' + 'a' : (c))
gint
g_ascii_strcasecmp (const gchar *s1,
const gchar *s2)
{
gint c1, c2;
g_return_val_if_fail (s1 != NULL, 0);
g_return_val_if_fail (s2 != NULL, 0);
while (*s1 && *s2)
{
c1 = (gint)(guchar) TOLOWER (*s1);
c2 = (gint)(guchar) TOLOWER (*s2);
if (c1 != c2)
return (c1 - c2);
s1++; s2++;
}
return (((gint)(guchar) *s1) - ((gint)(guchar) *s2));
}
const gint a = g_ascii_strcasecmp(s1, s2);
if (a == 0)
return strcmp(s1, s2);
else
return a;
*/
let res = g_ascii_strcasecmp(s1, s2);
if res == 0 {
assert_eq!(s1.len(), s2.len());
for it in s1.bytes().zip(s2.bytes()) {
let (b1, b2) = it;
if b1 != b2 {
return (b1 as isize) - (b2 as isize);
}
}
0_isize
} else {
res
}
}
fn stardict_str_order(s1: &str, s2: &str) -> Ordering {
let cmp_res = stardict_strcmp(s1, s2);
if cmp_res < 0 {
Ordering::Less
} else if cmp_res > 0 {
Ordering::Greater
} else {
Ordering::Equal
}
}
fn get_key_by_offset(content: &[u8], key_pos: usize) -> (&str, u64, usize) {
let key = &content[key_pos..];
let end_of_key = key.iter().position(|&x| x == b'\0').unwrap() + key_pos;
let mut reader = Cursor::new(&content[end_of_key + 1..]);
let offset = reader.read_u32::<NetworkEndian>().unwrap() as u64;
let length = reader.read_u32::<NetworkEndian>().unwrap() as usize;
(from_utf8(&content[key_pos..end_of_key]).unwrap(), offset, length)
}
impl DictionaryIndex for MemoryDictionaryIndex {
fn get_key(&self, idx: usize) -> (&str, u64, usize) {
let key_pos = self.wordlist[idx];
get_key_by_offset(&self.idx_content, key_pos)
/*
let key = &self.idx_content[key_pos..];
let end_of_key = key.iter().position(|&x| x == b'\0').unwrap();
let mut reader = Cursor::new(&self.idx_content[end_of_key + 1..]);
let offset = reader.read_u32::<NetworkEndian>().unwrap() as u64;
let length = reader.read_u32::<NetworkEndian>().unwrap() as usize;
(from_utf8(&self.idx_content[key_pos..(key_pos + end_of_key)]).unwrap(), offset, length)*/
}
fn count(&self) -> usize {
self.wordlist.len()
}
fn find(&self, key: &str) -> Result<usize, usize> {
self.wordlist.binary_search_by(
|probe| stardict_str_order(get_key_by_offset(&self.idx_content, *probe).0, key)
)
}
}
fn get_offsets_of_phrases_in_index(expected_wordcount: usize, idx_content: &[u8]) -> Result<Vec<usize>, String> {
const PADDING_LENGTH: usize = 8;
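// the 8 bytes that follow each NUL-terminated key: two big-endian u32 values
// (article offset and size)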
let mut wordlist = vec![];
wordlist.reserve(expected_wordcount);
{
let mut slice = idx_content;
let mut pos : usize = 0;
while let Some(idx) = slice.iter().position(|&x| x == b'\0') {
let (head, tail) = slice.split_at(idx);
try!(from_utf8(head).map_err(|_| "invalid utf-8 in key".to_string()));
wordlist.push(pos);
pos += head.len() + PADDING_LENGTH + 1;
// PADDING_LENGTH + 1 skips the NUL terminator plus the two u32 fields
if tail.len() > PADDING_LENGTH + 1 {
slice = &tail[PADDING_LENGTH + 1..];
} else {
break;
}
}
}
if wordlist.len() != expected_wordcount {
Result::Err(format!("Expect words in index {}, got {} words", expected_wordcount, wordlist.len()))
} else {
Result::Ok(wordlist)
}
}
impl MemoryDictionaryIndex {
pub fn new_from_vec(expected_wordcount: usize, idx_content: Vec<u8>) -> Result<MemoryDictionaryIndex, String> {
let wordlist = try!(get_offsets_of_phrases_in_index(expected_wordcount, idx_content.as_slice()));
Result::Ok(MemoryDictionaryIndex {idx_content: idx_content, wordlist: wordlist})
}
pub fn new_from_gzip(expected_wordcount: usize, idx_gz_file_path: &Path) -> Result<MemoryDictionaryIndex, String> {
let mut idx_file = try!(File::open(idx_gz_file_path).map_err(|err| err.description().to_string()));
let mut gzip_decoder = try!(GzDecoder::new(idx_file).map_err(|err| err.description().to_string()));
let mut idx_content = Vec::<u8>::new();
if let Result::Err(err) = gzip_decoder.read_to_end(&mut idx_content) {
return Result::Err(format!("Can not read index file: {}", err.description()));
}
let wordlist = try!(get_offsets_of_phrases_in_index(expected_wordcount, idx_content.as_slice()));
Result::Ok(MemoryDictionaryIndex {idx_content: idx_content, wordlist: wordlist})
}
}
pub struct DiskDictionaryIndex {
file_map: MemMappedFile,
wordlist: Vec<usize>,
}
impl DictionaryIndex for DiskDictionaryIndex {
fn get_key(&self, idx: usize) -> (&str, u64, usize) {
let key_pos = self.wordlist[idx];
let idx_content = self.file_map.get_chunk(0, self.file_map.len()).unwrap();
get_key_by_offset(idx_content, key_pos)
}
fn count(&self) -> usize { self.wordlist.len() }
fn find(&self, key: &str) -> Result<usize, usize> {
let idx_content = self.file_map.get_chunk(0, self.file_map.len()).unwrap();
self.wordlist.binary_search_by(
|probe| stardict_str_order(get_key_by_offset(idx_content, *probe).0, key)
)
}
}
impl DiskDictionaryIndex {
pub fn new(expected_wordcount: usize, idx_file_path: &Path) -> Result<DiskDictionaryIndex, String> {
let file_map = try!(MemMappedFile::new(idx_file_path));
let whole_map = try!(file_map.get_chunk(0, file_map.len()));
let wordlist = try!(get_offsets_of_phrases_in_index(expected_wordcount, whole_map));
Ok(DiskDictionaryIndex{file_map: file_map, wordlist: wordlist})
}
}
#[cfg(test)]
mod test {
use super::*;
use super::stardict_str_order;
use super::stardict_strcmp;
use std::path::Path;
use std::fs::File;
use std::io::Read;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashSet;
#[test]
fn index_memory_open() {
let idx_file_name = Path::new("tests/stardict-test_dict-2.4.2/test_dict.idx");
let mut file = File::open(idx_file_name).unwrap();
let mut idx_content = Vec::<u8>::new();
file.read_to_end(&mut idx_content).unwrap();
let mut index = MemoryDictionaryIndex::new_from_vec(1, idx_content).unwrap();
assert_eq!(1_usize, index.count());
assert_eq!("test", index.get_key(0).0);
let idx_file_name = Path::new("tests/words_dic/stardict-words-2.4.2/words.idx");
let mut file = File::open(idx_file_name).unwrap();
let mut idx_content = Vec::<u8>::new();
file.read_to_end(&mut idx_content).unwrap();
let index_size = 1671704_usize;
let mut mem_index = MemoryDictionaryIndex::new_from_vec(index_size, idx_content).unwrap();
assert_eq!(index_size, mem_index.count());
let mut disk_index = DiskDictionaryIndex::new(index_size, idx_file_name).unwrap();
assert_eq!(index_size, disk_index.count());
let mut gz_mem_index = MemoryDictionaryIndex::new_from_gzip(index_size,
Path::new("tests/words_dic/stardict-words-2.4.2/words.idx.gz")).unwrap();
assert_eq!(index_size, gz_mem_index.count());
{
let f = File::open("tests/words_dic/words.dummy").unwrap();
let mut file = BufReader::new(&f);
let mut dic_words = HashSet::new();
for line in file.lines() {
let l = line.unwrap();
dic_words.insert(l);
}
for i in 0..index.count() {
for index in [&mem_index as &DictionaryIndex, &disk_index as &DictionaryIndex, &gz_mem_index as &DictionaryIndex].iter() {
let (key, _, _) = index.get_key(i);
if !dic_words.contains(key) {
panic!("no '{}'(len {}) idx {} in dic_words", key, key.len(), i);
}
match index.find(key) {
Err(idx) => panic!("we search '{}', and not found err({})", key, idx),
Ok(idx) => assert_eq!(idx, i),
}
}
}
}
}
#[test]
fn index_stardict_strcmp_small() {
let arr_s = ["a", "b", "c", "d", "z"];
let seek = "c";
assert_eq!(arr_s.binary_search_by(|probe| stardict_str_order(probe, seek)), Ok(2));
let seek = "e";
assert_eq!(arr_s.binary_search_by(|probe| stardict_str_order(probe, seek)), Err(4));
}
#[test]
fn index_stardict_strcmp_big() {
let mut exp_res_list = vec![];
{
let f = File::open("tests/stardict_strcmp_test_data_cmp_exp.txt").unwrap();
let mut file = BufReader::new(&f);
for line in file.lines() {
let line = line.unwrap();
let exp_res = line.parse::<isize>().unwrap();
exp_res_list.push(exp_res);
}
}
let f = File::open("tests/stardict_strcmp_test_data.txt").unwrap();
let mut file = BufReader::new(&f);
let mut prev_line : String = "".to_string();
let mut counter : u8 = 0;
let mut exp_it = exp_res_list.iter();
for line in file.lines() {
if counter == 1 {
let cur_line = line.unwrap();
let res = stardict_strcmp(&prev_line, &cur_line);
let exp_res = exp_it.next().unwrap();
if res != *exp_res {
panic!("we expect {}, got {} for '{}' vs '{}'", exp_res, res, prev_line, cur_line);
}
} else {
prev_line = line.unwrap();
}
counter = (counter + 1) % 2;
}
}
}

src/main.rs (new file, 67 lines)

@@ -0,0 +1,67 @@
extern crate getopts;
extern crate gettext;
extern crate ini;
extern crate byteorder;
extern crate libc;
extern crate flate2;
mod core;
mod index;
mod data;
mod mem_mapped_file;
use core::Dictionary;
use std::env;
use std::result::Result;
use getopts::Options;
use gettext::Catalog;
use std::fs::File;
use std::path::Path;
struct Library {
dicts : Vec<Dictionary>
}
impl Library {
fn new(dict_dirs: &[&Path], dict_order_names: &[String], dict_disable_names: &[String]) -> Result<Library, String> {
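// dict_dirs, dict_order_names and dict_disable_names are not consulted yet;
// a single dictionary path is hard-coded while the loader is being prototyped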
let dicts = vec![Dictionary::new(Path::new("/home/evgeniy/.stardict/dic/stardict-Mueller7accentGPL-2.4.2/Mueller7accentGPL.ifo")).unwrap()];
Ok(Library{dicts: dicts})
}
pub fn simple_lookup(&mut self, phrase: &str) /*-> Option<String>*/ {
for dict in self.dicts.iter_mut() {
if let Some(translation) = dict.find(phrase) {
println!("{}", translation);
}
}
}
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} [options] [list of words]", program);
print!("{}", opts.usage(&brief));
}
fn main() {
let translation_f = File::open("/usr/share/locale/ru/LC_MESSAGES/sdcv.mo").expect("could not open the catalog");
let catalog = Catalog::parse(translation_f).expect("could not parse the catalog");
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
opts.optflag("v", "version", catalog.gettext("display version information and exit"));
opts.optflag("h", "help", catalog.gettext("Show help options"));
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let dict_dirs = vec![Path::new("/tmp")];
let dict_order_names: Vec<String> = vec![];
let dict_disable_names: Vec<String> = vec![];
let mut library = Library::new(&dict_dirs, &dict_order_names, &dict_disable_names).unwrap();
library.simple_lookup("man");
}

src/mem_mapped_file.rs (new file, 179 lines)

@@ -0,0 +1,179 @@
use std::path::Path;
use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::ptr;
use std::slice;
use libc::{c_void, c_int};
use libc;
pub struct MemMappedFile {
mem_addr: *const u8,
length: usize,
}
#[cfg(unix)]
impl MemMappedFile {
pub fn new(file_name: &Path) -> Result<MemMappedFile, String> {
let file = try!(File::open(file_name).map_err(|err| err.to_string()));
let metadata = try!(file.metadata().map_err(|err| err.to_string()));
if !metadata.is_file() {
return Err(format!("{} is not file", file_name.to_str().unwrap()));
}
let file_len = metadata.len();
let fd = file.as_raw_fd();
let addr: *const u8 = ptr::null();
let res = unsafe {
libc::mmap(addr as *mut c_void, file_len as libc::size_t, libc::PROT_READ, libc::MAP_SHARED,
fd, 0)
};
if res == libc::MAP_FAILED {
Err(format!("mmap for {} failed", file_name.to_str().unwrap()))
} else {
Ok(MemMappedFile{mem_addr: res as *mut u8, length: file_len as usize})
}
}
pub fn len(&self) -> usize { self.length }
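// NOTE: the returned slice's lifetime 'a is not tied to &self, so callers must
// make sure the mapping outlives every slice obtained from it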
pub fn get_chunk<'a>(&self, offset: usize, size: usize) -> Result<&'a[u8], String> {
let end = offset + size;
if end > self.length {
return Err(format!("offset out of range from 0 to {}", self.length));
}
Ok(unsafe {slice::from_raw_parts(self.mem_addr.offset(offset as isize), size) })
}
}
#[cfg(unix)]
impl Drop for MemMappedFile {
fn drop(&mut self) {
let res: c_int = unsafe {
// munmap only fails on logic errors (bad address or length), so treat failure as a bug
libc::munmap(self.mem_addr as *mut c_void, self.length as libc::size_t)
};
if res == -1 {
panic!("munmap return error");
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::Path;
use std::mem;
use std::slice;
static CRCTAB: [u32; 256] = [
0x00000000,
0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b,
0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6,
0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac,
0x5bd4b01b, 0x569796c2, 0x52568b75, 0x6a1936c8, 0x6ed82b7f,
0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a,
0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58,
0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033,
0xa4ad16ea, 0xa06c0b5d, 0xd4326d90, 0xd0f37027, 0xddb056fe,
0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4,
0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5,
0x2ac12072, 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, 0x7897ab07,
0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c,
0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1,
0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b,
0xbb60adfc, 0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698,
0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d,
0x94ea7b2a, 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, 0xc6bcf05f,
0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80,
0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a,
0x58c1663d, 0x558240e4, 0x51435d53, 0x251d3b9e, 0x21dc2629,
0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, 0x3f9b762c,
0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e,
0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65,
0xeba91bbc, 0xef68060b, 0xd727bbb6, 0xd3e6a601, 0xdea580d8,
0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2,
0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74,
0x857130c3, 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, 0x7b827d21,
0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a,
0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e, 0x18197087,
0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d,
0x2056cd3a, 0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce,
0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb,
0xdbee767c, 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, 0x89b8fd09,
0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf,
0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
];
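// CRC-32 with polynomial 0x04C11DB7 (MSB first) over the buffer, consuming each
// u32 as four little-endian bytes; used only to checksum the mapped index file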
fn crc32(buf: &[u32]) -> u32 {
let mut crc = 0_u32;
for item in buf {
let mut byte: u32 = item & 0xFF_u32;
crc = (crc << 8) ^ CRCTAB[(((crc >> 24) ^ byte) & 0xFF) as usize];
byte = (item >> 8) & 0xFF_u32;
crc = (crc << 8) ^ CRCTAB[(((crc >> 24) ^ byte) & 0xFF) as usize];
byte = (item >> 16) & 0xFF_u32;
crc = (crc << 8) ^ CRCTAB[(((crc >> 24) ^ byte) & 0xFF) as usize];
byte = (item >> 24) & 0xFF_u32;
crc = (crc << 8) ^ CRCTAB[(((crc >> 24) ^ byte) & 0xFF) as usize];
}
!crc & 0xFFFFFFFF_u32
}
#[test]
fn mem_mapped_file_basic_test() {
let mfile = MemMappedFile::new(Path::new("tests/words_dic/stardict-words-2.4.2/words.idx")).unwrap();
let mem_chunk = mfile.get_chunk(0, 16).unwrap();
assert_eq!(mem_chunk.len(), 16);
let exp : Vec<u8> = vec![0x21, 0, 0, 0, 0, 0, 0, 0, 0, 0xa, 0x21, 0x20, 0x23, 0x24, 0x25, 0x5e];
assert_eq!(mem_chunk, exp.as_slice());
const CHUNK_SIZE: usize = 256;
let n_chunks = (mfile.len() + CHUNK_SIZE -1) / CHUNK_SIZE;
let mut data: Vec<u32> = Vec::with_capacity(mfile.len() / 4);
for i in 0..(n_chunks - 1) {
let mem_chunk = mfile.get_chunk(i * CHUNK_SIZE, CHUNK_SIZE).unwrap();
let data_bytes = &mem_chunk[0] as *const u8;
let data_words: *const u32 = unsafe { mem::transmute(data_bytes) };
let chunk_u32_view = unsafe { slice::from_raw_parts(data_words, CHUNK_SIZE / 4) };
data.extend_from_slice(chunk_u32_view);
}
let res_crc = crc32(data.as_slice());
assert_eq!(res_crc, 0x2a04c834_u32);
let rest = mfile.len() - (n_chunks - 1) * CHUNK_SIZE;
if rest > 0 {
let mem_chunk = mfile.get_chunk((n_chunks - 1) * CHUNK_SIZE, rest).unwrap();
let add_size = (rest + 3) / 4;
for i in 0..add_size {
let mut word = 0_u32;
for j in 0..4 {
if i * 4 + j < mem_chunk.len() {
word |= (mem_chunk[i * 4 + j] as u32) << (j * 8);
}
}
data.push(word);
}
}
let res_crc = crc32(data.as_slice());
assert_eq!(res_crc, 0x6ae491df);
}
}

(file name not shown; new C++ file, 27 lines)

@@ -0,0 +1,27 @@
//g++ `pkg-config --cflags glib-2.0` call_stardict_strcmp.cpp `pkg-config --libs glib-2.0`
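// Reference helper: reads pairs of lines from stdin and prints the glib-based
// stardict_strcmp result for each pair, one number per line; this appears to be
// how the expected values for the index_stardict_strcmp_big Rust test were produced.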
#include <glib.h>
#include <locale.h>
#include <cstdlib>
#include <cstring>
#include <iostream>
static inline gint stardict_strcmp(const gchar *s1, const gchar *s2)
{
const gint a = g_ascii_strcasecmp(s1, s2);
if (a == 0)
return strcmp(s1, s2);
else
return a;
}
int main()
{
setlocale(LC_ALL, "");
std::cin.sync_with_stdio(false);
std::string line1, line2;
while (std::getline(std::cin, line1) &&
std::getline(std::cin, line2)) {
std::cout << stardict_strcmp(line1.c_str(), line2.c_str()) << "\n";
}
return EXIT_SUCCESS;
}

tests/generate_strings_pairs.py (executable, new file, 42 lines)

@@ -0,0 +1,42 @@
#!/usr/bin/env python3
import random, sys
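# Emits string pairs, two lines per pair: first pairs of real dictionary words,
# then pairs of random ASCII/Cyrillic strings, intended as input for the
# stardict_strcmp comparison tests.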
fname = "/home/evgeniy/projects/competitions/words/data/words.txt"
with open(fname, "r") as fin:
words = sorted(set([word.strip() for word in fin.readlines()]))
res = []
for i in range(0, len(words)):
max_idx = len(words) - 1
idx1 = random.randint(0, max_idx)
idx2 = random.randint(0, max_idx)
res.append((words[idx1], words[idx2]))
letters = "abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфкцчщьъэюя"
letters += letters.upper()
letters += " \t!@#$%^&*()[]"
def gen_word(req_word_len):
max_idx = len(letters) - 1
res = ""
for i in range(0, req_word_len):
res += letters[random.randint(0, max_idx)]
return res
for i in range(0, 10000):
l1 = random.randint(1, 100)
l2 = random.randint(1, 100)
res.append((gen_word(l1), gen_word(l2)))
for i in range(0, 10000):
l1 = random.randint(1, 100)
res.append((gen_word(l1), gen_word(l1)))
for item in res:
print(item[0])
print(item[1])

File diff suppressed because it is too large.

File diff suppressed because it is too large.

(file name not shown; new Python file, 10 lines)

@@ -0,0 +1,10 @@
#!/usr/bin/env python3
import sys
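# Deduplicate the word list given as argv[1] and write every unique word twice
# (one per line) to argv[2].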
with open(sys.argv[1], "r") as f:
with open(sys.argv[2], "w") as out:
words = set([word.strip() for word in f.readlines()])
for word in words:
out.write(word + "\n")
out.write(word + "\n")

File diff suppressed because it is too large.

Binary file not shown.

Binary file not shown.

Binary file not shown.

(file name not shown; new StarDict .ifo file, 7 lines)

@@ -0,0 +1,7 @@
StarDict's dict ifo file
version=2.4.2
wordcount=1671704
idxfilesize=30235592
bookname=
date=2016.06.18
sametypesequence=x

tests/words_dic/words.dummy (new file, 3343408 lines)

File diff suppressed because it is too large.