Mirror of https://github.com/Dushistov/sdcv.git, synced 2025-12-15 09:21:55 +00:00

Compare commits: 36 commits
5478f290a1
9c77e91006
b74bc2478a
58c48988f6
07cd873e9d
4545473da9
849f0ed1ac
d5e1eb4d93
3a4b76124c
6eaebaaa2f
e24722b8fc
c57ef6e916
24c08365c4
8f77ede167
3a8ab1d5c3
5887505185
beebb0faa7
49c8094b53
4346e65bd3
d144e0310c
6e36e7730c
abe5e9e72f
488ec68854
b698445ead
504e7807e6
6c80bf2d99
8742575c33
b294b76fb5
823ec3d840
6ab8b51e6c
881657b336
911fc2f561
f488f5350b
e72220e748
b77c0e793a
ebaa6f2136
.clang-format
@@ -15,7 +15,7 @@ BreakBeforeBinaryOperators: true
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
BinPackParameters: true
ColumnLimit: 0
ColumnLimit: 120
ConstructorInitializerAllOnOneLineOrOnePerLine: false
DerivePointerAlignment: false
ExperimentalAutoDetectBinPacking: false
.github/workflows/main.yml (vendored), 14 changed lines
@@ -20,14 +20,12 @@ jobs:
fail-fast: true
matrix:
os: [ubuntu-latest]
os: [ubuntu-22.04, ubuntu-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- uses: jwlawson/actions-setup-cmake@v2
with:
submodules: 'recursive'
- uses: jwlawson/actions-setup-cmake@v1.4
with:
cmake-version: '3.5.1'
cmake-version: '3.10'
github-api-token: ${{ secrets.GITHUB_TOKEN }}
- name: Check versions
run: |
@@ -36,6 +34,10 @@ jobs:
gcc --version
echo "end of versions checking"
shell: bash
- uses: awalsh128/cache-apt-pkgs-action@v1
with:
packages: libglib2.0-dev
version: 1.0
- name: Run tests
run: |
set -e
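A note on the new CI step: awalsh128/cache-apt-pkgs-action installs the listed apt packages (here libglib2.0-dev) and caches them between workflow runs, so the GLib development headers do not have to be reinstalled from the archives on every build; the version field appears to act as a manual cache-busting key.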
CMakeLists.txt
@@ -1,7 +1,11 @@
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
cmake_policy(VERSION 3.10)
project(sdcv)
cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
cmake_policy(VERSION 3.5)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED True)
set(CMAKE_CXX_EXTENSIONS False)
include("${CMAKE_CURRENT_SOURCE_DIR}/cmake/compiler.cmake")
@@ -23,7 +27,8 @@ if (WITH_READLINE)
find_path(READLINE_INCLUDE_DIR readline/readline.h)
find_library(READLINE_LIBRARY NAMES readline)
if (NOT (READLINE_INCLUDE_DIR AND READLINE_LIBRARY))
set(WITH_READLINE False CACHE FORCE)
message(STATUS "readline library not FOUND, disable it's usage")
set(WITH_READLINE False CACHE BOOL "Use readline library" FORCE)
endif ()
endif (WITH_READLINE)
@@ -33,16 +38,16 @@ set(sdcv_SRCS
src/sdcv.cpp
src/readline.cpp
src/readline.hpp
src/libwrapper.cpp
src/libwrapper.cpp
src/libwrapper.hpp
src/utils.cpp
src/utils.cpp
src/utils.hpp
src/stardict_lib.cpp
src/stardict_lib.hpp
src/dictziplib.cpp
src/dictziplib.hpp
src/distance.cpp
src/dictziplib.hpp
src/distance.cpp
src/distance.hpp
src/mapfile.hpp
)
@@ -78,10 +83,13 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake
include_directories(
${ZLIB_INCLUDE_DIR}
${GLIB2_INCLUDE_DIRS}
${READLINE_INCLUDE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/src/lib
${CMAKE_CURRENT_BINARY_DIR}
)
)
if (WITH_READLINE)
include_directories(${READLINE_INCLUDE_DIR})
endif()
#
# Packing stuff
@@ -91,7 +99,7 @@ set(CPACK_PACKAGE_VENDOR "Evgeniy Dushistov <dushistov@mail.ru>")
set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.org")
set(CPACK_PACKAGE_VERSION_MAJOR "0")
set(CPACK_PACKAGE_VERSION_MINOR "5")
set(CPACK_PACKAGE_VERSION_PATCH "3")
set(CPACK_PACKAGE_VERSION_PATCH "5")
set(sdcv_VERSION
"${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}")
@@ -103,8 +111,10 @@ add_executable(sdcv ${sdcv_SRCS})
target_link_libraries(sdcv
${GLIB2_LIBRARIES}
${ZLIB_LIBRARIES}
${READLINE_LIBRARY}
)
if (WITH_READLINE)
target_link_libraries(sdcv ${READLINE_LIBRARY})
endif()
if (ENABLE_NLS)
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "locale")
endif ()
@@ -144,5 +154,6 @@ if (BUILD_TESTS)
add_sdcv_shell_test(t_datadir)
add_sdcv_shell_test(t_return_code)
add_sdcv_shell_test(t_multiple_results)
add_sdcv_shell_test(t_newlines_in_ifo)
endif (BUILD_TESTS)
NEWS, 12 changed lines
@@ -1,3 +1,13 @@
Version 0.5.5
- Avoid crashes when passing unknown dicts to the -u flag (by NiLuJe)
- Use off_t for values mainly assigned from stat.st_size
Version 0.5.4
- Use binary search for synonyms
- Various improvements in work with synonyms
- Added --json (same as --json-output) to match the man page
- Show all matched results
- More robust parsing of ifo files
- Prevent a crash if the size of .oft files does not match the expected one
Version 0.5.3
- Use single quotes around JSON data to reduce need for escaping
- Store integer magic in cache file
@@ -36,7 +46,7 @@ Version 0.4.2
* Russian translation update

Version 0.4.1
* Recreate cache if idx file was modified
* Ability to use a pager (SDCV_PAGER)
* Add Chinese (traditional) translation
* Add Ukrainian translation
README.org, 23 changed lines
@@ -1,6 +1,9 @@
#+OPTIONS: ^:nil
[[https://github.com/Dushistov/sdcv/actions?query=workflow%3ACI+branch%3Amaster][https://github.com/Dushistov/sdcv/workflows/CI/badge.svg]]
[[https://github.com/Dushistov/sdcv/blob/master/LICENSE][https://img.shields.io/badge/license-GPL%202-brightgreen.svg]]

* sdcv
*sdcv* is a simple, cross-platform, text-based utility for working with dictionaries in [[http://stardict-4.sourceforge.net/][StarDict]] format.
* How to compile and install
#+BEGIN_SRC sh
mkdir /tmp/build-sdcv
@@ -26,6 +29,26 @@ To report bugs use https://github.com/Dushistov/sdcv/issues ,
If it is not possible, you can report it via email to dushistov at mail dot ru.
Be sure to include the word "sdcv" somewhere in the "Subject:" field.

* Integration with [[https://github.com/junegunn/fzf][fzf]]
Useful when you have multiple dictionaries:
#+BEGIN_SRC sh
fzf --prompt="Dict: " \
    --phony \
    --bind "enter:reload(sdcv {q} -n --json | jq '.[].dict' -r)" \
    --preview "sdcv {q} -en --use-dict={}" \
    --preview-window=right:70%:wrap \
    < <(echo)
#+END_SRC
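For this snippet to work, jq must be installed. fzf's --phony flag keeps the query from being used for filtering the (empty) input list, so whatever is typed is passed verbatim to sdcv through {q}, and the matching dictionary names are pulled out of sdcv's JSON output with jq for the selection list, while the preview pane shows the lookup restricted to the highlighted dictionary.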
* Integration with readline
These lines can be added to the inputrc file (~/.inputrc, /etc/inputrc)
to abort the multiple-choice prompt with a double ESC (it sends -1):
#+begin_src
$if sdcv
"\e\e": "-1\n"
$endif
#+end_src

* Notes to developer
** make source code release
#+BEGIN_SRC sh
cmake/compiler.cmake
@@ -16,19 +16,6 @@ if (NOT DEFINED SDCV_COMPILER_IS_GCC_COMPATIBLE)
endif()
endif()

if (MSVC AND (MSVC_VERSION LESS 1900))
message(FATAL_ERROR "MSVC version ${MSVC_VERSION} have no full c++11 support")
elseif (MSVC)
add_definitions(-DNOMINMAX)
elseif (NOT MSVC)
check_cxx_compiler_flag("-std=c++11" CXX_SUPPORTS_CXX11)
if (CXX_SUPPORTS_CXX11)
append("-std=c++11" CMAKE_CXX_FLAGS)
else ()
message(FATAL_ERROR "sdcv requires C++11 support but the '-std=c++11' flag isn't supported.")
endif()
endif ()

if (SDCV_COMPILER_IS_GCC_COMPATIBLE)
append("-Wall" "-Wextra" "-Wformat-security" "-Wcast-align" "-Werror=format" "-Wcast-qual" CMAKE_C_FLAGS)
append("-Wall" "-pedantic" "-Wextra" "-Wformat-security" "-Wcast-align" "-Werror=format" "-Wcast-qual" CMAKE_CXX_FLAGS)
po/ka.po (new file), 147 lines
@@ -0,0 +1,147 @@
|
||||
# Georgian translation for sdcv.
|
||||
# Copyright (C) 2025 sdcv authors
|
||||
# This file is distributed under the same license as the sdcv project.
|
||||
# Temuri Doghonadze <temuri.doghonadze@gmail.com>, 2025.
|
||||
#
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: sdcv 0.5\n"
|
||||
"Report-Msgid-Bugs-To: dushistov@mail.ru\n"
|
||||
"POT-Creation-Date: 2020-08-14 12:58+0300\n"
|
||||
"PO-Revision-Date: 2025-01-26 03:17+0100\n"
|
||||
"Last-Translator: Temuri Doghonadze <temuri.doghonadze@gmail.com>\n"
|
||||
"Language-Team: Georgian <(nothing)>\n"
|
||||
"Language: ka\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Generator: Poedit 3.5\n"
|
||||
|
||||
#: ../src/libwrapper.cpp:300
|
||||
msgid "popen failed"
|
||||
msgstr "popen ჩავარდა"
|
||||
|
||||
#: ../src/libwrapper.cpp:341
|
||||
#, c-format
|
||||
msgid "Can not convert %s to utf8.\n"
|
||||
msgstr "%s-ის utf8-ში გადაყვანა შეუძლებელია.\n"
|
||||
|
||||
#: ../src/libwrapper.cpp:399 ../src/libwrapper.cpp:433
|
||||
#, c-format
|
||||
msgid "Found %zu items, similar to %s.\n"
|
||||
msgstr "აღმოჩენილია %zu ელემენტი, რომელიც %s-ს ჰგავს.\n"
|
||||
|
||||
#: ../src/libwrapper.cpp:417
|
||||
msgid "Your choice[-1 to abort]: "
|
||||
msgstr "თქვენი არჩევანი[-1 გასაუქმებლად]: "
|
||||
|
||||
#: ../src/libwrapper.cpp:427
|
||||
#, c-format
|
||||
msgid ""
|
||||
"Invalid choice.\n"
|
||||
"It must be from 0 to %zu or -1.\n"
|
||||
msgstr ""
|
||||
"არასწორი არჩევანი.\n"
|
||||
"უნდა იყოს 0-დან %zu-მდე, ან -1.\n"
|
||||
|
||||
#: ../src/libwrapper.cpp:446
|
||||
#, c-format
|
||||
msgid "Nothing similar to %s, sorry :(\n"
|
||||
msgstr "%s-ს არაფერი ჰგავს :(\n"
|
||||
|
||||
#: ../src/sdcv.cpp:89
|
||||
msgid "display version information and exit"
|
||||
msgstr "ვერსიის ჩვენება და გასვლა"
|
||||
|
||||
#: ../src/sdcv.cpp:91
|
||||
msgid "display list of available dictionaries and exit"
|
||||
msgstr "ხელმისაწვდომი ლექსიკონების ჩვენება და გასვლა"
|
||||
|
||||
#: ../src/sdcv.cpp:93
|
||||
msgid "for search use only dictionary with this bookname"
|
||||
msgstr "ძებნისთვის, მხოლოდ, ამ სახელის მქონე ლექსიკონის გამოყენება"
|
||||
|
||||
#: ../src/sdcv.cpp:94
|
||||
msgid "bookname"
|
||||
msgstr "ლექსიკონის_სახელი"
|
||||
|
||||
#: ../src/sdcv.cpp:96
|
||||
msgid "for use in scripts"
|
||||
msgstr "სკრიპტებში გამოსაყენებლად"
|
||||
|
||||
#: ../src/sdcv.cpp:98
|
||||
msgid "print the result formatted as JSON"
|
||||
msgstr "შედეგების JSON ფორმატში გამოტანა"
|
||||
|
||||
#: ../src/sdcv.cpp:100
|
||||
msgid "do not fuzzy-search for similar words, only return exact matches"
|
||||
msgstr ""
|
||||
"მსგავსი სიტყვებისთვის არაზუსტი ძებნა გამოყენებული არ იქნება. "
|
||||
"დაბრუნდება, მხოლოდ, ზუსტი დამთხვევები"
|
||||
|
||||
#: ../src/sdcv.cpp:102
|
||||
msgid "output must be in utf8"
|
||||
msgstr "გამოტანა utf8-ში უნდა იყოს"
|
||||
|
||||
#: ../src/sdcv.cpp:104
|
||||
msgid "input of sdcv in utf8"
|
||||
msgstr "პროგრამაში შეყვანა utf8 -ში"
|
||||
|
||||
#: ../src/sdcv.cpp:106
|
||||
msgid "use this directory as path to stardict data directory"
|
||||
msgstr ""
|
||||
"ამ საქაღალდის გამოყენება stardict-ის მონაცემების საქაღალდის ბილიკად"
|
||||
|
||||
#: ../src/sdcv.cpp:107
|
||||
msgid "path/to/dir"
|
||||
msgstr "ბილიკი/სასურველ/საქაღალდემდე"
|
||||
|
||||
#: ../src/sdcv.cpp:109
|
||||
msgid ""
|
||||
"only use the dictionaries in data-dir, do not search in user and system "
|
||||
"directories"
|
||||
msgstr ""
|
||||
"ლექსიკონების, მხოლოდ, data-dir-დან გამოყენება. მომხმარებლის და სისტემურ "
|
||||
"საქაღალდეებში ძებნა არ მოხდება"
|
||||
|
||||
#: ../src/sdcv.cpp:111
|
||||
msgid "colorize the output"
|
||||
msgstr "ფერებში გამოტანა"
|
||||
|
||||
#: ../src/sdcv.cpp:116
|
||||
msgid " words"
|
||||
msgstr " სიტყვა"
|
||||
|
||||
#: ../src/sdcv.cpp:122
|
||||
#, c-format
|
||||
msgid "Invalid command line arguments: %s\n"
|
||||
msgstr "არასწორი ბრძანების სტრიქონის არგუმენტები: %s\n"
|
||||
|
||||
#: ../src/sdcv.cpp:128
|
||||
#, c-format
|
||||
msgid "Console version of Stardict, version %s\n"
|
||||
msgstr "Startdict-ის კონსოლის ვერსია. ვერსია %s\n"
|
||||
|
||||
#: ../src/sdcv.cpp:206
|
||||
#, c-format
|
||||
msgid "g_mkdir failed: %s\n"
|
||||
msgstr "g_mkdir ჩავარდა: %s\n"
|
||||
|
||||
#: ../src/sdcv.cpp:222
|
||||
msgid "Enter word or phrase: "
|
||||
msgstr "შეიყვანეთ სიტყვა ან ფრაზა: "
|
||||
|
||||
#: ../src/sdcv.cpp:230
|
||||
#, c-format
|
||||
msgid "There are no words/phrases to translate.\n"
|
||||
msgstr "სათარგმნი სიტყვების/ფრაზების გარეშე.\n"
|
||||
|
||||
#: ../src/sdcv.cpp:242
|
||||
#, c-format
|
||||
msgid "Dictionary's name Word count\n"
|
||||
msgstr "ლექსიკონის სახელი სიტყვების რაოდენობა\n"
|
||||
|
||||
#: ../src/utils.cpp:48
|
||||
#, c-format
|
||||
msgid "Can not convert %s to current locale.\n"
|
||||
msgstr "ვერ გადავიყვანე %s მიმდინარე ლოკალში.\n"
|
||||
src/dictziplib.hpp
@@ -27,7 +27,7 @@ public:
private:
const char *start; /* start of mmap'd area */
const char *end; /* end of mmap'd area */
unsigned long size; /* size of mmap */
off_t size; /* size of mmap */

int type;
z_stream zStream;
@@ -47,7 +47,7 @@ private:
std::string origFilename;
std::string comment;
unsigned long crc;
unsigned long length;
off_t length;
unsigned long compressedLength;
DictCache cache[DICT_CACHE_SIZE];
MapFile mapfile;
src/libwrapper.cpp
@@ -23,6 +23,7 @@
#endif

#include <cstring>
#include <cstdio> //for popen
#include <map>
#include <memory>

@@ -415,10 +416,9 @@ search_result Library::process_phrase(const char *loc_str, IReadLine &io, bool f
colorize_output_ ? ESC_END : "");
}
int choise;
std::unique_ptr<IReadLine> choice_readline(create_readline_object());
for (;;) {
std::string str_choise;
choice_readline->read(_("Your choice[-1 to abort]: "), str_choise);
io.read(_("Your choice[-1 to abort]: "), str_choise);
sscanf(str_choise.c_str(), "%d", &choise);
if (choise >= 0 && choise < int(res_list.size())) {
sdcv_pager pager;
src/mapfile.hpp
@@ -7,6 +7,7 @@
#ifdef HAVE_MMAP
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#endif
#ifdef _WIN32
@@ -21,13 +22,13 @@ public:
~MapFile();
MapFile(const MapFile &) = delete;
MapFile &operator=(const MapFile &) = delete;
bool open(const char *file_name, unsigned long file_size);
bool open(const char *file_name, off_t file_size);
gchar *begin() { return data; }

private:
char *data = nullptr;
unsigned long size = 0ul;
#ifdef HAVE_MMAP
size_t size = 0u;
int mmap_fd = -1;
#elif defined(_WIN32)
HANDLE hFile = 0;
@@ -35,25 +36,31 @@ private:
#endif
};

inline bool MapFile::open(const char *file_name, unsigned long file_size)
inline bool MapFile::open(const char *file_name, off_t file_size)
{
size = file_size;
#ifdef HAVE_MMAP
if ((mmap_fd = ::open(file_name, O_RDONLY)) < 0) {
//g_print("Open file %s failed!\n",fullfilename);
// g_print("Open file %s failed!\n",fullfilename);
return false;
}
data = (gchar *)mmap(nullptr, file_size, PROT_READ, MAP_SHARED, mmap_fd, 0);
struct stat st;
if (fstat(mmap_fd, &st) == -1 || st.st_size < 0 || (st.st_size == 0 && S_ISREG(st.st_mode))
|| st.st_size != file_size) {
close(mmap_fd);
return false;
}

size = static_cast<size_t>(st.st_size);
data = (gchar *)mmap(nullptr, size, PROT_READ, MAP_SHARED, mmap_fd, 0);
if ((void *)data == (void *)(-1)) {
//g_print("mmap file %s failed!\n",idxfilename);
// g_print("mmap file %s failed!\n",idxfilename);
size = 0u;
data = nullptr;
return false;
}
#elif defined(_WIN32)
hFile = CreateFile(file_name, GENERIC_READ, 0, nullptr, OPEN_ALWAYS,
FILE_ATTRIBUTE_NORMAL, 0);
hFileMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0,
file_size, nullptr);
hFile = CreateFile(file_name, GENERIC_READ, 0, nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
hFileMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, file_size, nullptr);
data = (gchar *)MapViewOfFile(hFileMap, FILE_MAP_READ, 0, 0, file_size);
#else
gsize read_len;
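The net effect of the HAVE_MMAP branch above is "stat first, then map": the file's real size reported by fstat() is checked against the size recorded in the dictionary's .ifo file before mmap() is called, so a truncated or mis-sized index is rejected instead of being mapped and read past its end. A minimal free-standing sketch of that pattern (illustrative names only, not the project's MapFile API):

#+BEGIN_SRC cpp
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

// Sketch: validate the on-disk size with fstat() before mmap()ing,
// instead of trusting the size recorded elsewhere (e.g. in the .ifo file).
static char *map_readonly(const char *path, off_t expected_size, size_t &out_size)
{
    int fd = ::open(path, O_RDONLY);
    if (fd < 0)
        return nullptr;
    struct stat st;
    if (fstat(fd, &st) == -1 || st.st_size != expected_size) {
        ::close(fd);
        return nullptr; // size mismatch: refuse to map rather than crash later
    }
    out_size = static_cast<size_t>(st.st_size);
    void *p = mmap(nullptr, out_size, PROT_READ, MAP_SHARED, fd, 0);
    ::close(fd); // the mapping stays valid after the descriptor is closed
    return p == MAP_FAILED ? nullptr : static_cast<char *>(p);
}
#+END_SRC

The real MapFile additionally rejects empty regular files and keeps the descriptor in a member for its destructor; the sketch closes it immediately since munmap() does not need it.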
src/sdcv.cpp, 18 changed lines
@@ -186,10 +186,13 @@ try {
}

// add bookname to list
gchar **p = get_impl(use_dict_list);
while (*p) {
order_list.push_back(bookname_to_ifo.at(*p));
++p;
for (gchar **p = get_impl(use_dict_list); *p != nullptr; ++p) {
auto it = bookname_to_ifo.find(*p);
if (it != bookname_to_ifo.end()) {
order_list.push_back(it->second);
} else {
fprintf(stderr, _("Unknown dictionary: %s\n"), *p);
}
}
} else {
std::string ordering_cfg_file = std::string(g_get_user_config_dir()) + G_DIR_SEPARATOR_S "sdcv_ordering";
@@ -201,7 +204,12 @@ try {
if (ordering_file != nullptr) {
std::string line;
while (stdio_getline(ordering_file, line)) {
order_list.push_back(bookname_to_ifo.at(line));
auto it = bookname_to_ifo.find(line);
if (it != bookname_to_ifo.end()) {
order_list.push_back(it->second);
} else {
fprintf(stderr, _("Unknown dictionary: %s\n"), line.c_str());
}
}
fclose(ordering_file);
}
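Design note: looking the bookname up with find() and printing an "Unknown dictionary" message, instead of calling map::at(), means a dictionary name that is not installed (whether passed via -u/--use-dict or listed in the sdcv_ordering file) is reported on stderr and skipped, rather than terminating the program with an unhandled std::out_of_range. This is the crash fix listed for the -u flag under version 0.5.5 in NEWS.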
src/stardict_lib.cpp
@@ -5,6 +5,7 @@
#include <algorithm>
#include <cctype>
#include <cstring>
#include <map>
#include <stdexcept>

#include <glib/gstdio.h>
@@ -47,9 +48,9 @@ static bool bIsPureEnglish(const gchar *str)
{
// i think this should work even when it is UTF8 string :).
for (int i = 0; str[i] != 0; i++)
//if(str[i]<0)
//if(str[i]<32 || str[i]>126) // tab equal 9,so this is not OK.
// Better use isascii() but not str[i]<0 while char is default unsigned in arm
// if(str[i]<0)
// if(str[i]<32 || str[i]>126) // tab equal 9,so this is not OK.
// Better use isascii() but not str[i]<0 while char is default unsigned in arm
if (!isascii(str[i]))
return false;
return true;
@@ -78,108 +79,93 @@ bool DictInfo::load_from_ifo_file(const std::string &ifofilename,
{
ifo_file_name = ifofilename;
glib::CharStr buffer;
if (!g_file_get_contents(ifofilename.c_str(), get_addr(buffer), nullptr, nullptr))
gsize length = 0;
if (!g_file_get_contents(ifofilename.c_str(), get_addr(buffer), &length, nullptr)) {
fprintf(stderr, "Can not read from %s\n", ifofilename.c_str());
return false;
}

static const char TREEDICT_MAGIC_DATA[] = "StarDict's treedict ifo file";
static const char DICT_MAGIC_DATA[] = "StarDict's dict ifo file";

const gchar *magic_data = istreedict ? TREEDICT_MAGIC_DATA : DICT_MAGIC_DATA;
static const unsigned char utf8_bom[] = { 0xEF, 0xBB, 0xBF, '\0' };
if (!g_str_has_prefix(
g_str_has_prefix(get_impl(buffer), (const gchar *)(utf8_bom)) ? get_impl(buffer) + 3 : get_impl(buffer),
magic_data)) {
static const gchar utf8_bom[] = { (gchar)0xEF, (gchar)0xBB, (gchar)0xBF, '\0' };

const gchar *p = get_impl(buffer);
const gchar *end = p + length;

if (g_str_has_prefix(p, utf8_bom)) {
p += strlen(utf8_bom);
}
if (!g_str_has_prefix(p, magic_data)) {
fprintf(stderr, "No magic header(%s) in ifo file\n", magic_data);
return false;
}
p += strlen(magic_data);

gchar *p1 = get_impl(buffer) + strlen(magic_data) - 1;
std::map<std::string, std::string> key_value_map;
while (p != end) {
auto key_it = std::find_if(p, end, [](gchar ch) { return !g_ascii_isspace(ch); });
if (key_it == end) {
break;
}
auto eq_it = std::find(key_it, end, gchar('='));
if (eq_it == end) {
fprintf(stderr, "Invalid part of ifo (no '=') here: %s\n", key_it);
return false;
}
auto val_it = std::find_if(eq_it + 1, end, [](gchar ch) { return !g_ascii_isspace(ch); });
if (val_it == end) {
key_value_map.insert(std::make_pair(std::string(key_it, eq_it), std::string()));
break;
}

gchar *p2 = strstr(p1, "\nwordcount=");
if (p2 == nullptr)
return false;
auto line_end_it = std::find_if(val_it, end, [](gchar ch) { return ch == '\r' || ch == '\n'; });
key_value_map.insert(std::make_pair(std::string(key_it, eq_it), std::string(val_it, line_end_it)));
if (line_end_it == end)
break;
p = line_end_it + 1;
}

gchar *p3 = strchr(p2 + sizeof("\nwordcount=") - 1, '\n');
std::map<std::string, std::string>::const_iterator it;
#define FIND_KEY(_key_) \
it = key_value_map.find(_key_); \
if (it == key_value_map.end()) { \
fprintf(stderr, "Can not find '%s' in ifo file\n", _key_); \
return false; \
}

wordcount = atol(std::string(p2 + sizeof("\nwordcount=") - 1, p3 - (p2 + sizeof("\nwordcount=") - 1)).c_str());
FIND_KEY("wordcount")
wordcount = atol(it->second.c_str());

if (istreedict) {
p2 = strstr(p1, "\ntdxfilesize=");
if (p2 == nullptr)
return false;

p3 = strchr(p2 + sizeof("\ntdxfilesize=") - 1, '\n');

index_file_size = atol(std::string(p2 + sizeof("\ntdxfilesize=") - 1, p3 - (p2 + sizeof("\ntdxfilesize=") - 1)).c_str());

FIND_KEY("tdxfilesize")
index_file_size = atol(it->second.c_str());
} else {
FIND_KEY("idxfilesize")
index_file_size = atol(it->second.c_str());
}
FIND_KEY("bookname")
bookname = it->second;

p2 = strstr(p1, "\nidxfilesize=");
if (p2 == nullptr)
return false;

p3 = strchr(p2 + sizeof("\nidxfilesize=") - 1, '\n');
index_file_size = atol(std::string(p2 + sizeof("\nidxfilesize=") - 1, p3 - (p2 + sizeof("\nidxfilesize=") - 1)).c_str());
#define SET_IF_EXISTS(_key_) \
it = key_value_map.find(#_key_); \
if (it != key_value_map.end()) { \
_key_ = it->second; \
}

p2 = strstr(p1, "\nbookname=");

if (p2 == nullptr)
return false;

p2 = p2 + sizeof("\nbookname=") - 1;
p3 = strchr(p2, '\n');
bookname.assign(p2, p3 - p2);

p2 = strstr(p1, "\nauthor=");
if (p2) {
p2 = p2 + sizeof("\nauthor=") - 1;
p3 = strchr(p2, '\n');
author.assign(p2, p3 - p2);
}

p2 = strstr(p1, "\nemail=");
if (p2) {
p2 = p2 + sizeof("\nemail=") - 1;
p3 = strchr(p2, '\n');
email.assign(p2, p3 - p2);
}

p2 = strstr(p1, "\nwebsite=");
if (p2) {
p2 = p2 + sizeof("\nwebsite=") - 1;
p3 = strchr(p2, '\n');
website.assign(p2, p3 - p2);
}

p2 = strstr(p1, "\ndate=");
if (p2) {
p2 = p2 + sizeof("\ndate=") - 1;
p3 = strchr(p2, '\n');
date.assign(p2, p3 - p2);
}

p2 = strstr(p1, "\ndescription=");
if (p2) {
p2 = p2 + sizeof("\ndescription=") - 1;
p3 = strchr(p2, '\n');
description.assign(p2, p3 - p2);
}

p2 = strstr(p1, "\nsametypesequence=");
if (p2) {
p2 += sizeof("\nsametypesequence=") - 1;
p3 = strchr(p2, '\n');
sametypesequence.assign(p2, p3 - p2);
}

p2 = strstr(p1, "\nsynwordcount=");
SET_IF_EXISTS(author)
SET_IF_EXISTS(email)
SET_IF_EXISTS(website)
SET_IF_EXISTS(date)
SET_IF_EXISTS(description)
SET_IF_EXISTS(sametypesequence)
syn_wordcount = 0;
if (p2) {
p2 += sizeof("\nsynwordcount=") - 1;
p3 = strchr(p2, '\n');
syn_wordcount = atol(std::string(p2, p3 - p2).c_str());
}

it = key_value_map.find("synwordcount");
if (it != key_value_map.end())
syn_wordcount = atol(it->second.c_str());
#undef FIND_KEY
#undef SET_IF_EXISTS
return true;
}
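The rewritten loader above replaces the chain of strstr() scans with a single pass that splits the .ifo buffer into key=value pairs and then reads the required and optional fields out of a map (that is what the FIND_KEY and SET_IF_EXISTS macros do). Reduced to a free-standing sketch with illustrative names (not the project's exact code), the parsing idea is:

#+BEGIN_SRC cpp
#include <algorithm>
#include <cctype>
#include <map>
#include <string>

// Sketch: split an .ifo buffer into key=value pairs, ending each value at
// either '\r' or '\n', so files with Windows-style line endings parse too.
// The real loader additionally skips leading whitespace in the value and
// reports malformed lines instead of silently stopping.
static std::map<std::string, std::string> parse_ifo(const char *p, const char *end)
{
    std::map<std::string, std::string> kv;
    while (p != end) {
        const char *key = std::find_if(p, end, [](unsigned char c) { return !std::isspace(c); });
        if (key == end)
            break;
        const char *eq = std::find(key, end, '=');
        if (eq == end)
            break; // malformed line without '='
        const char *eol = std::find_if(eq + 1, end, [](char c) { return c == '\r' || c == '\n'; });
        kv.emplace(std::string(key, eq), std::string(eq + 1, eol));
        p = (eol == end) ? end : eol + 1;
    }
    return kv;
}
#+END_SRC

Parsing the whole file into a map first is what makes the "newlines in ifo" test below pass: the field order no longer matters, and a "\r\n" terminator no longer leaks into the stored value.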
@@ -204,10 +190,10 @@ gchar *DictBase::GetWordData(guint32 idxitem_offset, guint32 idxitem_size)
|
||||
|
||||
guint32 data_size;
|
||||
gint sametypesequence_len = sametypesequence.length();
|
||||
//there have sametypesequence_len char being omitted.
|
||||
// there have sametypesequence_len char being omitted.
|
||||
data_size = idxitem_size + sizeof(guint32) + sametypesequence_len;
|
||||
//if the last item's size is determined by the end up '\0',then +=sizeof(gchar);
|
||||
//if the last item's size is determined by the head guint32 type data,then +=sizeof(guint32);
|
||||
// if the last item's size is determined by the end up '\0',then +=sizeof(gchar);
|
||||
// if the last item's size is determined by the head guint32 type data,then +=sizeof(guint32);
|
||||
switch (sametypesequence[sametypesequence_len - 1]) {
|
||||
case 'm':
|
||||
case 't':
|
||||
@@ -234,7 +220,7 @@ gchar *DictBase::GetWordData(guint32 idxitem_offset, guint32 idxitem_size)
|
||||
p1 = data + sizeof(guint32);
|
||||
p2 = get_impl(origin_data);
|
||||
guint32 sec_size;
|
||||
//copy the head items.
|
||||
// copy the head items.
|
||||
for (int i = 0; i < sametypesequence_len - 1; i++) {
|
||||
*p1 = sametypesequence[i];
|
||||
p1 += sizeof(gchar);
|
||||
@@ -272,7 +258,7 @@ gchar *DictBase::GetWordData(guint32 idxitem_offset, guint32 idxitem_size)
|
||||
break;
|
||||
}
|
||||
}
|
||||
//calculate the last item 's size.
|
||||
// calculate the last item 's size.
|
||||
sec_size = idxitem_size - (p2 - get_impl(origin_data));
|
||||
*p1 = sametypesequence[sametypesequence_len - 1];
|
||||
p1 += sizeof(gchar);
|
||||
@@ -286,7 +272,7 @@ gchar *DictBase::GetWordData(guint32 idxitem_offset, guint32 idxitem_size)
|
||||
case 'k':
|
||||
memcpy(p1, p2, sec_size);
|
||||
p1 += sec_size;
|
||||
*p1 = '\0'; //add the end up '\0';
|
||||
*p1 = '\0'; // add the end up '\0';
|
||||
break;
|
||||
case 'W':
|
||||
case 'P':
|
||||
@@ -443,7 +429,7 @@ public:
|
||||
if (idxfile)
|
||||
fclose(idxfile);
|
||||
}
|
||||
bool load(const std::string &url, gulong wc, gulong fsize, bool verbose) override;
|
||||
bool load(const std::string &url, gulong wc, off_t fsize, bool verbose) override;
|
||||
const gchar *get_key(glong idx) override;
|
||||
void get_data(glong idx) override { get_key(idx); }
|
||||
const gchar *get_key_and_data(glong idx) override
|
||||
@@ -503,7 +489,7 @@ public:
|
||||
{
|
||||
}
|
||||
~WordListIndex() { g_free(idxdatabuf); }
|
||||
bool load(const std::string &url, gulong wc, gulong fsize, bool verbose) override;
|
||||
bool load(const std::string &url, gulong wc, off_t fsize, bool verbose) override;
|
||||
const gchar *get_key(glong idx) override { return wordlist[idx]; }
|
||||
void get_data(glong idx) override;
|
||||
const gchar *get_key_and_data(glong idx) override
|
||||
@@ -542,7 +528,7 @@ inline const gchar *OffsetIndex::read_first_on_page_key(glong page_idx)
|
||||
std::min(sizeof(wordentry_buf), static_cast<size_t>(page_size)),
|
||||
1, idxfile);
|
||||
THROW_IF_ERROR(nitems == 1);
|
||||
//TODO: check returned values, deal with word entry that strlen>255.
|
||||
// TODO: check returned values, deal with word entry that strlen>255.
|
||||
return wordentry_buf;
|
||||
}
|
||||
|
||||
@@ -629,12 +615,12 @@ bool OffsetIndex::save_cache(const std::string &url, bool verbose)
|
||||
return false;
|
||||
}
|
||||
|
||||
bool OffsetIndex::load(const std::string &url, gulong wc, gulong fsize, bool verbose)
|
||||
bool OffsetIndex::load(const std::string &url, gulong wc, off_t fsize, bool verbose)
|
||||
{
|
||||
wordcount = wc;
|
||||
gulong npages = (wc - 1) / ENTR_PER_PAGE + 2;
|
||||
wordoffset.resize(npages);
|
||||
if (!load_cache(url)) { //map file will close after finish of block
|
||||
if (!load_cache(url)) { // map file will close after finish of block
|
||||
MapFile map_file;
|
||||
if (!map_file.open(url.c_str(), fsize))
|
||||
return false;
|
||||
@@ -756,10 +742,10 @@ bool OffsetIndex::lookup(const char *str, std::set<glong> &idxs, glong &next_idx
|
||||
}
|
||||
|
||||
if (!bFound)
|
||||
next_idx = iPage*ENTR_PER_PAGE + iFrom; // next
|
||||
next_idx = iPage * ENTR_PER_PAGE + iFrom; // next
|
||||
else {
|
||||
// Convert the found in-page index to the dict index.
|
||||
iThisIndex = iPage*ENTR_PER_PAGE + iThisIndex;
|
||||
iThisIndex = iPage * ENTR_PER_PAGE + iThisIndex;
|
||||
// In order to return all idxs that match the search string, walk
|
||||
// linearly behind and ahead of the found index.
|
||||
glong iHeadIndex = iThisIndex - 1; // do not include iThisIndex
|
||||
@@ -772,7 +758,7 @@ bool OffsetIndex::lookup(const char *str, std::set<glong> &idxs, glong &next_idx
|
||||
return bFound;
|
||||
}
|
||||
|
||||
bool WordListIndex::load(const std::string &url, gulong wc, gulong fsize, bool)
|
||||
bool WordListIndex::load(const std::string &url, gulong wc, off_t fsize, bool)
|
||||
{
|
||||
gzFile in = gzopen(url.c_str(), "rb");
|
||||
if (in == nullptr)
|
||||
@@ -785,7 +771,7 @@ bool WordListIndex::load(const std::string &url, gulong wc, gulong fsize, bool)
|
||||
if (len < 0)
|
||||
return false;
|
||||
|
||||
if (gulong(len) != fsize)
|
||||
if (static_cast<off_t>(len) != fsize)
|
||||
return false;
|
||||
|
||||
wordlist.resize(wc + 1);
|
||||
@@ -834,7 +820,7 @@ bool WordListIndex::lookup(const char *str, std::set<glong> &idxs, glong &next_i
|
||||
}
|
||||
}
|
||||
if (!bFound)
|
||||
next_idx = iFrom; //next
|
||||
next_idx = iFrom; // next
|
||||
else {
|
||||
// In order to return all idxs that match the search string, walk
|
||||
// linearly behind and ahead of the found index.
|
||||
@@ -905,19 +891,19 @@ bool SynFile::lookup(const char *str, std::set<glong> &idxs, glong &next_idx)
|
||||
}
|
||||
}
|
||||
if (!bFound)
|
||||
next_idx = iFrom; //next
|
||||
next_idx = iFrom; // next
|
||||
else {
|
||||
// In order to return all idxs that match the search string, walk
|
||||
// linearly behind and ahead of the found index.
|
||||
glong iHeadIndex = iThisIndex - 1; // do not include iThisIndex
|
||||
while (iHeadIndex >= 0 && stardict_strcmp(str, get_key(iHeadIndex)) == 0) {
|
||||
const gchar *key = get_key(iHeadIndex--);
|
||||
idxs.insert(g_ntohl(get_uint32(key+strlen(key)+1)));
|
||||
idxs.insert(g_ntohl(get_uint32(key + strlen(key) + 1)));
|
||||
}
|
||||
do {
|
||||
// no need to double-check iThisIndex -- we know it's a match already
|
||||
const gchar *key = get_key(iThisIndex++);
|
||||
idxs.insert(g_ntohl(get_uint32(key+strlen(key)+1)));
|
||||
idxs.insert(g_ntohl(get_uint32(key + strlen(key) + 1)));
|
||||
} while (iThisIndex <= iLast && stardict_strcmp(str, get_key(iThisIndex)) == 0);
|
||||
}
|
||||
}
|
||||
@@ -934,7 +920,7 @@ bool Dict::Lookup(const char *str, std::set<glong> &idxs, glong &next_idx)
|
||||
|
||||
bool Dict::load(const std::string &ifofilename, bool verbose)
|
||||
{
|
||||
gulong idxfilesize;
|
||||
off_t idxfilesize;
|
||||
if (!load_ifofile(ifofilename, idxfilesize))
|
||||
return false;
|
||||
|
||||
@@ -944,14 +930,14 @@ bool Dict::load(const std::string &ifofilename, bool verbose)
|
||||
if (g_file_test(fullfilename.c_str(), G_FILE_TEST_EXISTS)) {
|
||||
dictdzfile.reset(new DictData);
|
||||
if (!dictdzfile->open(fullfilename, 0)) {
|
||||
//g_print("open file %s failed!\n",fullfilename);
|
||||
// g_print("open file %s failed!\n",fullfilename);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
fullfilename.erase(fullfilename.length() - sizeof(".dz") + 1, sizeof(".dz") - 1);
|
||||
dictfile = fopen(fullfilename.c_str(), "rb");
|
||||
if (!dictfile) {
|
||||
//g_print("open file %s failed!\n",fullfilename);
|
||||
// g_print("open file %s failed!\n",fullfilename);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -974,11 +960,11 @@ bool Dict::load(const std::string &ifofilename, bool verbose)
|
||||
syn_file.reset(new SynFile);
|
||||
syn_file->load(fullfilename, syn_wordcount);
|
||||
|
||||
//g_print("bookname: %s , wordcount %lu\n", bookname.c_str(), narticles());
|
||||
// g_print("bookname: %s , wordcount %lu\n", bookname.c_str(), narticles());
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Dict::load_ifofile(const std::string &ifofilename, gulong &idxfilesize)
|
||||
bool Dict::load_ifofile(const std::string &ifofilename, off_t &idxfilesize)
|
||||
{
|
||||
DictInfo dict_info;
|
||||
if (!dict_info.load_from_ifo_file(ifofilename, false))
|
||||
@@ -1003,7 +989,7 @@ bool Dict::LookupWithRule(GPatternSpec *pspec, glong *aIndex, int iBuffLen)
|
||||
int iIndexCount = 0;
|
||||
|
||||
for (guint32 i = 0; i < narticles() && iIndexCount < (iBuffLen - 1); i++)
|
||||
if (g_pattern_match_string(pspec, get_key(i)))
|
||||
if (g_pattern_spec_match_string(pspec, get_key(i)))
|
||||
aIndex[iIndexCount++] = i;
|
||||
|
||||
aIndex[iIndexCount] = -1; // -1 is the end.
|
||||
@@ -1061,12 +1047,12 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
}
|
||||
// Upper the first character and lower others.
|
||||
if (!bFound) {
|
||||
gchar *nextchar = g_utf8_next_char(sWord);
|
||||
gchar *firstchar = g_utf8_strup(sWord, nextchar - sWord);
|
||||
nextchar = g_utf8_strdown(nextchar, -1);
|
||||
casestr = g_strdup_printf("%s%s", firstchar, nextchar);
|
||||
const gchar *rest = g_utf8_next_char(sWord);
|
||||
gchar *firstchar = g_utf8_strup(sWord, rest - sWord);
|
||||
gchar *rest_lowercase = g_utf8_strdown(rest, -1);
|
||||
casestr = g_strconcat(firstchar, rest_lowercase, nullptr);
|
||||
g_free(rest_lowercase);
|
||||
g_free(firstchar);
|
||||
g_free(nextchar);
|
||||
if (strcmp(casestr, sWord)) {
|
||||
if (oLib[iLib]->Lookup(casestr, iWordIndices))
|
||||
bFound = true;
|
||||
@@ -1082,7 +1068,7 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
|
||||
gchar *sNewWord = (gchar *)g_malloc(iWordLen + 1);
|
||||
|
||||
//cut one char "s" or "d"
|
||||
// cut one char "s" or "d"
|
||||
if (!bFound && iWordLen > 1) {
|
||||
isupcase = sWord[iWordLen - 1] == 'S' || !strncmp(&sWord[iWordLen - 2], "ED", 2);
|
||||
if (isupcase || sWord[iWordLen - 1] == 's' || !strncmp(&sWord[iWordLen - 2], "ed", 2)) {
|
||||
@@ -1101,14 +1087,14 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
}
|
||||
}
|
||||
|
||||
//cut "ly"
|
||||
// cut "ly"
|
||||
if (!bFound && iWordLen > 2) {
|
||||
isupcase = !strncmp(&sWord[iWordLen - 2], "LY", 2);
|
||||
if (isupcase || (!strncmp(&sWord[iWordLen - 2], "ly", 2))) {
|
||||
strcpy(sNewWord, sWord);
|
||||
sNewWord[iWordLen - 2] = '\0'; // cut "ly"
|
||||
if (iWordLen > 5 && sNewWord[iWordLen - 3] == sNewWord[iWordLen - 4]
|
||||
&& !bIsVowel(sNewWord[iWordLen - 4]) && bIsVowel(sNewWord[iWordLen - 5])) { //doubled
|
||||
&& !bIsVowel(sNewWord[iWordLen - 4]) && bIsVowel(sNewWord[iWordLen - 5])) { // doubled
|
||||
|
||||
sNewWord[iWordLen - 3] = '\0';
|
||||
if (oLib[iLib]->Lookup(sNewWord, iWordIndices))
|
||||
@@ -1123,7 +1109,7 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
g_free(casestr);
|
||||
}
|
||||
if (!bFound)
|
||||
sNewWord[iWordLen - 3] = sNewWord[iWordLen - 4]; //restore
|
||||
sNewWord[iWordLen - 3] = sNewWord[iWordLen - 4]; // restore
|
||||
}
|
||||
}
|
||||
if (!bFound) {
|
||||
@@ -1141,14 +1127,14 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
}
|
||||
}
|
||||
|
||||
//cut "ing"
|
||||
// cut "ing"
|
||||
if (!bFound && iWordLen > 3) {
|
||||
isupcase = !strncmp(&sWord[iWordLen - 3], "ING", 3);
|
||||
if (isupcase || !strncmp(&sWord[iWordLen - 3], "ing", 3)) {
|
||||
strcpy(sNewWord, sWord);
|
||||
sNewWord[iWordLen - 3] = '\0';
|
||||
if (iWordLen > 6 && (sNewWord[iWordLen - 4] == sNewWord[iWordLen - 5])
|
||||
&& !bIsVowel(sNewWord[iWordLen - 5]) && bIsVowel(sNewWord[iWordLen - 6])) { //doubled
|
||||
&& !bIsVowel(sNewWord[iWordLen - 5]) && bIsVowel(sNewWord[iWordLen - 6])) { // doubled
|
||||
sNewWord[iWordLen - 4] = '\0';
|
||||
if (oLib[iLib]->Lookup(sNewWord, iWordIndices))
|
||||
bFound = true;
|
||||
@@ -1162,7 +1148,7 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
g_free(casestr);
|
||||
}
|
||||
if (!bFound)
|
||||
sNewWord[iWordLen - 4] = sNewWord[iWordLen - 5]; //restore
|
||||
sNewWord[iWordLen - 4] = sNewWord[iWordLen - 5]; // restore
|
||||
}
|
||||
}
|
||||
if (!bFound) {
|
||||
@@ -1196,7 +1182,7 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
}
|
||||
}
|
||||
|
||||
//cut two char "es"
|
||||
// cut two char "es"
|
||||
if (!bFound && iWordLen > 3) {
|
||||
isupcase = (!strncmp(&sWord[iWordLen - 2], "ES", 2) && (sWord[iWordLen - 3] == 'S' || sWord[iWordLen - 3] == 'X' || sWord[iWordLen - 3] == 'O' || (iWordLen > 4 && sWord[iWordLen - 3] == 'H' && (sWord[iWordLen - 4] == 'C' || sWord[iWordLen - 4] == 'S'))));
|
||||
if (isupcase || (!strncmp(&sWord[iWordLen - 2], "es", 2) && (sWord[iWordLen - 3] == 's' || sWord[iWordLen - 3] == 'x' || sWord[iWordLen - 3] == 'o' || (iWordLen > 4 && sWord[iWordLen - 3] == 'h' && (sWord[iWordLen - 4] == 'c' || sWord[iWordLen - 4] == 's'))))) {
|
||||
@@ -1215,14 +1201,14 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
}
|
||||
}
|
||||
|
||||
//cut "ed"
|
||||
// cut "ed"
|
||||
if (!bFound && iWordLen > 3) {
|
||||
isupcase = !strncmp(&sWord[iWordLen - 2], "ED", 2);
|
||||
if (isupcase || !strncmp(&sWord[iWordLen - 2], "ed", 2)) {
|
||||
strcpy(sNewWord, sWord);
|
||||
sNewWord[iWordLen - 2] = '\0';
|
||||
if (iWordLen > 5 && (sNewWord[iWordLen - 3] == sNewWord[iWordLen - 4])
|
||||
&& !bIsVowel(sNewWord[iWordLen - 4]) && bIsVowel(sNewWord[iWordLen - 5])) { //doubled
|
||||
&& !bIsVowel(sNewWord[iWordLen - 4]) && bIsVowel(sNewWord[iWordLen - 5])) { // doubled
|
||||
sNewWord[iWordLen - 3] = '\0';
|
||||
if (oLib[iLib]->Lookup(sNewWord, iWordIndices))
|
||||
bFound = true;
|
||||
@@ -1236,7 +1222,7 @@ bool Libs::LookupSimilarWord(const gchar *sWord, std::set<glong> &iWordIndices,
|
||||
g_free(casestr);
|
||||
}
|
||||
if (!bFound)
|
||||
sNewWord[iWordLen - 3] = sNewWord[iWordLen - 4]; //restore
|
||||
sNewWord[iWordLen - 3] = sNewWord[iWordLen - 4]; // restore
|
||||
}
|
||||
}
|
||||
if (!bFound) {
|
||||
@@ -1386,8 +1372,8 @@ bool Libs::LookupWithFuzzy(const gchar *sWord, gchar *reslist[], gint reslist_si
|
||||
if (progress_func)
|
||||
progress_func();
|
||||
|
||||
//if (stardict_strcmp(sWord, poGetWord(0,iLib))>=0 && stardict_strcmp(sWord, poGetWord(narticles(iLib)-1,iLib))<=0) {
|
||||
//there are Chinese dicts and English dicts...
|
||||
// if (stardict_strcmp(sWord, poGetWord(0,iLib))>=0 && stardict_strcmp(sWord, poGetWord(narticles(iLib)-1,iLib))<=0) {
|
||||
// there are Chinese dicts and English dicts...
|
||||
|
||||
const int iwords = narticles(iLib);
|
||||
for (int index = 0; index < iwords; index++) {
|
||||
@@ -1409,11 +1395,11 @@ bool Libs::LookupWithFuzzy(const gchar *sWord, gchar *reslist[], gint reslist_si
|
||||
bool bAlreadyInList = false;
|
||||
int iMaxDistanceAt = 0;
|
||||
for (int j = 0; j < reslist_size; j++) {
|
||||
if (oFuzzystruct[j].pMatchWord && strcmp(oFuzzystruct[j].pMatchWord, sCheck) == 0) { //already in list
|
||||
if (oFuzzystruct[j].pMatchWord && strcmp(oFuzzystruct[j].pMatchWord, sCheck) == 0) { // already in list
|
||||
bAlreadyInList = true;
|
||||
break;
|
||||
}
|
||||
//find the position,it will certainly be found (include the first time) as iMaxDistance is set by last time.
|
||||
// find the position,it will certainly be found (include the first time) as iMaxDistance is set by last time.
|
||||
if (oFuzzystruct[j].iMatchWordDistance == iMaxDistance) {
|
||||
iMaxDistanceAt = j;
|
||||
}
|
||||
@@ -1460,8 +1446,8 @@ gint Libs::LookupWithRule(const gchar *word, gchar **ppMatchWord)
|
||||
GPatternSpec *pspec = g_pattern_spec_new(word);
|
||||
|
||||
for (std::vector<Dict *>::size_type iLib = 0; iLib < oLib.size(); iLib++) {
|
||||
//if(oLibs.LookdupWordsWithRule(pspec,aiIndex,MAX_MATCH_ITEM_PER_LIB+1-iMatchCount,iLib))
|
||||
// -iMatchCount,so save time,but may got less result and the word may repeat.
|
||||
// if(oLibs.LookdupWordsWithRule(pspec,aiIndex,MAX_MATCH_ITEM_PER_LIB+1-iMatchCount,iLib))
|
||||
// -iMatchCount,so save time,but may got less result and the word may repeat.
|
||||
|
||||
if (oLib[iLib]->LookupWithRule(pspec, aiIndex, MAX_MATCH_ITEM_PER_LIB + 1)) {
|
||||
if (progress_func)
|
||||
@@ -1470,7 +1456,7 @@ gint Libs::LookupWithRule(const gchar *word, gchar **ppMatchWord)
|
||||
const gchar *sMatchWord = poGetWord(aiIndex[i], iLib);
|
||||
bool bAlreadyInList = false;
|
||||
for (int j = 0; j < iMatchCount; j++) {
|
||||
if (strcmp(ppMatchWord[j], sMatchWord) == 0) { //already in list
|
||||
if (strcmp(ppMatchWord[j], sMatchWord) == 0) { // already in list
|
||||
bAlreadyInList = true;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <functional>
|
||||
#include <list>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
#include <string>
|
||||
@@ -30,7 +28,7 @@ inline void set_uint32(gchar *addr, guint32 val)
|
||||
struct cacheItem {
|
||||
guint32 offset;
|
||||
gchar *data;
|
||||
//write code here to make it inline
|
||||
// write code here to make it inline
|
||||
cacheItem() { data = nullptr; }
|
||||
~cacheItem() { g_free(data); }
|
||||
};
|
||||
@@ -68,7 +66,7 @@ private:
|
||||
gint cache_cur = 0;
|
||||
};
|
||||
|
||||
//this structure contain all information about dictionary
|
||||
// this structure contain all information about dictionary
|
||||
struct DictInfo {
|
||||
std::string ifo_file_name;
|
||||
guint32 wordcount;
|
||||
@@ -79,8 +77,8 @@ struct DictInfo {
|
||||
std::string website;
|
||||
std::string date;
|
||||
std::string description;
|
||||
guint32 index_file_size;
|
||||
guint32 syn_file_size;
|
||||
off_t index_file_size;
|
||||
off_t syn_file_size;
|
||||
std::string sametypesequence;
|
||||
|
||||
bool load_from_ifo_file(const std::string &ifofilename, bool istreedict);
|
||||
@@ -93,12 +91,13 @@ public:
|
||||
guint32 wordentry_size;
|
||||
|
||||
virtual ~IIndexFile() {}
|
||||
virtual bool load(const std::string &url, gulong wc, gulong fsize, bool verbose) = 0;
|
||||
virtual bool load(const std::string &url, gulong wc, off_t fsize, bool verbose) = 0;
|
||||
virtual const gchar *get_key(glong idx) = 0;
|
||||
virtual void get_data(glong idx) = 0;
|
||||
virtual const gchar *get_key_and_data(glong idx) = 0;
|
||||
virtual bool lookup(const char *str, std::set<glong> &idxs, glong &next_idx) = 0;
|
||||
virtual bool lookup(const char *str, std::set<glong> &idxs) {
|
||||
virtual bool lookup(const char *str, std::set<glong> &idxs)
|
||||
{
|
||||
glong unused_next_idx;
|
||||
return lookup(str, idxs, unused_next_idx);
|
||||
};
|
||||
@@ -144,7 +143,8 @@ public:
|
||||
*size = idx_file->wordentry_size;
|
||||
}
|
||||
bool Lookup(const char *str, std::set<glong> &idxs, glong &next_idx);
|
||||
bool Lookup(const char *str, std::set<glong> &idxs) {
|
||||
bool Lookup(const char *str, std::set<glong> &idxs)
|
||||
{
|
||||
glong unused_next_idx;
|
||||
return Lookup(str, idxs, unused_next_idx);
|
||||
}
|
||||
@@ -160,7 +160,7 @@ private:
|
||||
std::unique_ptr<IIndexFile> idx_file;
|
||||
std::unique_ptr<SynFile> syn_file;
|
||||
|
||||
bool load_ifofile(const std::string &ifofilename, gulong &idxfilesize);
|
||||
bool load_ifofile(const std::string &ifofilename, off_t &idxfilesize);
|
||||
};
|
||||
|
||||
class Libs
|
||||
@@ -169,7 +169,7 @@ public:
|
||||
Libs(std::function<void(void)> f = std::function<void(void)>())
|
||||
{
|
||||
progress_func = f;
|
||||
iMaxFuzzyDistance = MAX_FUZZY_DISTANCE; //need to read from cfg.
|
||||
iMaxFuzzyDistance = MAX_FUZZY_DISTANCE; // need to read from cfg.
|
||||
}
|
||||
void setVerbose(bool verbose) { verbose_ = verbose; }
|
||||
void setFuzzy(bool fuzzy) { fuzzy_ = fuzzy; }
|
||||
|
||||
tests/not-unix-newlines-ifo/russian/russian.dict (new file, empty)
tests/not-unix-newlines-ifo/russian/russian.idx (new file, empty)
tests/not-unix-newlines-ifo/russian/russian.ifo (new file), 9 lines
@@ -0,0 +1,9 @@
StarDict's dict ifo file
version=3.0.0
bookname=Russian-English Dictionary (ru-en)
wordcount=415144
idxfilesize=12344255
sametypesequence=h
synwordcount=1277580
author=Vuizur
description=
tests/not-unix-newlines-ifo/russian/russian.syn (new file, empty)
@@ -18,7 +18,8 @@ test_json() {
fi
}

test_json '[{"name": "Test synonyms", "wordcount": "2"},
test_json '[{"name": "Russian-English Dictionary (ru-en)", "wordcount": "415144"},
{"name": "Test synonyms", "wordcount": "2"},
{"name": "Test multiple results", "wordcount": "246"},
{"name": "Sample 1 test dictionary", "wordcount": "1"},
{"name": "test_dict", "wordcount": "1"}]' -x -j -l -n --data-dir "$TEST_DIR"
tests/t_newlines_in_ifo (new executable file), 18 lines
@@ -0,0 +1,18 @@
#!/bin/sh

set -e

PATH_TO_SDCV="$1"
TEST_DIR="$2"

unset SDCV_PAGER
unset STARDICT_DATA_DIR

RES=$("$PATH_TO_SDCV" -n -x --data-dir="$TEST_DIR/not-unix-newlines-ifo" -l | tail -n 1)

if [ "$RES" = "Russian-English Dictionary (ru-en) 415144" ]; then
exit 0
else
echo "test failed, unexpected result: $RES" >&2
exit 1
fi