--- a/src/index/indexer.cpp
+++ b/src/index/indexer.cpp
@@ -147,10 +147,42 @@
     ifiles = myfiles;
     clearMimeHandlerCache();
     return ret;
 }
 
+// Update index for specific documents. The docs come from an index
+// query, so the udi, backend etc. fields are filled.
+bool ConfIndexer::updateDocs(std::vector<Rcl::Doc> &docs, IxFlag flag)
+{
+    list<string> files;
+    for (vector<Rcl::Doc>::iterator it = docs.begin(); it != docs.end(); it++) {
+        Rcl::Doc &idoc = *it;
+        string backend;
+        idoc.getmeta(Rcl::Doc::keybcknd, &backend);
+
+        // This only makes sense for file system files: beagle docs are
+        // always up to date because they can't be updated in the cache,
+        // only added/removed. Same remark as made inside internfile, we
+        // need a generic way to handle backends.
+        if (!backend.empty() && backend.compare("FS"))
+            continue;
+
+        // Filesystem document. Intern from file.
+        // The url has to be like file://
+        if (idoc.url.find(cstr_fileu) != 0) {
+            LOGERR(("idx::updateDocs: FS backend and non fs url: [%s]\n",
+                    idoc.url.c_str()));
+            continue;
+        }
+        files.push_back(idoc.url.substr(7, string::npos));
+    }
+    if (!files.empty()) {
+        return indexFiles(files, flag);
+    }
+    return true;
+}
+
 bool ConfIndexer::purgeFiles(std::list<string> &files)
 {
     list<string> myfiles;
     for (list<string>::const_iterator it = files.begin();
          it != files.end(); it++) {
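
For context, the core of the new function is a filter: it keeps only documents whose backend is "FS" (or unset), rejects anything without a file:// URL, and strips the scheme prefix (the hard-coded 7 is strlen("file://")) so the resulting paths can be handed to indexFiles(). The following is a minimal standalone sketch of just that filtering step; it uses a hypothetical Doc struct as a stand-in for Rcl::Doc and plain std::string, so it is an illustration of the logic, not the Recoll API itself.

// Standalone sketch of the URL filtering performed by the new updateDocs().
// "Doc" is a hypothetical stand-in for Rcl::Doc, modeling only the fields
// the filter actually looks at.
#include <iostream>
#include <list>
#include <string>
#include <vector>

struct Doc {
    std::string url;      // e.g. "file:///home/me/notes.txt"
    std::string backend;  // "FS" for filesystem docs, something else otherwise
};

// Keep only FS-backed docs with a file:// URL and return their plain paths,
// mirroring the loop added in the patch (substr past "file://").
static std::list<std::string> docsToFiles(const std::vector<Doc>& docs)
{
    static const std::string fileu = "file://";
    std::list<std::string> files;
    for (const Doc& d : docs) {
        if (!d.backend.empty() && d.backend != "FS")
            continue;  // non-filesystem backend: nothing to reindex from disk
        if (d.url.compare(0, fileu.size(), fileu) != 0)
            continue;  // FS backend but not a file:// URL: skip and move on
        files.push_back(d.url.substr(fileu.size()));
    }
    return files;
}

int main()
{
    std::vector<Doc> docs = {
        {"file:///home/me/notes.txt", "FS"},
        {"beagle:///something", "BGL"},
    };
    for (const std::string& path : docsToFiles(docs))
        std::cout << path << "\n";  // prints /home/me/notes.txt only
    return 0;
}

The design point the patch's comments make is that this backend check is ad hoc: only filesystem documents can be re-interned from their source, so other backends are silently skipped until a generic per-backend update mechanism exists.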