Commit 17ec3d5e authored by groot
Browse files

#1609 Refine Compact function



Signed-off-by: groot <yihua.mo@zilliz.com>
parent 4825f072
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@ Please mark all change in change log and use the issue from GitHub
-   \#1590 Server down caused by failure to write file during concurrent mixed operations
-   \#1598 Server down during mixed operations
-   \#1601 External link bug in HTTP doc
-   \#1609 Refine Compact function

## Feature
-   \#216 Add CLI to get server info
+17 −49
Original line number Diff line number Diff line
@@ -671,26 +671,6 @@ DBImpl::Compact(const std::string& table_id) {

    ENGINE_LOG_DEBUG << "Compacting table: " << table_id;

    /*
        // Save table index
        TableIndex table_index;
        status = DescribeIndex(table_id, table_index);
        if (!status.ok()) {
            return status;
        }

        // Drop all index
        status = DropIndex(table_id);
        if (!status.ok()) {
            return status;
        }

        // Then update table index to the previous index
        status = UpdateTableIndexRecursively(table_id, table_index);
        if (!status.ok()) {
            return status;
        }
    */
    // Get files to compact from meta.
    std::vector<int> file_types{meta::TableFileSchema::FILE_TYPE::RAW, meta::TableFileSchema::FILE_TYPE::TO_INDEX,
                                meta::TableFileSchema::FILE_TYPE::BACKUP};
@@ -706,7 +686,6 @@ DBImpl::Compact(const std::string& table_id) {

    OngoingFileChecker::GetInstance().MarkOngoingFiles(files_to_compact);

    meta::TableFilesSchema files_to_update;
    Status compact_status;
    for (auto& file : files_to_compact) {
        // Check if the segment needs compacting
@@ -719,52 +698,41 @@ DBImpl::Compact(const std::string& table_id) {
        if (!status.ok()) {
            std::string msg = "Failed to load deleted_docs from " + segment_dir;
            ENGINE_LOG_ERROR << msg;
            return Status(DB_ERROR, msg);
            OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
            continue;  // skip this file and try compact next one
        }

        meta::TableFilesSchema files_to_update;
        if (deleted_docs->GetSize() != 0) {
            compact_status = CompactFile(table_id, file, files_to_update);

            if (!compact_status.ok()) {
                ENGINE_LOG_ERROR << "Compact failed for segment " << file.segment_id_ << ": "
                                 << compact_status.message();
                break;
                OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
                continue;  // skip this file and try compact next one
            }
        } else {
            OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
            ENGINE_LOG_DEBUG << "Segment " << file.segment_id_ << " has no deleted data. No need to compact";
        }
    }

    if (compact_status.ok()) {
        ENGINE_LOG_DEBUG << "Finished compacting table: " << table_id;
            continue;  // skip this file and try compact next one
        }

        ENGINE_LOG_DEBUG << "Updating meta after compaction...";

    /*
    // Drop index again, in case some files were in the index building process during compacting
    status = DropIndex(table_id);
    if (!status.ok()) {
        return status;
    }

    // Update index
    status = UpdateTableIndexRecursively(table_id, table_index);
    if (!status.ok()) {
        return status;
    }
     */

        status = meta_ptr_->UpdateTableFiles(files_to_update);
        if (!status.ok()) {
        return status;
            compact_status = status;
            break;  // meta error, could not go on
        }
    }

    OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_compact);

    ENGINE_LOG_DEBUG << "Finished updating meta after compaction";
    if (compact_status.ok()) {
        ENGINE_LOG_DEBUG << "Finished compacting table: " << table_id;
    }

    return status;
    return compact_status;
}

Status