Commit 8574b8b9 authored by Yukikaze-CZR, committed by Jin Hai
Browse files

add code coverage rate and fix #670 (#693)

parent d82c7da6
Loading
Loading
Loading
Loading
+0 −56
Original line number Diff line number Diff line
@@ -295,61 +295,5 @@ FaissIVFQuantizer::~FaissIVFQuantizer() {
    // else do nothing
}

#else

// Fallback branch (preceding #else, i.e. the customization/GPU path is
// compiled out): quantizer loading is unsupported, so hand back an empty
// (null) quantizer.
QuantizerPtr
IVFSQHybrid::LoadQuantizer(const Config& conf) {
    // A default-constructed QuantizerPtr is null — identical to the original
    // knowhere::QuantizerPtr() spelling; we are inside namespace knowhere,
    // so the unqualified name resolves to the same type.
    return QuantizerPtr{};
}

// Fallback branch (customization/GPU path compiled out): setting a
// quantizer is deliberately a no-op; the argument is ignored.
void
IVFSQHybrid::SetQuantizer(const QuantizerPtr& q) {
}

// Fallback branch: there is never a quantizer attached in this build
// configuration, so unsetting is deliberately a no-op.
void
IVFSQHybrid::UnsetQuantizer() {
}

// Fallback branch: loading index data alongside a quantizer is unsupported;
// callers receive a null index and must handle it.
VectorIndexPtr
IVFSQHybrid::LoadData(const knowhere::QuantizerPtr& q, const Config& conf) {
    return nullptr;
}

// Fallback branch: copying to GPU together with a quantizer is unsupported
// in this build. KNOWHERE_THROW_MSG presumably raises a knowhere exception
// with the given message — TODO confirm the macro's behavior.
std::pair<VectorIndexPtr, QuantizerPtr>
IVFSQHybrid::CopyCpuToGpuWithQuantizer(const int64_t& device_id, const Config& config) {
    KNOWHERE_THROW_MSG("Not yet implemented");
}

// Fallback branch: training has no hybrid-specific behavior here; delegate
// directly to the GPUIVFSQ base implementation.
IndexModelPtr
IVFSQHybrid::Train(const DatasetPtr& dataset, const Config& config) {
    return GPUIVFSQ::Train(dataset, config);
}

// Fallback branch: GPU-to-CPU copy simply delegates to the GPUIVFSQ base.
VectorIndexPtr
IVFSQHybrid::CopyGpuToCpu(const Config& config) {
    return GPUIVFSQ::CopyGpuToCpu(config);
}

// Fallback branch: CPU-to-GPU copy delegates to IVF::CopyCpuToGpu.
// NOTE(review): this skips the GPUIVFSQ ancestor and goes to IVF directly —
// looks intentional for the non-customization build, but verify against the
// class hierarchy.
VectorIndexPtr
IVFSQHybrid::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
    return IVF::CopyCpuToGpu(device_id, config);
}

// Fallback branch: search writes k results per query into distances/labels;
// no hybrid-specific handling, so delegate to GPUIVF::search_impl.
void
IVFSQHybrid::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels,
                         const Config& cfg) {
    GPUIVF::search_impl(n, data, k, distances, labels, cfg);
}

// Fallback branch: deserialization delegates to GPUIVF::LoadImpl unchanged.
void
IVFSQHybrid::LoadImpl(const BinarySet& index_binary) {
    GPUIVF::LoadImpl(index_binary);
}

// Fallback branch: attaching a trained model delegates to GPUIVF.
void
IVFSQHybrid::set_index_model(IndexModelPtr model) {
    GPUIVF::set_index_model(model);
}

#endif
}  // namespace knowhere
+1 −1
Original line number Diff line number Diff line
@@ -36,7 +36,6 @@ struct FaissIVFQuantizer : public Quantizer {
    ~FaissIVFQuantizer() override;
};
using FaissIVFQuantizerPtr = std::shared_ptr<FaissIVFQuantizer>;
#endif

class IVFSQHybrid : public GPUIVFSQ {
 public:
@@ -93,5 +92,6 @@ class IVFSQHybrid : public GPUIVFSQ {
    int64_t gpu_mode = 0;  // 0,1,2
    int64_t quantizer_gpu_id_ = -1;
};
#endif

}  // namespace knowhere
+2 −0
Original line number Diff line number Diff line
@@ -38,9 +38,11 @@ CopyGpuToCpu(const VectorIndexPtr& index, const Config& config) {

VectorIndexPtr
CopyCpuToGpu(const VectorIndexPtr& index, const int64_t& device_id, const Config& config) {
#ifdef CUSTOMIZATION
    if (auto device_index = std::dynamic_pointer_cast<IVFSQHybrid>(index)) {
        return device_index->CopyCpuToGpu(device_id, config);
    }
#endif

    if (auto device_index = std::dynamic_pointer_cast<GPUIndex>(index)) {
        return device_index->CopyGpuToGpu(device_id, config);
+2 −0
Original line number Diff line number Diff line
@@ -53,8 +53,10 @@ IndexFactory(const std::string& type) {
        return std::make_shared<knowhere::GPUIVFPQ>(DEVICEID);
    } else if (type == "GPUIVFSQ") {
        return std::make_shared<knowhere::GPUIVFSQ>(DEVICEID);
#ifdef CUSTOMIZATION
    } else if (type == "IVFSQHybrid") {
        return std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
#endif
#endif
    }
}
+2 −0
Original line number Diff line number Diff line
@@ -73,6 +73,7 @@ IVFMixIndex::Load(const knowhere::BinarySet& index_binary) {
    return Status::OK();
}

#ifdef CUSTOMIZATION
knowhere::QuantizerPtr
IVFHybridIndex::LoadQuantizer(const Config& conf) {
    // TODO(linxj): Hardcode here
@@ -158,6 +159,7 @@ IVFHybridIndex::CopyToGpuWithQuantizer(const int64_t& device_id, const Config& c
    }
    return std::make_pair(nullptr, nullptr);
}
#endif

}  // namespace engine
}  // namespace milvus
Loading