mirror of
https://github.com/cldellow/sqlite-parquet-vtable.git
synced 2025-09-14 22:39:59 +00:00
Add row-group filtering for string `==` constraints
For the StatsCan census data set, filtering on `== 'Dawson Creek'` takes the query from 980 ms to 660 ms. This is expected, since the data isn't sorted by that column. I'll try adding some scaffolding to do filtering at the row level, too. We could also try unpacking the dictionary and testing the individual values, although we may want some heuristics to decide whether that's worth doing — e.g., only if fewer than 10% of the rows have a unique value. Ideally, this should be a ~1 ms query.
This commit is contained in:
@@ -38,16 +38,21 @@ bool ParquetCursor::currentRowGroupSatisfiesTextFilter(Constraint constraint, st
|
||||
return true;
|
||||
}
|
||||
|
||||
if(constraint.getType() != Text) {
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string str = constraint.getString();
|
||||
parquet::ByteArray min = stats->min();
|
||||
parquet::ByteArray max = stats->max();
|
||||
std::string minStr((const char*)min.ptr, min.len);
|
||||
std::string maxStr((const char*)max.ptr, max.len);
|
||||
printf("min=%s [%d], max=%s [%d]\n", minStr.data(), min.len, maxStr.data(), max.len);
|
||||
// printf("min=%s [%d], max=%s [%d], target=%s\n", minStr.data(), min.len, maxStr.data(), max.len, str.data());
|
||||
|
||||
switch(constraint.getOperator()) {
|
||||
case Is:
|
||||
case Equal:
|
||||
|
||||
return str >= minStr && str <= maxStr;
|
||||
case GreaterThan:
|
||||
case GreaterThanOrEqual:
|
||||
case LessThan:
|
||||
@@ -77,7 +82,6 @@ bool ParquetCursor::currentRowGroupSatisfiesDoubleFilter(Constraint constraint,
|
||||
// data, which provides substantial performance benefits.
|
||||
bool ParquetCursor::currentRowGroupSatisfiesFilter() {
|
||||
for(unsigned int i = 0; i < constraints.size(); i++) {
|
||||
ValueType type = constraints[i].getType();
|
||||
int column = constraints[i].getColumn();
|
||||
int op = constraints[i].getOperator();
|
||||
bool rv = true;
|
||||
|
Reference in New Issue
Block a user