PHOENIX-7107 Add support for indexing on SYSTEM.CATALOG table #2048

Open · wants to merge 6 commits into master
@@ -202,7 +202,8 @@ public enum ExpressionType {
BsonValueFunction(BsonValueFunction.class),
PartitionIdFunction(PartitionIdFunction.class),
DecodeBinaryFunction(DecodeBinaryFunction.class),
EncodeBinaryFunction(EncodeBinaryFunction.class);
EncodeBinaryFunction(EncodeBinaryFunction.class),
DecodeViewIdFunction(DecodeViewIndexIdFunction.class);

ExpressionType(Class<? extends Expression> clazz) {
this.clazz = clazz;
@@ -0,0 +1,175 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.expression.function;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.Determinism;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.KeyValueColumnExpression;
import org.apache.phoenix.parse.FunctionParseNode;
import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
import org.apache.phoenix.parse.DecodeViewIndexIdParseNode;
import org.apache.phoenix.parse.PhoenixRowTimestampParseNode;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.schema.types.PSmallint;

import java.sql.Types;
import java.util.List;

import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.NULL_DATA_TYPE_VALUE;
import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN;

@kadirozde (Contributor) commented on Jan 13, 2025: Comments here are for the PHOENIX_ROW_TIMESTAMP() function. Let's update them.

/**
* Function to return the ViewIndexId value based on the ViewIndexIdDataType field.
* Can also be used in SQL predicates.
* The ViewIndexId field value needs to be interpreted based on the type specified in the
* ViewIndexIdDataType field.
*
* This is how the view index ids created by the various clients look:
* client                        VIEW_INDEX_ID (cell number of bytes)   VIEW_INDEX_ID_DATA_TYPE
* pre-4.15                      2 bytes                                NULL
* post-4.15 [config smallint]   2 bytes                                5 (smallint)
* post-4.15 [config bigint]     8 bytes                                -5 (bigint)
*
* VIEW_INDEX_ID_DATA_TYPE, VIEW_INDEX_ID (cell representation of the data)
* NULL, SMALLINT     -> RETRIEVE AND CONVERT TO BIGINT
* SMALLINT, SMALLINT -> RETRIEVE AND CONVERT TO BIGINT
* BIGINT, BIGINT     -> DO NOT CONVERT
*
*/
@BuiltInFunction(name = DecodeViewIndexIdFunction.NAME,
nodeClass= DecodeViewIndexIdParseNode.class,
args = {@FunctionParseNode.Argument(allowedTypes = { PLong.class}),
@FunctionParseNode.Argument(allowedTypes = { PInteger.class})
})
public class DecodeViewIndexIdFunction extends ScalarFunction {

public static final String NAME = "DECODE_VIEW_INDEX_ID";

public DecodeViewIndexIdFunction() {
}

/**
* @param children VIEW_INDEX_ID and VIEW_INDEX_ID_DATA_TYPE expressions
*/
public DecodeViewIndexIdFunction(List<Expression> children) {
super(children);

// It takes 2 parameters - VIEW_INDEX_ID, VIEW_INDEX_ID_DATA_TYPE.
if ((children.size() != 2) || !children.get(0).getClass().isAssignableFrom(
KeyValueColumnExpression.class) || !children.get(1).getClass().isAssignableFrom(
KeyValueColumnExpression.class)) {
throw new IllegalArgumentException(
"DecodeViewIndexIdFunction should only have a "
+ "VIEW_INDEX_ID and a VIEW_INDEX_ID_DATA_TYPE key value expression."
);
}
if (!(children.get(0).getDataType().equals(PLong.INSTANCE))) {
throw new IllegalArgumentException(
"DecodeViewIndexIdFunction should have an "
+ "VIEW_INDEX_ID key value expression of type PLong"
);
}

if (!(children.get(1).getDataType().equals(PInteger.INSTANCE))) {
throw new IllegalArgumentException(
"DecodeViewIndexIdFunction should have an "
+ "VIEW_INDEX_ID_DATA_TYPE key value expression of type PLong"
);
}
}

@Override
public String getName() {
return NAME;
}

@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
if (tuple == null) {
return false;
}

byte[] viewIndexIdCF = ((KeyValueColumnExpression) children.get(0)).getColumnFamily();
byte[] viewIndexIdCQ = ((KeyValueColumnExpression) children.get(0)).getColumnQualifier();
byte[] viewIndexIdTypeCF = ((KeyValueColumnExpression) children.get(1)).getColumnFamily();
byte[] viewIndexIdTypeCQ = ((KeyValueColumnExpression) children.get(1)).getColumnQualifier();

Cell viewIndexIdCell = tuple.getValue(viewIndexIdCF, viewIndexIdCQ);
Cell viewIndexIdDataTypeCell = tuple.getValue(viewIndexIdTypeCF, viewIndexIdTypeCQ);


/*
This is how the view index ids created by the various clients look:
client                        VIEW_INDEX_ID (cell number of bytes)   VIEW_INDEX_ID_DATA_TYPE
pre-4.15                      2 bytes                                NULL
post-4.15 [config smallint]   2 bytes                                5 (smallint)
post-4.15 [config bigint]     8 bytes                                -5 (bigint)

VIEW_INDEX_ID_DATA_TYPE, VIEW_INDEX_ID (cell representation of the data)
NULL, SMALLINT     -> RETRIEVE AND CONVERT TO BIGINT
SMALLINT, SMALLINT -> RETRIEVE AND CONVERT TO BIGINT
BIGINT, BIGINT     -> DO NOT CONVERT
*/

if (viewIndexIdCell != null) {
int type = NULL_DATA_TYPE_VALUE;
if (viewIndexIdDataTypeCell != null) {
type = (Integer) PInteger.INSTANCE.toObject(
viewIndexIdDataTypeCell.getValueArray(),
viewIndexIdDataTypeCell.getValueOffset(),
viewIndexIdDataTypeCell.getValueLength(),
PInteger.INSTANCE,
SortOrder.ASC);
}

ImmutableBytesWritable columnValue =
new ImmutableBytesWritable(CellUtil.cloneValue(viewIndexIdCell));
if ((type == NULL_DATA_TYPE_VALUE || type == Types.SMALLINT) && (viewIndexIdCell.getValueLength() <
VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN)) {
byte[] newBytes = PLong.INSTANCE.toBytes(PSmallint.INSTANCE.toObject(columnValue.get()));
ptr.set(newBytes, 0, newBytes.length);
} else {
ptr.set(columnValue.get(), columnValue.getOffset(), columnValue.getLength());
}
}
return true;
}

@Override
public PDataType getDataType() {
return PLong.INSTANCE;
}

@Override
public boolean isStateless() {
return false;
}

@Override
public Determinism getDeterminism() {
return Determinism.PER_ROW;
}

}
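
For illustration, here is a minimal, self-contained sketch of the widening rule described in the class comment above: a 2-byte SMALLINT-encoded view index id (pre-4.15 clients, or post-4.15 with the smallint config) is re-encoded as an 8-byte BIGINT, while an id already stored as BIGINT is passed through unchanged. This sketch is not part of the patch; the class name, helper name, and test values are made up for the example.

import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.schema.types.PSmallint;

// Hypothetical helper mirroring the conversion performed in DecodeViewIndexIdFunction.evaluate().
public class ViewIndexIdWideningSketch {

    // Assumed: a BIGINT-encoded view index id occupies 8 bytes
    // (the patch compares against VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN instead).
    static final int BIGINT_ENCODED_LEN = 8;

    // Widen a SMALLINT-encoded view index id to BIGINT bytes; leave BIGINT ids untouched.
    static byte[] toBigIntBytes(byte[] viewIndexIdValue) {
        if (viewIndexIdValue.length < BIGINT_ENCODED_LEN) {
            // 2-byte SMALLINT representation: decode as SMALLINT, re-encode as BIGINT
            return PLong.INSTANCE.toBytes(PSmallint.INSTANCE.toObject(viewIndexIdValue));
        }
        // Already the 8-byte BIGINT representation: no conversion needed.
        return viewIndexIdValue;
    }

    public static void main(String[] args) {
        byte[] smallintEncoded = PSmallint.INSTANCE.toBytes((short) 7); // 2 bytes
        byte[] bigintEncoded = PLong.INSTANCE.toBytes(7L);              // 8 bytes
        System.out.println(toBigIntBytes(smallintEncoded).length);      // prints 8
        System.out.println(toBigIntBytes(bigintEncoded).length);        // prints 8
    }
}
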
@@ -1074,6 +1074,9 @@ public boolean shouldPrepareIndexMutations(Put dataRowState) {
return true;
}
List<Cell> cols = IndexUtil.readColumnsFromRow(dataRowState, getIndexWhereColumns());
if (cols.isEmpty()) {
return false;
}
// Cells should be sorted as they are searched using a binary search during expression
// evaluation
Collections.sort(cols, CellComparator.getInstance());
@@ -39,6 +39,8 @@
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.ScanUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class IndexMetaDataCacheClient {

@@ -100,7 +100,12 @@ protected CreateTableStatement(TableName tableName, ListMultimap<String,Pair<Str
Map<String, Integer> familyCounters, boolean noVerify) {
this.tableName = tableName;
this.props = props == null ? ImmutableListMultimap.<String,Pair<String,Object>>of() : props;
this.tableType = PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(tableName.getSchemaName()) ? PTableType.SYSTEM : tableType;
this.tableType =
(PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(
tableName.getSchemaName()) &&
(tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) ?
PTableType.SYSTEM :
tableType);
this.columns = columns == null ? ImmutableList.<ColumnDef>of() : ImmutableList.<ColumnDef>copyOf(columns);
this.pkConstraint = pkConstraint == null ? PrimaryKeyConstraint.EMPTY : pkConstraint;
this.splitNodes = splitNodes == null ? Collections.<ParseNode>emptyList() : ImmutableList.copyOf(splitNodes);
@@ -0,0 +1,79 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.phoenix.parse;

import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.function.DecodeViewIndexIdFunction;
import org.apache.phoenix.expression.function.FunctionExpression;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.util.IndexUtil;

import java.sql.SQLException;
import java.util.List;

import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE;

public class DecodeViewIndexIdParseNode extends FunctionParseNode {

DecodeViewIndexIdParseNode(String name, List<ParseNode> children,
BuiltInFunctionInfo info) {
super(name, children, info);
// It takes 2 parameters - VIEW_INDEX_ID, VIEW_INDEX_ID_DATA_TYPE.
if (children.size() != 2) {
throw new IllegalArgumentException(
"DecodeViewIndexIdParseNode should only have "
+ "VIEW_INDEX_ID and VIEW_INDEX_ID_DATA_TYPE parse nodes."
);
}
if (children.get(0).getClass().isAssignableFrom(ColumnParseNode.class)
&& children.get(1).getClass().isAssignableFrom(ColumnParseNode.class)
&& (!(((ColumnParseNode) children.get(0)).getName().equals(VIEW_INDEX_ID))
|| !(((ColumnParseNode) children.get(1)).getName().equals(VIEW_INDEX_ID_DATA_TYPE)))
) {
throw new IllegalArgumentException(
"DecodeViewIndexIdParseNode should only have "
+ "VIEW_INDEX_ID and VIEW_INDEX_ID_DATA_TYPE parse nodes."
);
}

// CastParseNode is generated during index statement rewriting
if (children.get(0).getClass().isAssignableFrom(CastParseNode.class)
&& children.get(1).getClass().isAssignableFrom(CastParseNode.class)
&& (!((ColumnParseNode) (((CastParseNode) children.get(0)).getChildren().get(0))).getName().equals(
IndexUtil.getIndexColumnName(QueryConstants.DEFAULT_COLUMN_FAMILY, VIEW_INDEX_ID))
|| !((ColumnParseNode) (((CastParseNode) children.get(1)).getChildren().get(0))).getName().equals(
IndexUtil.getIndexColumnName(QueryConstants.DEFAULT_COLUMN_FAMILY, VIEW_INDEX_ID_DATA_TYPE)))
) {
throw new IllegalArgumentException(
"DecodeViewIndexIdParseNode should only have "
+ "VIEW_INDEX_ID and VIEW_INDEX_ID_DATA_TYPE parse nodes."
);
}

}

@Override
public FunctionExpression create(List<Expression> children, StatementContext context)
throws SQLException {
return new DecodeViewIndexIdFunction(children);
}

}
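
As a usage sketch (not part of the patch): once the built-in is registered, it can be invoked like any other scalar function over SYSTEM.CATALOG. The connection URL and the result-set handling below are assumptions for the example; only the function name and its two column arguments come from this PR.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DecodeViewIndexIdUsageSketch {
    public static void main(String[] args) throws Exception {
        // Assumed local Phoenix connection URL; adjust for the target cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement();
             // DECODE_VIEW_INDEX_ID takes exactly the VIEW_INDEX_ID and
             // VIEW_INDEX_ID_DATA_TYPE columns and returns the id as a BIGINT.
             ResultSet rs = stmt.executeQuery(
                     "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, "
                             + "DECODE_VIEW_INDEX_ID(VIEW_INDEX_ID, VIEW_INDEX_ID_DATA_TYPE) "
                             + "FROM SYSTEM.CATALOG WHERE VIEW_INDEX_ID IS NOT NULL")) {
            while (rs.next()) {
                System.out.println(rs.getString("TABLE_NAME") + " -> " + rs.getLong(4));
            }
        }
    }
}
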
@@ -1326,10 +1326,11 @@ private void addCoprocessors(byte[] tableName, TableDescriptorBuilder builder,

// TODO: better encapsulation for this
// Since indexes can't have indexes, don't install our indexing coprocessor for indexes.
// Also don't install on the SYSTEM.CATALOG and SYSTEM.STATS table because we use
// Also don't install on the SYSTEM.STATS table because we use
// all-or-none mutate class which break when this coprocessor is installed (PHOENIX-1318).
// With PHOENIX-7107, which introduced indexes on SYSTEM.CATALOG, we need to install the
// indexing coprocessor on SYSTEM.CATALOG.
if ((tableType != PTableType.INDEX && tableType != PTableType.VIEW && !isViewIndex)
&& !SchemaUtil.isMetaTable(tableName)
&& !SchemaUtil.isStatsTable(tableName)) {
if (isTransactional) {
if (!newDesc.hasCoprocessor(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME)) {
@@ -1759,8 +1760,23 @@ private TableDescriptor ensureTableCreated(byte[] physicalTableName, byte[] pare
TableDescriptorBuilder newDesc = generateTableDescriptor(physicalTableName, parentPhysicalTableName, existingDesc, tableType, props, families,
splits, isNamespaceMapped);

if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("ensureTableCreated " +
"physicalTableName = %s, " +
"parentPhysicalTableName = %s, " +
"isUpgradeRequired = %s, " +
"isAutoUpgradeEnabled = %s, " +
"isDoNotUpgradePropSet = %s",
Bytes.toString(physicalTableName),
Bytes.toString(parentPhysicalTableName),
isUpgradeRequired(),
isAutoUpgradeEnabled,
isDoNotUpgradePropSet));
}


if (!tableExist) {
if (SchemaUtil.isSystemTable(physicalTableName) && !isUpgradeRequired() && (!isAutoUpgradeEnabled || isDoNotUpgradePropSet)) {
if (SchemaUtil.isSystemTable(physicalTableName) && (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) && !isUpgradeRequired() && (!isAutoUpgradeEnabled || isDoNotUpgradePropSet)) {
// Disallow creating the SYSTEM.CATALOG or SYSTEM:CATALOG HBase table
throw new UpgradeRequiredException();
}
@@ -32,6 +32,7 @@
import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME;
import static org.apache.phoenix.query.QueryServices.INDEX_CREATE_DEFAULT_STATE;
import static org.apache.phoenix.schema.PTableType.CDC;
import static org.apache.phoenix.schema.types.PDataType.NULL_BYTES;
import static org.apache.phoenix.thirdparty.com.google.common.collect.Sets.newLinkedHashSet;
import static org.apache.phoenix.thirdparty.com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize;
import static org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS;
@@ -95,8 +95,11 @@ public void createViewAddChildLink(RpcController controller,
getCoprocessorHost().preCreateViewAddChildLink(fullparentTableName);

// From 4.15 the parent->child links are stored in a separate table SYSTEM.CHILD_LINK
mutateRowsWithLocks(this.accessCheckEnabled, this.env.getRegion(), childLinkMutations,
Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
// Also check if SYSTEM indexes exist on SYSTEM.CHILD_LINK; if so, set metadata attributes
// on the child link mutations
mutateRowsWithLocks(this.accessCheckEnabled, env, this.env.getRegion(), childLinkMutations,
Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE,
true, PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME);

} catch (Throwable t) {
LOGGER.error("Unable to write mutations to " +