From 3bd1e85b8564a8942bb3638ec124e212f867a9fd Mon Sep 17 00:00:00 2001
From: Palash Chauhan
Date: Thu, 17 Oct 2024 15:20:26 -0700
Subject: [PATCH 1/5] create metric type
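
Add a COUNT_BLOCK_BYTES_SCANNED metric type and wire it through the
global client metrics, the per-table client metrics, and the per-scan
ScanMetricsHolder. The value comes from the HBase server-side scan
metric BLOCK_BYTES_SCANNED_KEY_METRIC_NAME, read in
ScanningResultIterator#updateMetrics.

With this in place the aggregate should be readable from the global
registry, roughly as follows (an illustrative sketch using the
existing GlobalClientMetrics accessors, not code from this patch):

    long blockBytes = GlobalClientMetrics
            .GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED
            .getMetric().getValue();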
---
.../apache/phoenix/iterate/ScanningResultIterator.java | 5 ++++-
.../apache/phoenix/monitoring/GlobalClientMetrics.java | 3 ++-
.../java/org/apache/phoenix/monitoring/MetricType.java | 3 +++
.../org/apache/phoenix/monitoring/ScanMetricsHolder.java | 8 ++++++++
.../org/apache/phoenix/monitoring/TableClientMetrics.java | 4 +++-
5 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 0b60b6d3e68..e0aee52ccc5 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -26,9 +26,11 @@
import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.BLOCK_BYTES_SCANNED_KEY_METRIC_NAME;
import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
import static org.apache.phoenix.exception.SQLExceptionCode.OPERATION_TIMED_OUT;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -190,8 +192,9 @@ private void updateMetrics() {
scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME));
changeMetric(GLOBAL_HBASE_COUNT_ROWS_FILTERED,
scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));
-
changeMetric(GLOBAL_PAGED_ROWS_COUNTER, dummyRowCounter);
+ changeMetric(GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED,
+ scanMetricsMap.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
scanMetricsUpdated = true;
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
index f0f071cbcfe..f3c40dbfbd9 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
@@ -21,6 +21,7 @@
import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE;
import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_EVICTION_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_REMOVAL_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
import static org.apache.phoenix.monitoring.MetricType.HCONNECTIONS_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
import static org.apache.phoenix.monitoring.MetricType.MEMORY_WAIT_TIME;
@@ -145,7 +146,7 @@ public enum GlobalClientMetrics {
GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED),
GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED),
GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY(COUNTER_METADATA_INCONSISTENCY),
-
+ GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED(COUNT_BLOCK_BYTES_SCANNED),
GLOBAL_HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME(HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME),
GLOBAL_HA_PARALLEL_POOL1_TASK_END_TO_END_TIME(HA_PARALLEL_POOL1_TASK_END_TO_END_TIME),
GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTION_TIME(HA_PARALLEL_POOL1_TASK_EXECUTION_TIME),
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
index d66fb0e19df..1bbb9d75672 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
@@ -186,6 +186,9 @@ public enum MetricType {
TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS("tsistrc", "Time spent in RPC calls for systemTable lookup",
LogLevel.DEBUG,PLong.INSTANCE),
+ COUNT_BLOCK_BYTES_SCANNED("bbs", "Count of Block Bytes Scanned",
+ LogLevel.DEBUG,PLong.INSTANCE),
+
//HA Related Metrics
HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER("hpoac","Number of Operations to the active cluster",LogLevel.DEBUG,PLong.INSTANCE),
HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER("hposc","Number of Operations to the standby cluster",LogLevel.DEBUG,PLong.INSTANCE),
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
index dd0aca0659b..b0e31aa7bd2 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.monitoring;
+import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_IN_REMOTE_RESULTS;
import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_REGION_SERVER_RESULTS;
import static org.apache.phoenix.monitoring.MetricType.COUNT_MILLS_BETWEEN_NEXTS;
@@ -53,6 +54,8 @@ public class ScanMetricsHolder {
private final CombinableMetric countOfRowsFiltered;
private final CombinableMetric countOfBytesScanned;
private final CombinableMetric countOfRowsPaged;
+
+ private final CombinableMetric countOfBlockBytesScanned;
private Map<String, Long> scanMetricMap;
private Object scan;
@@ -84,6 +87,7 @@ private ScanMetricsHolder(ReadMetricQueue readMetrics, String tableName,Scan sca
countOfRowsFiltered = readMetrics.allotMetric(COUNT_ROWS_FILTERED, tableName);
countOfBytesScanned = readMetrics.allotMetric(SCAN_BYTES,tableName);
countOfRowsPaged = readMetrics.allotMetric(PAGED_ROWS_COUNTER, tableName);
+ countOfBlockBytesScanned = readMetrics.allotMetric(COUNT_BLOCK_BYTES_SCANNED, tableName);
}
public CombinableMetric getCountOfRemoteRPCcalls() {
@@ -142,6 +146,10 @@ public CombinableMetric getCountOfRowsPaged() {
return countOfRowsPaged;
}
+ public CombinableMetric getCountOfBlockBytesScanned() {
+ return countOfBlockBytesScanned;
+ }
+
public void setScanMetricMap(Map<String, Long> scanMetricMap) {
this.scanMetricMap = scanMetricMap;
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
index 5a1aa3dbc7e..4bec6eb37b6 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
@@ -23,6 +23,7 @@
import java.util.List;
import java.util.Map;
+import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE;
import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_FAILED_SIZE;
import static org.apache.phoenix.monitoring.MetricType.MUTATION_BYTES;
@@ -140,7 +141,8 @@ public enum TableMetrics {
TABLE_NUM_SYSTEM_TABLE_RPC_SUCCESS(NUM_SYSTEM_TABLE_RPC_SUCCESS),
TABLE_NUM_SYSTEM_TABLE_RPC_FAILURES(NUM_SYSTEM_TABLE_RPC_FAILURES),
TABLE_NUM_METADATA_LOOKUP_FAILURES(NUM_METADATA_LOOKUP_FAILURES),
- TABLE_TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS);
+ TABLE_TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS),
+ TABLE_COUNT_BLOCK_BYTES_SCANNED(COUNT_BLOCK_BYTES_SCANNED);
private MetricType metricType;
private PhoenixTableMetric metric;
From f4e3843f103a78aca74d2247f25d29a696846e06 Mon Sep 17 00:00:00 2001
From: Palash Chauhan
Date: Fri, 18 Oct 2024 11:51:28 -0700
Subject: [PATCH 2/5] update block size progress in ScannerContext
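
Phoenix copies scan metrics between ScannerContext instances in
ScannerContextUtil when it wraps region scanners, but the block-size
progress was not carried over, so BLOCK_BYTES_SCANNED could be lost
on the way back to the client. Propagate the block progress to the
destination context, record the metric at the request level in
ScanningResultIterator, and add an IT for it.

The IT drains the ResultSet and sums the per-table read metrics,
following the existing request-metrics pattern:

    ResultSet rs = stmt.executeQuery(sql);
    while (rs.next()) {
        // loop to the end
    }
    Map<String, Map<MetricType, Long>> metrics =
            PhoenixRuntime.getRequestReadMetricInfo(rs);
    long sum = 0;
    for (Map<MetricType, Long> tableMetrics : metrics.values()) {
        Long val = tableMetrics.get(MetricType.COUNT_BLOCK_BYTES_SCANNED);
        if (val != null) {
            sum += val;
        }
    }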
---
.../iterate/ScanningResultIterator.java | 4 +-
.../regionserver/ScannerContextUtil.java | 1 +
.../BlockBytesScannedMetricIT.java | 128 ++++++++++++++++++
3 files changed, 131 insertions(+), 2 deletions(-)
create mode 100644 phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index e0aee52ccc5..63f46ab0a5e 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -167,6 +167,8 @@ private void updateMetrics() {
changeMetric(scanMetricsHolder.getCountOfBytesScanned(),
scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME));
changeMetric(scanMetricsHolder.getCountOfRowsPaged(), dummyRowCounter);
+ changeMetric(scanMetricsHolder.getCountOfBlockBytesScanned(),
+ scanMetricsMap.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
changeMetric(GLOBAL_SCAN_BYTES,
scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME));
@@ -193,8 +195,6 @@ private void updateMetrics() {
changeMetric(GLOBAL_HBASE_COUNT_ROWS_FILTERED,
scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));
changeMetric(GLOBAL_PAGED_ROWS_COUNTER, dummyRowCounter);
- changeMetric(GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED,
- scanMetricsMap.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
scanMetricsUpdated = true;
}
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
index 7bdb2c0b8bb..217049734de 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
@@ -42,6 +42,7 @@ public static void updateMetrics(ScannerContext src, ScannerContext dst) {
for (Map.Entry<String, Long> entry : src.getMetrics().getMetricsMap().entrySet()) {
dst.metrics.addToCounter(entry.getKey(), entry.getValue());
}
+ dst.incrementBlockProgress((int) src.getBlockSizeProgress());
}
}
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
new file mode 100644
index 00000000000..6fdc901b954
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.monitoring.MetricType;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class BlockBytesScannedMetricIT extends BaseTest {
+
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+ props.put(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, "true");
+ setUpTestDriver(new ReadOnlyProps(props));
+ }
+
+ @Test
+ public void testBlockBytesScannedMetric() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String tableName = generateUniqueName();
+ PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+ stmt.execute("CREATE TABLE " + tableName
+ + " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG)");
+ for (int i = 1; i <= 200; i++) {
+ String sql = String.format("UPSERT INTO %s VALUES (%d, %d)", tableName, i, i);
+ stmt.execute(sql);
+ if (i % 50 == 0) {
+ conn.commit();
+ flush(tableName);
+ }
+ }
+ clearBlockCache(tableName);
+
+ long count1 = countBlockBytesScannedFromSql(stmt, "SELECT * FROM " + tableName + " WHERE A = 67");
+ Assert.assertTrue(count1 > 0);
+
+ long count2 = countBlockBytesScannedFromSql(stmt, "SELECT * FROM " + tableName);
+ Assert.assertTrue(count2 > 0);
+
+ long count3 = countBlockBytesScannedFromSql(stmt, "SELECT * FROM " + tableName
+ + " WHERE A > 21 AND A < 67");
+ Assert.assertTrue(count3 > 0);
+ }
+
+ private void clearBlockCache(String tableName) {
+ HRegionServer regionServer = utility.getMiniHBaseCluster().getRegionServer(0);
+ for (HRegion region : regionServer.getRegions(TableName.valueOf(tableName))) {
+ regionServer.clearRegionBlockCache(region);
+ }
+ }
+
+ private void flush(String tableName) throws IOException {
+ HRegionServer regionServer = utility.getMiniHBaseCluster().getRegionServer(0);
+ for (HRegion region : regionServer.getRegions(TableName.valueOf(tableName))) {
+ region.flush(true);
+ }
+ }
+
+
+ private long countBlockBytesScannedFromSql(Statement stmt, String sql) throws SQLException {
+ ResultSet rs = stmt.executeQuery(sql);
+ while (rs.next()) {
+ // loop to the end
+ }
+ return getBlockBytesScanned(rs);
+ }
+
+ private long getBlockBytesScanned(ResultSet rs) throws SQLException {
+ if (!(rs instanceof PhoenixResultSet)) {
+ return -1;
+ }
+ Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
+
+ long sum = 0;
+ boolean valid = false;
+ for (Map.Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
+ Long val = entry.getValue().get(MetricType.COUNT_BLOCK_BYTES_SCANNED);
+ if (val != null) {
+ sum += val.longValue();
+ valid = true;
+ }
+ }
+ if (valid) {
+ return sum;
+ } else {
+ return -1;
+ }
+ }
+}
\ No newline at end of file
From 3f2754a41ca07b0176e94380819fa5a909fa8db0 Mon Sep 17 00:00:00 2001
From: Palash Chauhan
Date: Fri, 18 Oct 2024 14:02:52 -0700
Subject: [PATCH 3/5] add more tests
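
Split the single test into a point-lookup case and a range-scan case.
The point lookup asserts that the metric is 0 while the data is only
in the memstore and becomes positive after a flush plus a block cache
clear. The range-scan case creates the table with a small BLOCKSIZE
so the rows span multiple blocks, then checks that a filter on a
non-PK column scans more block bytes than a PK range scan, and that a
full table scan matches the non-PK filter scan.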
---
.../BlockBytesScannedMetricIT.java | 55 +++++++++++++++----
1 file changed, 43 insertions(+), 12 deletions(-)
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
index 6fdc901b954..a246bb1ff0f 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.hbase.regionserver;
-import static org.junit.Assert.assertEquals;
-
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
@@ -54,31 +52,64 @@ public static synchronized void doSetup() throws Exception {
}
@Test
- public void testBlockBytesScannedMetric() throws Exception {
+ public void testPointLookupBlockBytesScannedMetric() throws Exception {
Connection conn = DriverManager.getConnection(getUrl());
String tableName = generateUniqueName();
PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
stmt.execute("CREATE TABLE " + tableName
+ " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG)");
- for (int i = 1; i <= 200; i++) {
+ for (int i = 1; i <= 10; i++) {
String sql = String.format("UPSERT INTO %s VALUES (%d, %d)", tableName, i, i);
stmt.execute(sql);
- if (i % 50 == 0) {
- conn.commit();
- flush(tableName);
- }
}
+ conn.commit();
+
+ String POINT_LOOKUP_QUERY = "SELECT * FROM " + tableName + " WHERE A = 9";
+
+ // data is only in the memstore, so no blocks should be read
+ long count0 = countBlockBytesScannedFromSql(stmt, POINT_LOOKUP_QUERY);
+ Assert.assertEquals(0, count0);
+
+ // flush and clear block cache
+ flush(tableName);
clearBlockCache(tableName);
- long count1 = countBlockBytesScannedFromSql(stmt, "SELECT * FROM " + tableName + " WHERE A = 67");
+ long count1 = countBlockBytesScannedFromSql(stmt, POINT_LOOKUP_QUERY);
+ Assert.assertTrue(count1 > 0);
+ }
+
+ @Test
+ public void testRangeScanBlockBytesScannedMetric() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String tableName = generateUniqueName();
+ PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+ // create table with small block size and upsert enough rows to have at least 2 blocks
+ stmt.execute("CREATE TABLE " + tableName
+ + " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG) BLOCKSIZE=200");
+ for (int i = 1; i <= 20; i++) {
+ String sql = String.format("UPSERT INTO %s VALUES (%d, %d)", tableName, i, i);
+ stmt.execute(sql);
+ }
+ conn.commit();
+ flush(tableName);
+ clearBlockCache(tableName);
+
+ String RANGE_SCAN_QUERY = "SELECT * FROM " + tableName + " WHERE A > 14 AND A < 18";
+ String SERVER_FILTER_QUERY = "SELECT * FROM " + tableName + " WHERE Z > 14 AND Z < 18";
+ String SELECT_ALL_QUERY = "SELECT * FROM " + tableName;
+
+ long count1 = countBlockBytesScannedFromSql(stmt, RANGE_SCAN_QUERY);
Assert.assertTrue(count1 > 0);
- long count2 = countBlockBytesScannedFromSql(stmt, "SELECT * FROM " + tableName);
+ long count2 = countBlockBytesScannedFromSql(stmt, SERVER_FILTER_QUERY);
Assert.assertTrue(count2 > 0);
+ // the WHERE clause filters on a non-PK column, so all rows must be scanned
+ Assert.assertTrue(count2 > count1);
- long count3 = countBlockBytesScannedFromSql(stmt, "SELECT * FROM " + tableName
- + " WHERE A > 21 AND A < 67");
+ long count3 = countBlockBytesScannedFromSql(stmt, SELECT_ALL_QUERY);
Assert.assertTrue(count3 > 0);
+ // should be the same as the previous query, which also scans all rows
+ Assert.assertEquals(count3, count2);
}
private void clearBlockCache(String tableName) {
From 6133bad18d7c92bfea23da6225772bb504a58e31 Mon Sep 17 00:00:00 2001
From: Palash Chauhan
Date: Fri, 18 Oct 2024 16:45:21 -0700
Subject: [PATCH 4/5] add metric to track fs_read time
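
Add an FS_READ_TIME metric that surfaces the HBase fsReadTime scan
metric (time spent reading HFile blocks from the filesystem) at the
global, table, and request level, and propagate it across
ScannerContext instances alongside the other scan metrics.

Also deduplicate the metric-summing helper from CountRowsScannedIT
and BlockBytesScannedMetricIT into TestUtil#getMetricFromSql, used
like this in the new FSReadTimeMetricIT:

    long fsReadTime = TestUtil.getMetricFromSql(stmt,
            "SELECT * FROM " + tableName, MetricType.FS_READ_TIME);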
---
.../iterate/ScanningResultIterator.java | 8 +++
.../monitoring/GlobalClientMetrics.java | 2 +
.../apache/phoenix/monitoring/MetricType.java | 2 +
.../phoenix/monitoring/ScanMetricsHolder.java | 7 ++
.../monitoring/TableClientMetrics.java | 4 +-
.../regionserver/ScannerContextUtil.java | 1 +
.../BlockBytesScannedMetricIT.java | 56 ++--------------
.../regionserver/wal/FSReadTimeMetricIT.java | 67 +++++++++++++++++++
.../monitoring/CountRowsScannedIT.java | 32 +--------
.../org/apache/phoenix/util/TestUtil.java | 27 ++++++++
10 files changed, 125 insertions(+), 81 deletions(-)
create mode 100644 phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/FSReadTimeMetricIT.java
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 63f46ab0a5e..86354542616 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -29,6 +29,7 @@
import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.BLOCK_BYTES_SCANNED_KEY_METRIC_NAME;
import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME;
import static org.apache.phoenix.exception.SQLExceptionCode.OPERATION_TIMED_OUT;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
@@ -42,8 +43,10 @@
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_RPC_CALLS;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_RPC_RETRIES;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_SCANNED_REGIONS;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_FS_READ_TIME;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PAGED_ROWS_COUNTER;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SCAN_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
import static org.apache.phoenix.util.ScanUtil.isDummy;
import java.io.IOException;
@@ -169,6 +172,8 @@ private void updateMetrics() {
changeMetric(scanMetricsHolder.getCountOfRowsPaged(), dummyRowCounter);
changeMetric(scanMetricsHolder.getCountOfBlockBytesScanned(),
scanMetricsMap.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
+ changeMetric(scanMetricsHolder.getFsReadTime(),
+ scanMetricsMap.get(FS_READ_TIME_METRIC_NAME));
changeMetric(GLOBAL_SCAN_BYTES,
scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME));
@@ -195,6 +200,9 @@ private void updateMetrics() {
changeMetric(GLOBAL_HBASE_COUNT_ROWS_FILTERED,
scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));
changeMetric(GLOBAL_PAGED_ROWS_COUNTER, dummyRowCounter);
+ changeMetric(GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED,
+ scanMetricsMap.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
+ changeMetric(GLOBAL_HBASE_FS_READ_TIME, scanMetricsMap.get(FS_READ_TIME_METRIC_NAME));
scanMetricsUpdated = true;
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
index f3c40dbfbd9..75c43a769df 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
@@ -22,6 +22,7 @@
import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_EVICTION_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_REMOVAL_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
import static org.apache.phoenix.monitoring.MetricType.HCONNECTIONS_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
import static org.apache.phoenix.monitoring.MetricType.MEMORY_WAIT_TIME;
@@ -147,6 +148,7 @@ public enum GlobalClientMetrics {
GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED),
GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY(COUNTER_METADATA_INCONSISTENCY),
GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED(COUNT_BLOCK_BYTES_SCANNED),
+ GLOBAL_HBASE_FS_READ_TIME(FS_READ_TIME),
GLOBAL_HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME(HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME),
GLOBAL_HA_PARALLEL_POOL1_TASK_END_TO_END_TIME(HA_PARALLEL_POOL1_TASK_END_TO_END_TIME),
GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTION_TIME(HA_PARALLEL_POOL1_TASK_EXECUTION_TIME),
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
index 1bbb9d75672..f13fc6a98fe 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
@@ -189,6 +189,8 @@ public enum MetricType {
COUNT_BLOCK_BYTES_SCANNED("bbs", "Count of Block Bytes Scanned",
LogLevel.DEBUG,PLong.INSTANCE),
+ FS_READ_TIME("fsrd", "", LogLevel.DEBUG,PLong.INSTANCE),
+
//HA Related Metrics
HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER("hpoac","Number of Operations to the active cluster",LogLevel.DEBUG,PLong.INSTANCE),
HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER("hposc","Number of Operations to the standby cluster",LogLevel.DEBUG,PLong.INSTANCE),
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
index b0e31aa7bd2..b27bca9e8a4 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
@@ -29,6 +29,7 @@
import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_CALLS;
import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_RETRIES;
import static org.apache.phoenix.monitoring.MetricType.COUNT_SCANNED_REGIONS;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
import static org.apache.phoenix.monitoring.MetricType.PAGED_ROWS_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
@@ -56,6 +57,7 @@ public class ScanMetricsHolder {
private final CombinableMetric countOfRowsPaged;
private final CombinableMetric countOfBlockBytesScanned;
+ private final CombinableMetric fsReadTime;
private Map<String, Long> scanMetricMap;
private Object scan;
@@ -88,6 +90,7 @@ private ScanMetricsHolder(ReadMetricQueue readMetrics, String tableName,Scan sca
countOfBytesScanned = readMetrics.allotMetric(SCAN_BYTES,tableName);
countOfRowsPaged = readMetrics.allotMetric(PAGED_ROWS_COUNTER, tableName);
countOfBlockBytesScanned = readMetrics.allotMetric(COUNT_BLOCK_BYTES_SCANNED, tableName);
+ fsReadTime = readMetrics.allotMetric(FS_READ_TIME, tableName);
}
public CombinableMetric getCountOfRemoteRPCcalls() {
@@ -150,6 +153,10 @@ public CombinableMetric getCountOfBlockBytesScanned() {
return countOfBlockBytesScanned;
}
+ public CombinableMetric getFsReadTime() {
+ return fsReadTime;
+ }
+
public void setScanMetricMap(Map<String, Long> scanMetricMap) {
this.scanMetricMap = scanMetricMap;
}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
index 4bec6eb37b6..5b3095a31ca 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
@@ -24,6 +24,7 @@
import java.util.Map;
import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE;
import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_FAILED_SIZE;
import static org.apache.phoenix.monitoring.MetricType.MUTATION_BYTES;
@@ -142,7 +143,8 @@ public enum TableMetrics {
TABLE_NUM_SYSTEM_TABLE_RPC_FAILURES(NUM_SYSTEM_TABLE_RPC_FAILURES),
TABLE_NUM_METADATA_LOOKUP_FAILURES(NUM_METADATA_LOOKUP_FAILURES),
TABLE_TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS),
- TABLE_COUNT_BLOCK_BYTES_SCANNED(COUNT_BLOCK_BYTES_SCANNED);
+ TABLE_COUNT_BLOCK_BYTES_SCANNED(COUNT_BLOCK_BYTES_SCANNED),
+ TABLE_FS_READ_TIME(FS_READ_TIME);
private MetricType metricType;
private PhoenixTableMetric metric;
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
index 217049734de..1f51ac6ef3e 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
@@ -43,6 +43,7 @@ public static void updateMetrics(ScannerContext src, ScannerContext dst) {
dst.metrics.addToCounter(entry.getKey(), entry.getValue());
}
dst.incrementBlockProgress((int) src.getBlockSizeProgress());
+ dst.getMetrics().fsReadTime.addAndGet(src.getMetrics().fsReadTime.get());
}
}
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
index a246bb1ff0f..64ab2398556 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
@@ -18,24 +18,21 @@
package org.apache.hadoop.hbase.regionserver;
-import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
-import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -71,8 +68,8 @@ public void testPointLookupBlockBytesScannedMetric() throws Exception {
Assert.assertEquals(0, count0);
// flush and clear block cache
- flush(tableName);
- clearBlockCache(tableName);
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ TestUtil.clearBlockCache(utility, TableName.valueOf(tableName));
long count1 = countBlockBytesScannedFromSql(stmt, POINT_LOOKUP_QUERY);
Assert.assertTrue(count1 > 0);
@@ -91,8 +88,8 @@ public void testRangeScanBlockBytesScannedMetric() throws Exception {
stmt.execute(sql);
}
conn.commit();
- flush(tableName);
- clearBlockCache(tableName);
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ TestUtil.clearBlockCache(utility, TableName.valueOf(tableName));
String RANGE_SCAN_QUERY = "SELECT * FROM " + tableName + " WHERE A > 14 AND A < 18";
String SERVER_FILTER_QUERY = "SELECT * FROM " + tableName + " WHERE Z > 14 AND Z < 18";
@@ -112,48 +109,7 @@ public void testRangeScanBlockBytesScannedMetric() throws Exception {
Assert.assertEquals(count3, count2);
}
- private void clearBlockCache(String tableName) {
- HRegionServer regionServer = utility.getMiniHBaseCluster().getRegionServer(0);
- for (HRegion region : regionServer.getRegions(TableName.valueOf(tableName))) {
- regionServer.clearRegionBlockCache(region);
- }
- }
-
- private void flush(String tableName) throws IOException {
- HRegionServer regionServer = utility.getMiniHBaseCluster().getRegionServer(0);
- for (HRegion region : regionServer.getRegions(TableName.valueOf(tableName))) {
- region.flush(true);
- }
- }
-
-
private long countBlockBytesScannedFromSql(Statement stmt, String sql) throws SQLException {
- ResultSet rs = stmt.executeQuery(sql);
- while (rs.next()) {
- // loop to the end
- }
- return getBlockBytesScanned(rs);
- }
-
- private long getBlockBytesScanned(ResultSet rs) throws SQLException {
- if (!(rs instanceof PhoenixResultSet)) {
- return -1;
- }
- Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
-
- long sum = 0;
- boolean valid = false;
- for (Map.Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
- Long val = entry.getValue().get(MetricType.COUNT_BLOCK_BYTES_SCANNED);
- if (val != null) {
- sum += val.longValue();
- valid = true;
- }
- }
- if (valid) {
- return sum;
- } else {
- return -1;
- }
+ return TestUtil.getMetricFromSql(stmt, sql, MetricType.COUNT_BLOCK_BYTES_SCANNED);
}
}
\ No newline at end of file
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/FSReadTimeMetricIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/FSReadTimeMetricIT.java
new file mode 100644
index 00000000000..7d5324dbdb9
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/FSReadTimeMetricIT.java
@@ -0,0 +1,67 @@
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.monitoring.MetricType;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Map;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class FSReadTimeMetricIT extends BaseTest {
+ @BeforeClass
+ public static synchronized void doSetup() throws Exception {
+ Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+ props.put(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, "true");
+ setUpTestDriver(new ReadOnlyProps(props));
+ }
+
+ @Test
+ public void testFsReadTimeMetric() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String tableName = generateUniqueName();
+ PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+ // create table with small block size and upsert enough rows to have at least 2 blocks
+ stmt.execute("CREATE TABLE " + tableName
+ + " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG) BLOCKSIZE=200");
+ for (int i = 1; i <= 20; i++) {
+ String sql = String.format("UPSERT INTO %s VALUES (%d, %d)", tableName, i, i);
+ stmt.execute(sql);
+ }
+ conn.commit();
+ String SELECT_ALL_QUERY = "SELECT * FROM " + tableName;
+
+ // read from memory
+ long time0 = getFsReadTimeFromSql(stmt, SELECT_ALL_QUERY);
+ Assert.assertEquals(0, time0);
+
+ // flush and clear cache
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ TestUtil.clearBlockCache(utility, TableName.valueOf(tableName));
+
+ // read from disk
+ long time1 = getFsReadTimeFromSql(stmt, SELECT_ALL_QUERY);
+ Assert.assertTrue(time1 > 0);
+
+ // read from cache
+ long time2 = getFsReadTimeFromSql(stmt, SELECT_ALL_QUERY);
+ Assert.assertEquals(0, time2);
+ }
+
+ private long getFsReadTimeFromSql(Statement stmt, String sql) throws SQLException {
+ return TestUtil.getMetricFromSql(stmt, sql, MetricType.FS_READ_TIME);
+ }
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java
index d074d4a4837..2f83f6a747e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java
@@ -22,19 +22,17 @@
import java.sql.Connection;
import java.sql.DriverManager;
-import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -301,32 +299,6 @@ public void testUnionAll() throws Exception {
}
private long countRowsScannedFromSql(Statement stmt, String sql) throws SQLException {
- ResultSet rs = stmt.executeQuery(sql);
- while (rs.next()) {
- // loop to the end
- }
- return getRowsScanned(rs);
- }
-
- private long getRowsScanned(ResultSet rs) throws SQLException {
- if (!(rs instanceof PhoenixResultSet)) {
- return -1;
- }
- Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
-
- long sum = 0;
- boolean valid = false;
- for (Map.Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
- Long val = entry.getValue().get(MetricType.COUNT_ROWS_SCANNED);
- if (val != null) {
- sum += val.longValue();
- valid = true;
- }
- }
- if (valid) {
- return sum;
- } else {
- return -1;
- }
+ return TestUtil.getMetricFromSql(stmt, sql, MetricType.COUNT_ROWS_SCANNED);
}
}
\ No newline at end of file
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 6ea2a2eb656..ddf828859df 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -129,6 +129,7 @@
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;
@@ -816,6 +817,11 @@ public static void flush(HBaseTestingUtility utility, TableName table) throws IO
admin.flush(table);
}
+ public static void clearBlockCache(HBaseTestingUtility utility, TableName table) throws IOException {
+ Admin admin = utility.getAdmin();
+ admin.clearBlockCache(table);
+ }
+
public static void minorCompact(HBaseTestingUtility utility, TableName table)
throws IOException, InterruptedException {
try {
@@ -1450,4 +1456,25 @@ public static Path createTempDirectory() throws IOException {
return Files.createTempDirectory(Paths.get(System.getProperty("java.io.tmpdir")), null);
}
+ public static long getMetricFromSql(Statement stmt, String sql, MetricType metric)
+ throws SQLException {
+ ResultSet rs = stmt.executeQuery(sql);
+ while (rs.next()) { } // loop to the end so all read metrics are populated
+ Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
+
+ long sum = 0;
+ boolean valid = false;
+ for (Map.Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
+ Long val = entry.getValue().get(metric);
+ if (val != null) {
+ sum += val.longValue();
+ valid = true;
+ }
+ }
+ if (valid) {
+ return sum;
+ } else {
+ return -1;
+ }
+ }
}
From ad5c7cac751a312b0f6fe16fe6f9d17dde9b795c Mon Sep 17 00:00:00 2001
From: Palash Chauhan
Date: Fri, 18 Oct 2024 17:30:23 -0700
Subject: [PATCH 5/5] move test classes
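
The metric ITs no longer touch region server internals directly, so
they do not need to live in HBase packages. Move
BlockBytesScannedMetricIT and FSReadTimeMetricIT to
org.apache.phoenix.monitoring and add the missing license header to
FSReadTimeMetricIT.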
---
.../BlockBytesScannedMetricIT.java | 3 +--
.../monitoring}/FSReadTimeMetricIT.java | 21 +++++++++++++++++--
2 files changed, 20 insertions(+), 4 deletions(-)
rename phoenix-core/src/it/java/org/apache/{hadoop/hbase/regionserver => phoenix/monitoring}/BlockBytesScannedMetricIT.java (98%)
rename phoenix-core/src/it/java/org/apache/{hadoop/hbase/regionserver/wal => phoenix/monitoring}/FSReadTimeMetricIT.java (75%)
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BlockBytesScannedMetricIT.java
similarity index 98%
rename from phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
rename to phoenix-core/src/it/java/org/apache/phoenix/monitoring/BlockBytesScannedMetricIT.java
index 64ab2398556..5251e7af725 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/BlockBytesScannedMetricIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BlockBytesScannedMetricIT.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.hbase.regionserver;
+package org.apache.phoenix.monitoring;
import java.sql.Connection;
import java.sql.DriverManager;
@@ -27,7 +27,6 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/FSReadTimeMetricIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/FSReadTimeMetricIT.java
similarity index 75%
rename from phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/FSReadTimeMetricIT.java
rename to phoenix-core/src/it/java/org/apache/phoenix/monitoring/FSReadTimeMetricIT.java
index 7d5324dbdb9..3e134dfdf40 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/FSReadTimeMetricIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/FSReadTimeMetricIT.java
@@ -1,9 +1,26 @@
-package org.apache.hadoop.hbase.regionserver.wal;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.monitoring;
import org.apache.hadoop.hbase.TableName;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;