diff --git a/.github/workflows/build-cloudberry.yml b/.github/workflows/build-cloudberry.yml index 04d5e827b6e..f5d159a63ac 100644 --- a/.github/workflows/build-cloudberry.yml +++ b/.github/workflows/build-cloudberry.yml @@ -307,6 +307,10 @@ jobs: "gpcontrib/gp_sparse_vector:installcheck", "gpcontrib/gp_toolkit:installcheck"] }, + {"test":"ic-diskquota", + "make_configs":["gpcontrib/diskquota:installcheck"], + "shared_preload_libraries":"diskquota-2.3" + }, {"test":"ic-fixme", "make_configs":["src/test/regress:installcheck-fixme"], "enable_core_check":false @@ -1265,7 +1269,15 @@ jobs: { chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh - if ! time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then + + # Build BLDWRAP_POSTGRES_CONF_ADDONS for shared_preload_libraries if specified + EXTRA_CONF="" + if [[ -n "${{ matrix.shared_preload_libraries }}" ]]; then + EXTRA_CONF="shared_preload_libraries='${{ matrix.shared_preload_libraries }}'" + echo "Adding shared_preload_libraries: ${{ matrix.shared_preload_libraries }}" + fi + + if ! time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' BLDWRAP_POSTGRES_CONF_ADDONS=\"${EXTRA_CONF}\" SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then echo "::error::Demo cluster creation failed" exit 1 fi diff --git a/.github/workflows/build-deb-cloudberry.yml b/.github/workflows/build-deb-cloudberry.yml index 38c2391376e..6b707a556cb 100644 --- a/.github/workflows/build-deb-cloudberry.yml +++ b/.github/workflows/build-deb-cloudberry.yml @@ -1234,7 +1234,15 @@ jobs: { chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh - if ! time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then + + # Build BLDWRAP_POSTGRES_CONF_ADDONS for shared_preload_libraries if specified + EXTRA_CONF="" + if [[ -n "${{ matrix.shared_preload_libraries }}" ]]; then + EXTRA_CONF="shared_preload_libraries='${{ matrix.shared_preload_libraries }}'" + echo "Adding shared_preload_libraries: ${{ matrix.shared_preload_libraries }}" + fi + + if ! 
time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' BLDWRAP_POSTGRES_CONF_ADDONS=\"${EXTRA_CONF}\" SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then echo "::error::Demo cluster creation failed" exit 1 fi diff --git a/LICENSE b/LICENSE index 603400aa4cd..28796e982e1 100644 --- a/LICENSE +++ b/LICENSE @@ -311,12 +311,18 @@ The Greenplum Database software includes: gpcontrib/orafce/* see licenses/LICENSE-orafce.txt - ---------------------------- +---------------------------- BSD 3 Clause License gpcontrib/gpcloud/test/googletest see licenses/LICENSE-googletest.txt +---------------------------- + PostgreSQL License + + gpcontrib/diskquota/* + see licenses/LICENSE-diskquota.txt + ================================================================================ Apache Cloudberry includes codes from diff --git a/gpcontrib/Makefile b/gpcontrib/Makefile index 60fef1778c6..8d95a14f876 100644 --- a/gpcontrib/Makefile +++ b/gpcontrib/Makefile @@ -22,7 +22,8 @@ ifeq "$(enable_debug_extensions)" "yes" gp_legacy_string_agg \ gp_replica_check \ gp_toolkit \ - pg_hint_plan + pg_hint_plan \ + diskquota else recurse_targets = gp_sparse_vector \ gp_distribution_policy \ @@ -30,7 +31,8 @@ else gp_legacy_string_agg \ gp_exttable_fdw \ gp_toolkit \ - pg_hint_plan + pg_hint_plan \ + diskquota endif ifeq "$(with_zstd)" "yes" @@ -97,3 +99,4 @@ installcheck: $(MAKE) -C gp_sparse_vector installcheck $(MAKE) -C gp_toolkit installcheck $(MAKE) -C gp_exttable_fdw installcheck + $(MAKE) -C diskquota installcheck diff --git a/gpcontrib/diskquota/.gitignore b/gpcontrib/diskquota/.gitignore new file mode 100644 index 00000000000..bb04034d8a6 --- /dev/null +++ b/gpcontrib/diskquota/.gitignore @@ -0,0 +1,13 @@ +# Build directory +build*/ + +# The tests results +results/ + +# For IDE/Editors +.vscode +.idea +tags +cscope* +.ccls-cache/ +compile_commands.json diff --git a/gpcontrib/diskquota/CMakeLists.txt b/gpcontrib/diskquota/CMakeLists.txt new file mode 100644 index 00000000000..fad393cb101 --- /dev/null +++ b/gpcontrib/diskquota/CMakeLists.txt @@ -0,0 +1,178 @@ +cmake_minimum_required(VERSION 3.20) +# cmake_path requires 3.20 + +project(diskquota) + +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "Setting build type to 'Debug' as none was specified.") + set(CMAKE_BUILD_TYPE "Debug" CACHE + STRING "Choose the type of build." FORCE) +endif() + +# generate 'compile_commands.json' +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +# Retrieve repository information +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Git.cmake) +GitHash_Get(DISKQUOTA_GIT_HASH) + +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Gpdb.cmake) + + +# set include directories for all sub-projects +include_directories(${PG_INCLUDE_DIR_SERVER}) +include_directories(${PG_INCLUDE_DIR}) # for libpq +# For in-tree builds, libpq headers are in a separate directory +if(PG_INCLUDE_DIR_LIBPQ) + include_directories(${PG_INCLUDE_DIR_LIBPQ}) +endif() +# Overwrite the default build type flags set by cmake. +# We don't want the '-O3 -DNDEBUG' from cmake. Instead, those will be set by the CFLAGS from pg_config. +# And, the good news is, GPDB release always have '-g'. 
+set(CMAKE_C_FLAGS_RELEASE "" CACHE + STRING "Flags for RELEASE build" FORCE) +set(CMAKE_C_FLAGS_DEBUG "-DDISKQUOTA_DEBUG" + CACHE STRING "Flags for DEBUG build" FORCE) +# set link flags for all sub-projects +set(CMAKE_MODULE_LINKER_FLAGS "${PG_LD_FLAGS}") +if (APPLE) + set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -bundle_loader ${PG_BIN_DIR}/postgres") +endif() +# set c and ld flags for all projects +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${PG_C_FLAGS}") + +# generate version +if(NOT DEFINED DISKQUOTA_VERSION) + file(STRINGS VERSION DISKQUOTA_VERSION) +endif() + +string(REGEX REPLACE "^([0-9]+).[0-9]+.[0-9]+$" "\\1" DISKQUOTA_MAJOR_VERSION + ${DISKQUOTA_VERSION}) +string(REGEX REPLACE "^[0-9]+.([0-9]+).[0-9]+$" "\\1" DISKQUOTA_MINOR_VERSION + ${DISKQUOTA_VERSION}) +string(REGEX REPLACE "^[0-9]+.[0-9]+.([0-9]+)$" "\\1" DISKQUOTA_PATCH_VERSION + ${DISKQUOTA_VERSION}) + +if("${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}" STREQUAL "1.0") + # in special, version 1.0.x do not has suffix + set(DISKQUOTA_BINARY_NAME "diskquota") +else() + set(DISKQUOTA_BINARY_NAME + "diskquota-${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}") +endif() + +add_compile_definitions( + DISKQUOTA_VERSION="${DISKQUOTA_VERSION}" + DISKQUOTA_MAJOR_VERSION=${DISKQUOTA_MAJOR_VERSION} + DISKQUOTA_MINOR_VERSION=${DISKQUOTA_MINOR_VERSION} + DISKQUOTA_PATCH_VERSION=${DISKQUOTA_PATCH_VERSION} + DISKQUOTA_BINARY_NAME="${DISKQUOTA_BINARY_NAME}") + +set(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/src") +file(GLOB diskquota_SRC "${SRC_DIR}/*.c") + +set(DISKQUOTA_DDL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/control/ddl") +file(GLOB diskquota_DDL "${DISKQUOTA_DDL_DIR}/*") + +add_library(diskquota MODULE ${diskquota_SRC}) + +if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) + set(CMAKE_INSTALL_PREFIX + "${PG_HOME}" + CACHE PATH "default install prefix" FORCE) +endif() + +set_target_properties( + diskquota + PROPERTIES OUTPUT_NAME ${DISKQUOTA_BINARY_NAME} + PREFIX "" + C_STANDARD 99 + LINKER_LANGUAGE "C") + +TARGET_LINK_LIBRARIES(diskquota ${PG_LIB_DIR}/libpq.so) + +# packing part, move to a separate file if this part is too large +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) + +if(DEFINED DISKQUOTA_LAST_RELEASE_PATH) + message(STATUS "Copy pervious installer from ${DISKQUOTA_LAST_RELEASE_PATH}") + file(ARCHIVE_EXTRACT INPUT ${DISKQUOTA_LAST_RELEASE_PATH} PATTERNS "*.so") + file(GLOB so_files_list + "${CMAKE_BINARY_DIR}/lib/postgresql/*.so") + + foreach(so_path IN LISTS so_files_list) + get_filename_component(so_name ${so_path} NAME_WLE) + # Replace 'diskquota-x.y' with 'x.y'. 'diskquota' won't be replaced, which belongs to 1.x release. + string(REPLACE "diskquota-" "" so_ver ${so_name}) + # Install the previous so files. Those so files have versions less than current version. + # diskqutoa.so doesn't have version string in the file name. It belongs to 1.x release. 
+ if((${so_ver} STREQUAL "diskquota") OR + ${so_ver} VERSION_LESS ${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}) + list(APPEND DISKQUOTA_PREVIOUS_LIBRARY ${so_path}) + endif() + endforeach() + + install(PROGRAMS ${DISKQUOTA_PREVIOUS_LIBRARY} DESTINATION "lib/postgresql/") + + get_filename_component( + DISKQUOTA_LAST_RELEASE_FILENAME ${DISKQUOTA_LAST_RELEASE_PATH} NAME CACHE + "last release installer name") + string( + REGEX + REPLACE "^diskquota-([0-9]+).[0-9]+.[0-9]+-.*$" "\\1" + DISKQUOTA_LAST_MAJOR_VERSION ${DISKQUOTA_LAST_RELEASE_FILENAME}) + string( + REGEX + REPLACE "^diskquota-[0-9]+.([0-9]+).[0-9]+-.*$" "\\1" + DISKQUOTA_LAST_MINOR_VERSION ${DISKQUOTA_LAST_RELEASE_FILENAME}) + string( + REGEX + REPLACE "^diskquota-[0-9]+.[0-9]+.([0-9]+)-.*$" "\\1" + DISKQUOTA_LAST_PATCH_VERSION ${DISKQUOTA_LAST_RELEASE_FILENAME}) + + set(DISKQUOTA_LAST_VERSION + "${DISKQUOTA_LAST_MAJOR_VERSION}.${DISKQUOTA_LAST_MINOR_VERSION}.${DISKQUOTA_LAST_PATCH_VERSION}" + ) +endif() + +set(tgz_NAME + "diskquota-${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}.${DISKQUOTA_PATCH_VERSION}-${DISTRO_NAME}_x86_64" +) +set(CPACK_GENERATOR "TGZ") +set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF) +set(CPACK_PACKAGE_FILE_NAME ${tgz_NAME}) +include(CPack) +# create_artifact target is used to tar the package with version into a version-less tarball to be +# used on concourse gcs resource. It will be uploaded to a gcs version file (no diskquota version +# string in the file name), and be retrieved in the release step. Then we don't have to firgure out +# a way to add the version string back to the release file name, just untar it. +set(artifact_NAME "diskquota.tar.gz") +add_custom_target(create_artifact + COMMAND + ${CMAKE_COMMAND} --build . --target package + COMMAND + ${CMAKE_COMMAND} -E tar czvf ${artifact_NAME} "${tgz_NAME}.tar.gz") +# packing end + +# Create build-info +# The diskquota-build-info shouldn't be copied to GPDB release by install_gpdb_component +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/BuildInfo.cmake) +set(build_info_PATH ${CMAKE_CURRENT_BINARY_DIR}/diskquota-build-info) +BuildInfo_Create(${build_info_PATH} + VARS + DISKQUOTA_GIT_HASH + DISKQUOTA_VERSION + GP_MAJOR_VERSION + GP_VERSION + CMAKE_BUILD_TYPE) +# Create build-info end + +# Add installcheck targets +add_subdirectory(tests) +add_subdirectory(upgrade_test) + +# NOTE: keep install part at the end of file, to overwrite previous binary +install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") +install(FILES ${diskquota_DDL} DESTINATION "share/postgresql/extension/") +install(TARGETS diskquota DESTINATION "lib/postgresql/") +install(FILES ${build_info_PATH} DESTINATION ".") diff --git a/gpcontrib/diskquota/LICENSE b/gpcontrib/diskquota/LICENSE new file mode 100644 index 00000000000..6e94d88cbc9 --- /dev/null +++ b/gpcontrib/diskquota/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2004-2020 Pivotal Software, Inc. +Copyright (c) 2020-Present VMware, Inc. or its affiliates + +diskquota is licensed under the PostgreSQL license, the same license +as PostgreSQL. It contains parts of PostgreSQL source code. 
A copy of +the license is below: + +-------------- +PostgreSQL Database Management System +(formerly known as Postgres, then as Postgres95) + +Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + +Portions Copyright (c) 1994, The Regents of the University of California + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose, without fee, and without a written agreement +is hereby granted, provided that the above copyright notice and this +paragraph and the following two paragraphs appear in all copies. + +IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR +DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING +LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS +DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS +ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO +PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. diff --git a/gpcontrib/diskquota/Makefile b/gpcontrib/diskquota/Makefile new file mode 100644 index 00000000000..1ae174f0d6f --- /dev/null +++ b/gpcontrib/diskquota/Makefile @@ -0,0 +1,76 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Makefile for diskquota extension +# +# This Makefile wraps the CMake build system for integration with +# the Cloudberry build process. +# +# Usage: +# make # build the extension +# make install # install the extension +# make installcheck # run regression tests +# make clean # clean build artifacts + +ifdef USE_PGXS +# Standalone build: pg_config must be in PATH +PG_CONFIG_ABS := $(shell which pg_config) +PG_PREFIX := $(shell $(PG_CONFIG_ABS) --prefix) +CMAKE_OPTS := -DPG_CONFIG=$(PG_CONFIG_ABS) +else +# In-tree build +subdir = gpcontrib/diskquota +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +# Get absolute source directory path +PG_SRC_DIR_ABS := $(shell cd $(top_builddir) && pwd) +PG_CONFIG_ABS := $(PG_SRC_DIR_ABS)/src/bin/pg_config/pg_config +PG_PREFIX := $(prefix) +# Pass PG_SRC_DIR to CMake so it doesn't try to derive it from pg_config +CMAKE_OPTS := -DPG_CONFIG=$(PG_CONFIG_ABS) -DPG_SRC_DIR=$(PG_SRC_DIR_ABS) +endif + +.PHONY: all +all: build + +.PHONY: build +build: + @echo "Building diskquota with CMake..." + @if [ ! -f build/Makefile ]; then \ + mkdir -p build && \ + cd build && \ + cmake $(CMAKE_OPTS) -DCMAKE_INSTALL_PREFIX=$(DESTDIR)$(PG_PREFIX) .. 
; \ + fi + cd build && $(MAKE) + +.PHONY: install +install: build + cd build && $(MAKE) install + +.PHONY: installcheck +installcheck: + @echo "Running diskquota regression tests..." + @if [ ! -f build/Makefile ]; then \ + mkdir -p build && \ + cd build && \ + cmake $(CMAKE_OPTS) -DCMAKE_INSTALL_PREFIX=$(DESTDIR)$(PG_PREFIX) .. ; \ + fi + cd build && $(MAKE) installcheck + +.PHONY: clean +clean: + rm -rf build diff --git a/gpcontrib/diskquota/README.md b/gpcontrib/diskquota/README.md new file mode 100644 index 00000000000..a6ce511273d --- /dev/null +++ b/gpcontrib/diskquota/README.md @@ -0,0 +1,359 @@ +# Diskquota for Apache Cloudberry + +> **Note**: This project is forked from [greenplum-db/diskquota](https://github.com/greenplum-db/diskquota-archive) +> and has been adapted specifically for [Apache Cloudberry](https://cloudberry.apache.org/). +> It requires Apache Cloudberry 2.0+ (based on PostgreSQL 14). + +## Overview + +Diskquota is an extension that provides disk usage enforcement for database +objects in Apache Cloudberry. Currently it supports setting quota limits on schema +and role in a given database and limiting the amount of disk space that a schema +or a role can use. + +This project is inspired by Heikki's +[pg_quota project](https://github.com/hlinnaka/pg_quota) and enhances it in +two aspects: + +1. To support different kinds of DDL and DML which may change the disk usage +of database objects. + +2. To support diskquota extension on MPP architecture. + +Diskquota is a soft limit of disk usage. On one hand it has some delay to +detect the schemas or roles whose quota limit is exceeded. On the other hand, +'soft limit' supports two kinds of enforcement: Query loading data into +out-of-quota schema/role will be forbidden before query is running. Query +loading data into schema/role with rooms will be cancelled when the quota +limit is reached dynamically during the query is running. + +## Design + +Diskquota extension is based on background worker framework in Apache Cloudberry. +There are two kinds of background workers: diskquota launcher and diskquota worker. + +There is only one launcher process per database coordinator. There is no launcher +process for segments. +Launcher process is responsible for managing worker processes: Calling +RegisterDynamicBackgroundWorker() to create new workers and keep their handle. +Calling TerminateBackgroundWorker() to terminate workers which are disabled +when DBA modifies GUC diskquota.monitor_databases. + +There are many worker processes, one for each database which is listed +in diskquota.monitor_databases. Same as launcher process, worker processes +only run at coordinator node. Since each worker process needs to call SPI to fetch +active table size, to limit the total cost of worker processes, we support to +monitor at most 10 databases at the same time currently. Worker processes are +responsible for monitoring the disk usage of schemas and roles for the target +database, and do quota enforcement. It will periodically (can be set via +diskquota.naptime) recalculate the table size of active tables, and update +their corresponding schema or owner's disk usage. Then compare with quota +limit for those schemas or roles. If exceeds the limit, put the corresponding +schemas or roles into the rejectmap in shared memory. Schemas or roles in +rejectmap are used to do query enforcement to cancel queries which plan to +load data into these schemas or roles. 
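As a quick illustration of the rejectmap described above, its entries can be inspected from SQL. This is only a sketch; the `diskquota.rejectmap` view and its columns are taken from the extension DDL added later in this patch:

```sql
-- List the targets currently in the rejectmap, i.e. schemas/roles whose quota
-- has been exceeded and into which further data loading will be rejected.
SELECT target_type,     -- kind of quota target (schema, role, ...)
       target_oid,      -- OID of the schema or role that exceeded its quota
       tablespace_oid,  -- set for tablespace-scoped quotas
       seg_exceeded     -- true if a per-segment quota was exceeded
FROM diskquota.rejectmap;
```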
+
+From the MPP perspective, the diskquota launcher and worker processes all run on
+the Coordinator side. The Coordinator-only design saves memory on the
+Segments and simplifies the communication from Coordinator to Segments, which happens through periodic SPI
+queries. Segments are used to detect the active tables and
+calculate the active table size. The Coordinator aggregates the table sizes from each
+segment and maintains the disk quota model.
+
+### Active table
+
+Active tables are the tables whose size may have changed during the last quota
+check interval. Active tables are detected on the Segment (QE) side: hooks in
+smgrcreate(), smgrextend() and smgrtruncate() are used to detect active tables
+and store them (currently as relfilenodes) in shared memory. The diskquota worker
+process periodically dispatches queries to all the segments,
+consumes the active tables from shared memory, converts relfilenodes to relation OIDs,
+and calculates table sizes by calling pg_table_size(), which sums
+the size of a table (including the base, vm, fsm, and toast forks) on each segment.
+
+### Enforcement
+
+Enforcement is implemented as hooks. There are two kinds of enforcement hooks:
+enforcement before a query runs and enforcement while a query is running.
+The 'before query' hook is implemented at ExecutorCheckPerms_hook in function
+ExecCheckRTPerms().
+The 'during query' hook is implemented at DispatcherCheckPerms_hook in function
+checkDispatchResult(). For queries that load a large amount of data, the dispatcher
+polls the connection with a poll timeout. The hook is called at every
+poll timeout with waitMode == DISPATCH_WAIT_NONE. Currently only the async
+dispatcher supports 'during query' quota enforcement.
+
+### Quota setting store
+
+The quota limit of a schema or a role is stored in the table 'quota_config' in the
+'diskquota' schema of the monitored database, so each database stores and manages
+its own disk quota configuration. Note that although a role is a cluster-level
+database object, we limit the disk quota of a role to be database specific.
+That is to say, a role may have different quota limits on different databases,
+and its disk usage is isolated between databases.
+
+## Development
+
+### Prerequisites
+
+The following packages need to be installed:
+
+- openssl-devel
+- krb5-devel
+- [cmake](https://cmake.org) (>= 3.20)
+
+On RHEL/CentOS/Rocky Linux:
+```bash
+sudo yum install openssl-devel krb5-devel cmake
+```
+
+On Ubuntu/Debian:
+```bash
+sudo apt-get install libssl-dev libkrb5-dev cmake
+```
+
+### Build & Install
+
+Diskquota uses CMake as its build system, wrapped by a Makefile for integration with the Cloudberry build process.
+
+#### Option 1: Build with Apache Cloudberry Source Tree
+
+Diskquota is included in the Apache Cloudberry source tree:
+
+```bash
+cd 
+./configure [options...]
+
+# Build everything
+make -j$(nproc)
+make install
+
+# Or build diskquota only
+make -C gpcontrib/diskquota
+make -C gpcontrib/diskquota install
+```
+
+#### Option 2: Standalone Build (without source tree)
+
+If you only have an installed Apache Cloudberry (no source tree):
+
+```bash
+# Source the environment first
+source /path/to/cloudberry-db/cloudberry-env.sh
+
+cd gpcontrib/diskquota
+make
+make install
+```
+
+### Setup
+
+1. Create a database to store global information:
+```sql
+CREATE DATABASE diskquota;
+```
+
+2. 
Enable diskquota as preload library: +```bash +# Set USER environment variable if not set (required by gpconfig) +export USER=$(whoami) + +# enable diskquota in preload library +gpconfig -c shared_preload_libraries -v 'diskquota-' +# restart database +gpstop -ar +``` + +3. Config GUC of diskquota: +```bash +# set naptime (seconds) to refresh the disk quota stats periodically +gpconfig -c diskquota.naptime -v 2 +``` + +4. Create diskquota extension in monitored database: +```sql +CREATE EXTENSION diskquota; +``` + +5. Initialize existing table size information (needed if `CREATE EXTENSION` is not executed in a newly created database): +```sql +SELECT diskquota.init_table_size_table(); +``` + +## Usage + +### Set/update/delete schema quota limit + +```sql +CREATE SCHEMA s1; +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SET search_path TO s1; + +CREATE TABLE a(i int) DISTRIBUTED BY (i); +-- insert small data succeeded +INSERT INTO a SELECT generate_series(1,100); +-- insert large data failed +INSERT INTO a SELECT generate_series(1,10000000); +-- insert small data failed +INSERT INTO a SELECT generate_series(1,100); + +-- delete quota configuration +SELECT diskquota.set_schema_quota('s1', '-1'); +-- insert small data succeed +SELECT pg_sleep(5); +INSERT INTO a SELECT generate_series(1,100); +RESET search_path; +``` + +### Set/update/delete role quota limit + +```sql +CREATE ROLE u1 NOLOGIN; +CREATE TABLE b (i int) DISTRIBUTED BY (i); +ALTER TABLE b OWNER TO u1; +SELECT diskquota.set_role_quota('u1', '1 MB'); + +-- insert small data succeeded +INSERT INTO b SELECT generate_series(1,100); +-- insert large data failed +INSERT INTO b SELECT generate_series(1,10000000); +-- insert small data failed +INSERT INTO b SELECT generate_series(1,100); + +-- delete quota configuration +SELECT diskquota.set_role_quota('u1', '-1'); +-- insert small data succeed +SELECT pg_sleep(5); +INSERT INTO b SELECT generate_series(1,100); +RESET search_path; +``` + +### Show schema quota limit and current usage + +```sql +SELECT * FROM diskquota.show_fast_schema_quota_view; +``` + +## Test + +Before running regression tests, make sure: + +1. The diskquota extension is installed (`make install`) on all nodes +2. The `shared_preload_libraries` is configured and the cluster is restarted +3. The `diskquota` database exists + +```bash +# Set USER environment variable if not set (required by gpconfig) +export USER=$(whoami) + +# Configure shared_preload_libraries (use current version) +gpconfig -c shared_preload_libraries -v 'diskquota-2.3' + +# Restart the cluster +gpstop -ar + +# Create diskquota database if not exists +createdb diskquota +``` + +Run regression tests: +```bash +# From source tree build: +make -C gpcontrib/diskquota installcheck + +# Or from build directory: +cd gpcontrib/diskquota/build +make installcheck +``` + +Show quick diff of regress results: +```bash +cd gpcontrib/diskquota/build +make diff__ +``` + +## HA + +Not implemented yet. One solution would be: start launcher process on standby +and enable it to fork worker processes when switching from standby Coordinator to Coordinator. + +## Benchmark & Performance Test + +### Cost of diskquota worker +To be added. + +### Impact on OLTP queries +To be added. + +## Notes + +### Drop database with diskquota enabled + +If DBA created diskquota extension in a database, there will be a connection +to this database from diskquota worker process. 
The DBA needs to drop the diskquota
+extension in this database first; after that, the database can be dropped successfully.
+
+### Temp table
+
+Diskquota supports limiting the disk usage of temp tables as well,
+but schema quotas and role quotas treat them differently. For a role, i.e. the owner of the temp table,
+diskquota treats a temp table the same as a normal table and adds its size to
+the owner's quota. For a schema, however, a temp table lives in the namespace
+'pg_temp_backend_id', so its size is not counted towards the current schema's quota.
+
+## Known Issues
+
+### Uncommitted transactions
+
+Since Apache Cloudberry doesn't support the READ UNCOMMITTED isolation level,
+our implementation cannot detect a newly created table inside an
+uncommitted transaction (see the example below). Hence enforcement on
+that newly created table does not work. After the transaction commits,
+the diskquota worker process can detect the newly created table
+and enforce the quota accordingly in later queries.
+
+```sql
+-- suppose quota of schema s1 is 1MB
+SET search_path TO s1;
+CREATE TABLE b (i int) DISTRIBUTED BY (i);
+BEGIN;
+CREATE TABLE a (i int) DISTRIBUTED BY (i);
+-- Issue: quota enforcement doesn't work on table a
+INSERT INTO a SELECT generate_series(1,200000);
+-- quota enforcement works on table b
+INSERT INTO b SELECT generate_series(1,200000);
+-- quota enforcement works on table a,
+-- since quota limit of schema s1 has already been exceeded
+INSERT INTO a SELECT generate_series(1,200000);
+END;
+```
+
+The 'CREATE TABLE AS' command has a similar problem.
+
+One solution direction is to calculate the additional 'uncommitted data size'
+for schemas and roles in the worker process. Since pg_table_size needs to hold
+an AccessShareLock on the relation (and the worker process doesn't even know this reloid exists),
+we would need to skip the lock and call stat() directly, tolerating unlinked files.
+Skipping the lock is dangerous, so we plan to leave this as a known issue at the current stage.
+
+### Missing empty schema or role in views
+
+Currently, if a schema contains no tables, or no table is owned by a
+specific role, that schema or role will not be listed in
+show_fast_schema_quota_view and show_fast_role_quota_view.
+
+### Out of shared memory
+
+The diskquota extension uses two kinds of shared memory: one stores the
+rejectmap and the other stores the active table list. The rejectmap shared
+memory can hold up to 1 MiB of database objects which exceed their quota limit.
+The active table list shared memory can hold up to 1 MiB of active tables by
+default, and the user can adjust this via the GUC diskquota_max_active_tables.
+
+As shared memory is pre-allocated, the database needs to be restarted after
+this GUC value is updated.
+
+If the rejectmap shared memory is full, it is possible to load data into some
+schemas or roles whose quota limits have been reached.
+If the active table shared memory is full, the disk quota worker may fail to detect
+the corresponding disk usage change in time.
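A minimal sketch of raising that limit, assuming the GUC is exposed to `gpconfig` as `diskquota.max_active_tables` (the paragraph above spells it `diskquota_max_active_tables`, so verify the exact name against the installed version):

```bash
# Hypothetical example: enlarge the active-table shared memory region.
# The value is assumed to be the maximum number of active-table entries.
gpconfig -c diskquota.max_active_tables -v 2097152
# Shared memory is pre-allocated at startup, so a restart is required.
gpstop -ar
```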
diff --git a/gpcontrib/diskquota/VERSION b/gpcontrib/diskquota/VERSION new file mode 100644 index 00000000000..276cbf9e285 --- /dev/null +++ b/gpcontrib/diskquota/VERSION @@ -0,0 +1 @@ +2.3.0 diff --git a/gpcontrib/diskquota/cmake/BuildInfo.cmake b/gpcontrib/diskquota/cmake/BuildInfo.cmake new file mode 100644 index 00000000000..6e256f34502 --- /dev/null +++ b/gpcontrib/diskquota/cmake/BuildInfo.cmake @@ -0,0 +1,32 @@ +# Create a build info file based on the given cmake variables +# For example: +# BuildInfo_Create( +# ${CMAKE_CURRENT_BINARY_DIR}/build-info +# VARS +# DISKQUOTA_GIT_HASH +# GP_MAJOR_VERSION) +# ) +# will create a build info file: +# ❯ cat build-info +# DISKQUOTA_GIT_HASH = 151ed92 +# GP_MAJOR_VERSION = 6 + +function(BuildInfo_Create path) + cmake_parse_arguments( + arg + "" + "" + "VARS" + ${ARGN}) + + # Set REGRESS test cases + foreach(key IN LISTS arg_VARS) + get_property(val VARIABLE PROPERTY ${key}) + list(APPEND info_list "${key} = ${val}") + endforeach() + file(WRITE ${path} "") + foreach(content IN LISTS info_list) + file(APPEND ${path} "${content}\n") + endforeach() +endfunction() + diff --git a/gpcontrib/diskquota/cmake/Distro.cmake b/gpcontrib/diskquota/cmake/Distro.cmake new file mode 100644 index 00000000000..bf7bcbf687d --- /dev/null +++ b/gpcontrib/diskquota/cmake/Distro.cmake @@ -0,0 +1,44 @@ +# Cmake utility to identify the distribution names. Currently Below distributions +# can be identified: +# - centos6 +# - centos7 +# - unbuntu18 +include_guard() + +set(DISTRO_NAME "" CACHE STRING "Distribution name of current build environment") + +if(NOT DISTRO_NAME) + SET(DISTRO_NAME unknown) + if(EXISTS "/etc/redhat-release") + file(READ /etc/redhat-release rh_release) + string(REGEX MATCH "CentOS release 6.*" matched6 "${rh_release}") + string(REGEX MATCH "CentOS Linux release 7.*" matched7 "${rh_release}") + string(REGEX MATCH "Red Hat Enterprise Linux release 8.*" matched_rhel8 "${rh_release}") + string(REGEX MATCH "CentOS Linux release 8.*" matched_centos8 "${rh_release}") + string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") + string(REGEX MATCH "Red Hat Enterprise Linux release 9.*" matched_rhel9 "${rh_release}") + string(REGEX MATCH "Rocky Linux release 9.*" matched_rocky9 "${rh_release}") + if (matched6) + set(DISTRO_NAME rhel6) + elseif(matched7) + set(DISTRO_NAME rhel7) + elseif(matched_rhel8 OR matched_centos8 OR matched_rocky8) + set(DISTRO_NAME rhel8) + elseif(matched_rhel9 OR matched_rocky9) + set(DISTRO_NAME rhel9) + endif() + elseif(EXISTS "/etc/os-release") + file(READ /etc/os-release os_release) + string(REGEX MATCH "ID=ubuntu" isubuntu "${os_release}") + string(REGEX MATCH "VERSION_ID=\"18.04\"" matched1804 "${os_release}") + if (isubuntu AND matched1804) + SET(DISTRO_NAME ubuntu18.04) + endif() + + string(REGEX MATCH "ID=photon" isphoton "${os_release}") + string(REGEX MATCH "VERSION_ID=3.0" matched30 "${os_release}") + if (isphoton AND matched30) + SET(DISTRO_NAME photon3) + endif() + endif() +endif() diff --git a/gpcontrib/diskquota/cmake/Git.cmake b/gpcontrib/diskquota/cmake/Git.cmake new file mode 100644 index 00000000000..81a68b1f1f4 --- /dev/null +++ b/gpcontrib/diskquota/cmake/Git.cmake @@ -0,0 +1,9 @@ +# get git hash +macro(GitHash_Get _git_hash) + find_package(Git) + execute_process( + COMMAND ${GIT_EXECUTABLE} log -1 --pretty=format:%h + OUTPUT_VARIABLE ${_git_hash} + OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) +endmacro() diff --git 
a/gpcontrib/diskquota/cmake/Gpdb.cmake b/gpcontrib/diskquota/cmake/Gpdb.cmake new file mode 100644 index 00000000000..bd2ba722476 --- /dev/null +++ b/gpcontrib/diskquota/cmake/Gpdb.cmake @@ -0,0 +1,93 @@ +# Use pg_config to detect postgres dependencies +# +# Variables: +# +# PG_CONFIG - the path to the pg_config executable to be used. this determines the +# version to be built with. +# GP_MAJOR_VERSION - the major version parsed from gpdb source +# GP_VERSION - The GP_VERSION string +# PG_BIN_DIR - location of user executables +# PG_INCLUDE_DIR - location of C header files of the client +# PG_INCLUDE_DIR_SERVER - location of C header files for the server +# PG_LIBS - LIBS value used when PostgreSQL was built +# PG_LIB_DIR - location of object code libraries +# PG_PKG_LIB_DIR - location of dynamically loadable modules +# PG_SHARE_DIR - location of architecture-independent support files +# PG_PGXS - location of extension makefile +# PG_CPP_FLAGS - CPPFLAGS value used when PostgreSQL was built +# PG_C_FLAGS - CFLAGS value used when PostgreSQL was built +# PG_LD_FLAGS - LDFLAGS value used when PostgreSQL was built +# PG_HOME - The installation directory of Greenplum +# PG_SRC_DIR - The directory of the postgres/greenplum source code + +include_guard() +find_program(PG_CONFIG pg_config) +if(PG_CONFIG) + message(STATUS "Use '${PG_CONFIG}'") +else() + message(FATAL_ERROR "Unable to find 'pg_config'") +endif() +exec_program(${PG_CONFIG} ARGS --includedir OUTPUT_VARIABLE PG_INCLUDE_DIR) +exec_program(${PG_CONFIG} ARGS --includedir-server OUTPUT_VARIABLE PG_INCLUDE_DIR_SERVER) +exec_program(${PG_CONFIG} ARGS --pkglibdir OUTPUT_VARIABLE PG_PKG_LIB_DIR) +exec_program(${PG_CONFIG} ARGS --sharedir OUTPUT_VARIABLE PG_SHARE_DIR) +exec_program(${PG_CONFIG} ARGS --bindir OUTPUT_VARIABLE PG_BIN_DIR) +exec_program(${PG_CONFIG} ARGS --cppflags OUTPUT_VARIABLE PG_CPP_FLAGS) +exec_program(${PG_CONFIG} ARGS --cflags OUTPUT_VARIABLE PG_C_FLAGS) +exec_program(${PG_CONFIG} ARGS --ldflags OUTPUT_VARIABLE PG_LD_FLAGS) +exec_program(${PG_CONFIG} ARGS --libs OUTPUT_VARIABLE PG_LIBS) +exec_program(${PG_CONFIG} ARGS --libdir OUTPUT_VARIABLE PG_LIB_DIR) +exec_program(${PG_CONFIG} ARGS --pgxs OUTPUT_VARIABLE PG_PGXS) +get_filename_component(PG_HOME "${PG_BIN_DIR}/.." 
ABSOLUTE) + +# If PG_SRC_DIR is provided (in-tree build), use source tree paths +# This is necessary because pg_config returns install paths, +# which don't exist yet during in-tree builds +if(PG_SRC_DIR) + set(PG_INCLUDE_DIR "${PG_SRC_DIR}/src/include") + set(PG_INCLUDE_DIR_SERVER "${PG_SRC_DIR}/src/include") + # libpq headers and library are in src/interfaces/libpq in source tree + set(PG_INCLUDE_DIR_LIBPQ "${PG_SRC_DIR}/src/interfaces/libpq") + set(PG_LIB_DIR "${PG_SRC_DIR}/src/interfaces/libpq") + message(STATUS "In-tree build: using source include path '${PG_INCLUDE_DIR}'") +else() + # Standalone build: try to derive PG_SRC_DIR from Makefile.global (optional) + get_filename_component(pgsx_SRC_DIR ${PG_PGXS} DIRECTORY) + set(makefile_global ${pgsx_SRC_DIR}/../Makefile.global) + if(EXISTS ${makefile_global}) + execute_process( + COMMAND grep "^abs_top_builddir" ${makefile_global} + COMMAND sed s/.*abs_top_builddir.*=\(.*\)/\\1/ + OUTPUT_VARIABLE PG_SRC_DIR OUTPUT_STRIP_TRAILING_WHITESPACE + ERROR_QUIET) + if(PG_SRC_DIR) + string(STRIP ${PG_SRC_DIR} PG_SRC_DIR) + endif() + endif() +endif() + +# Get the GP_MAJOR_VERSION from header +file(READ ${PG_INCLUDE_DIR}/pg_config.h config_header) +string(REGEX MATCH "#define *GP_MAJORVERSION *\"[0-9]+\"" macrodef "${config_header}") +string(REGEX MATCH "[0-9]+" GP_MAJOR_VERSION "${macrodef}") +if (GP_MAJOR_VERSION) + message(STATUS "Build extension for Cloudberry ${GP_MAJOR_VERSION}") +else() + message(FATAL_ERROR "Cannot read GP_MAJORVERSION from '${PG_INCLUDE_DIR}/pg_config.h'") +endif() +string(REGEX MATCH "#define *GP_VERSION *\"[^\"]*\"" macrodef "${config_header}") +string(REGEX REPLACE ".*\"\(.*\)\".*" "\\1" GP_VERSION "${macrodef}") +if (GP_VERSION) + message(STATUS "The exact Cloudberry version is '${GP_VERSION}'") +else() + message(FATAL_ERROR "Cannot read GP_VERSION from '${PG_INCLUDE_DIR}/pg_config.h'") +endif() + +# Check if PG_SRC_DIR is available (for source-dependent features like isolation2 tests) +if ("${PG_SRC_DIR}" STREQUAL "" OR NOT EXISTS "${PG_SRC_DIR}") + message(STATUS "PG_SRC_DIR not found or empty, source-dependent features will be disabled") + set(PG_SRC_DIR_AVAILABLE OFF CACHE BOOL "Whether PG_SRC_DIR is available") +else() + message(STATUS "PG_SRC_DIR is '${PG_SRC_DIR}'") + set(PG_SRC_DIR_AVAILABLE ON CACHE BOOL "Whether PG_SRC_DIR is available") +endif() diff --git a/gpcontrib/diskquota/cmake/Regress.cmake b/gpcontrib/diskquota/cmake/Regress.cmake new file mode 100644 index 00000000000..9f823e4d998 --- /dev/null +++ b/gpcontrib/diskquota/cmake/Regress.cmake @@ -0,0 +1,252 @@ +# CMake module for create regress test target. +# +# Usage: +# RegressTarget_Add( +# SQL_DIR [ ...] +# EXPECTED_DIR [ ...] +# RESULTS_DIR +# [INIT_FILE ...] +# [SCHEDULE_FILE ...] +# [REGRESS ...] +# [EXCLUDE ...] +# [REGRESS_OPTS ...] +# [REGRESS_TYPE isolation2/regress] +# [RUN_TIMES ] +# [EXCLUDE_FAULT_INJECT_TEST ] +# ) +# All the file path can be the relative path to ${CMAKE_CURRENT_SOURCE_DIR}. +# A bunch of diff targets will be created as well for comparing the regress results. The diff +# target names like diff__ +# +# Use RUN_TIMES to specify how many times the regress tests should be executed. A negative RUN_TIMES +# will run the test infinite times. 
+# +# NOTE: To use this cmake file in another project, below files needs to be placed alongside: +# - regress_show_diff.sh +# - regress_loop.sh +# +# NOTE: If the input sql file extension is ".in.sql" instead of ".sql", the "@VAR@" in the input +# file will be replaced by the corresponding cmake VAR before tests are executed. +# +# NOTE: The directory that comes later in the SQL_DIR/EXPECTED_DIR list has a higher priory. The +# test case with the same name will be overwritten by the case that comes after in the directory +# list.t +# +# Example: +# RegressTarget_Add(installcheck_avro_fmt +# REGRESS ${avro_regress_TARGETS} +# INIT_FILE init_file +# DATA_DIR data +# SQL_DIR sql +# EXPECTED_DIR expected_${GP_MAJOR_VERSION}) + +# CMAKE_CURRENT_FUNCTION_LIST_DIR - 3.17 +cmake_minimum_required(VERSION 3.17) + +# pg_isolation2_regress was not shipped with GPDB release. It needs to be created from source. +function(_PGIsolation2Target_Add working_DIR) + if(TARGET pg_isolation2_regress) + return() + endif() + + add_custom_target( + pg_isolation2_regress + COMMAND + make -C ${PG_SRC_DIR}/src/test/isolation2 install + COMMAND + ${CMAKE_COMMAND} -E copy_if_different + ${PG_SRC_DIR}/src/test/isolation2/sql_isolation_testcase.py ${working_DIR} + ) +endfunction() + +# Find all tests in the given directory which uses fault injector, and add them to +# fault_injector_test_list. +function(_Find_FaultInjector_Tests sql_DIR) + if (NOT fault_injector_test_list) + set(fault_injector_test_list "" PARENT_SCOPE) + endif() + set(test_list ${fault_injector_test_list}) + + get_filename_component(sql_DIR ${sql_DIR} ABSOLUTE) + file(GLOB files "${sql_DIR}/*.sql") + foreach(f ${files}) + set(ret 1) + execute_process( + COMMAND + grep gp_inject_fault ${f} + OUTPUT_QUIET + RESULT_VARIABLE ret) + if(ret EQUAL 0) + get_filename_component(test_name ${f} NAME_WE) + if (NOT test_name IN_LIST test_list) + list(APPEND test_list ${test_name}) + endif() + endif() + endforeach() + + set(fault_injector_test_list ${test_list} PARENT_SCOPE) +endfunction() + +# Create symbolic links in the binary dir to input SQL files. 
+function(_Link_Test_Files src_DIR dest_DIR suffix) + get_filename_component(src_DIR ${src_DIR} ABSOLUTE) + file(MAKE_DIRECTORY ${dest_DIR}) + file(GLOB files "${src_DIR}/*.${suffix}") + foreach(f ${files}) + get_filename_component(file_name ${f} NAME) + file(CREATE_LINK ${f} ${dest_DIR}/${file_name} SYMBOLIC) + endforeach() + file(GLOB files "${src_DIR}/*.in.${suffix}") + foreach(f ${files}) + get_filename_component(file_name ${f} NAME_WE) + configure_file(${f} ${dest_DIR}/${file_name}.${suffix}) + endforeach() +endfunction() + +function(RegressTarget_Add name) + cmake_parse_arguments( + arg + "" + "RESULTS_DIR;DATA_DIR;REGRESS_TYPE;RUN_TIMES;EXCLUDE_FAULT_INJECT_TEST" + "SQL_DIR;EXPECTED_DIR;REGRESS;EXCLUDE;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" + ${ARGN}) + if (NOT arg_EXPECTED_DIR) + message(FATAL_ERROR + "'EXPECTED_DIR' needs to be specified.") + endif() + if (NOT arg_SQL_DIR) + message(FATAL_ERROR + "'SQL_DIR' needs to be specified.") + endif() + if (NOT arg_RESULTS_DIR) + message(FATAL_ERROR "'RESULTS_DIR' needs to be specified") + endif() + + set(working_DIR "${CMAKE_CURRENT_BINARY_DIR}/${name}") + file(MAKE_DIRECTORY ${working_DIR}) + + # Isolation2 test has different executable to run + if(arg_REGRESS_TYPE STREQUAL isolation2) + set(regress_BIN ${PG_SRC_DIR}/src/test/isolation2/pg_isolation2_regress) + _PGIsolation2Target_Add(${working_DIR}) + else() + # For in-tree builds, use source tree path; for standalone builds, use installed path + if(PG_SRC_DIR AND EXISTS ${PG_SRC_DIR}/src/test/regress/pg_regress) + set(regress_BIN ${PG_SRC_DIR}/src/test/regress/pg_regress) + else() + set(regress_BIN ${PG_PKG_LIB_DIR}/pgxs/src/test/regress/pg_regress) + endif() + if (NOT EXISTS ${regress_BIN}) + message(FATAL_ERROR + "Cannot find 'pg_regress' executable by path '${regress_BIN}'. Is 'pg_config' in the $PATH?") + endif() + endif() + + # Link input sql files to the build dir + foreach(sql_DIR IN LISTS arg_SQL_DIR) + _Link_Test_Files(${sql_DIR} ${working_DIR}/sql sql) + # Find all tests using fault injector + if(arg_EXCLUDE_FAULT_INJECT_TEST) + _Find_FaultInjector_Tests(${sql_DIR}) + endif() + endforeach() + + # Link output out files to the build dir + foreach(expected_DIR IN LISTS arg_EXPECTED_DIR) + _Link_Test_Files(${expected_DIR} ${working_DIR}/expected out) + endforeach() + + # Set REGRESS test cases + foreach(r IN LISTS arg_REGRESS) + if (arg_EXCLUDE_FAULT_INJECT_TEST AND (r IN_LIST fault_injector_test_list)) + continue() + endif() + set(regress_arg ${regress_arg} ${r}) + endforeach() + + # Set REGRESS options + foreach(o IN LISTS arg_INIT_FILE) + get_filename_component(init_file_PATH ${o} ABSOLUTE) + list(APPEND arg_REGRESS_OPTS "--init=${init_file_PATH}") + endforeach() + foreach(o IN LISTS arg_SCHEDULE_FILE) + get_filename_component(schedule_file_PATH ${o} ABSOLUTE) + list(APPEND arg_REGRESS_OPTS "--schedule=${schedule_file_PATH}") + endforeach() + foreach(o IN LISTS arg_EXCLUDE) + list(APPEND to_exclude ${o}) + endforeach() + if(arg_EXCLUDE_FAULT_INJECT_TEST) + list(APPEND to_exclude ${fault_injector_test_list}) + endif() + if (to_exclude) + set(exclude_arg "--exclude-tests=${to_exclude}") + string(REPLACE ";" "," exclude_arg "${exclude_arg}") + set(regress_opts_arg ${regress_opts_arg} ${exclude_arg}) + endif() + foreach(o IN LISTS arg_REGRESS_OPTS) + # If the fault injection tests are excluded, ignore the --load-extension=gp_inject_fault as + # well. 
+ if (arg_EXCLUDE_FAULT_INJECT_TEST AND (o MATCHES ".*inject_fault")) + continue() + endif() + set(regress_opts_arg ${regress_opts_arg} ${o}) + endforeach() + + get_filename_component(results_DIR ${arg_RESULTS_DIR} ABSOLUTE) + if (arg_DATA_DIR) + get_filename_component(data_DIR ${arg_DATA_DIR} ABSOLUTE) + set(ln_data_dir_CMD ln -s ${data_DIR} data) + endif() + + set(regress_command + ${regress_BIN} ${regress_opts_arg} ${regress_arg}) + if (arg_RUN_TIMES) + set(test_command + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/regress_loop.sh + ${arg_RUN_TIMES} + ${regress_command}) + else() + set(test_command ${regress_command}) + endif() + + # Create the target + add_custom_target( + ${name} + WORKING_DIRECTORY ${working_DIR} + COMMAND rm -f results + COMMAND mkdir -p ${results_DIR} + COMMAND ln -s ${results_DIR} results + COMMAND rm -f data + COMMAND ${ln_data_dir_CMD} + COMMAND + ${test_command} + || + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/regress_show_diff.sh ${working_DIR} + ) + + if(arg_REGRESS_TYPE STREQUAL isolation2) + add_dependencies(${name} pg_isolation2_regress) + endif() + + # Add targets for easily showing results diffs + FILE(GLOB expected_files ${expected_DIR}/*.out) + foreach(f IN LISTS expected_files) + get_filename_component(casename ${f} NAME_WE) + set(diff_target_name diff_${name}_${casename}) + # Check if the diff target has been created before + if(NOT TARGET ${diff_target_name}) + add_custom_target(${diff_target_name} + COMMAND + diff + ${working_DIR}/expected/${casename}.out + ${working_DIR}/results/${casename}.out || exit 0 + COMMAND + echo ${working_DIR}/expected/${casename}.out + COMMAND + echo ${working_DIR}/results/${casename}.out + ) + endif() + endforeach() +endfunction() + diff --git a/gpcontrib/diskquota/cmake/install_gpdb_component b/gpcontrib/diskquota/cmake/install_gpdb_component new file mode 100755 index 00000000000..9929df4f7ed --- /dev/null +++ b/gpcontrib/diskquota/cmake/install_gpdb_component @@ -0,0 +1,2 @@ +#!/bin/bash +cp -r lib share $GPHOME || exit 1 diff --git a/gpcontrib/diskquota/cmake/regress_loop.sh b/gpcontrib/diskquota/cmake/regress_loop.sh new file mode 100755 index 00000000000..48cf94b6eed --- /dev/null +++ b/gpcontrib/diskquota/cmake/regress_loop.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# Usage: +# regress_loop.sh +# Use negative number for infinite loop + +run_times=$1 +count=1 + +while [ "$run_times" -lt 0 ] || [ "$count" -le "$run_times" ]; do + echo "Run regress ${count} times" + "${@:2}" || exit 1 + count=$(( count + 1 )) +done diff --git a/gpcontrib/diskquota/cmake/regress_show_diff.sh b/gpcontrib/diskquota/cmake/regress_show_diff.sh new file mode 100755 index 00000000000..cc1de5c2c8f --- /dev/null +++ b/gpcontrib/diskquota/cmake/regress_show_diff.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +if [ -z "${SHOW_REGRESS_DIFF}" ]; then + exit 1 +fi + +diff_files=$(find "$1" -name regression.diffs) +for diff_file in ${diff_files}; do + if [ -f "${diff_file}" ]; then + cat <<-FEOF +====================================================================== +DIFF FILE: ${diff_file} +====================================================================== + +$(grep -v GP_IGNORE "${diff_file}") +FEOF + fi +done +exit 1 diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.2--2.3.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.2--2.3.sql new file mode 100644 index 00000000000..4669f79a9eb --- /dev/null +++ b/gpcontrib/diskquota/control/ddl/diskquota--2.2--2.3.sql @@ -0,0 +1,45 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.3.so + +-- UDF 
+/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.3.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.3.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'show_worker_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.3.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.3.so', 'show_relation_cache' LANGUAGE C; + +DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.3.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE 
attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.2.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.2.sql new file mode 100644 index 00000000000..49a4b1dbe32 --- /dev/null +++ b/gpcontrib/diskquota/control/ddl/diskquota--2.2.sql @@ -0,0 +1,325 @@ +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION diskquota" to load this file. \quit + +CREATE SCHEMA diskquota; + +-- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = role_oid/schema_oid; +-- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; +CREATE TABLE diskquota.quota_config( + targetOid oid, + quotatype int, + quotalimitMB int8, + segratio float4 DEFAULT 0, + PRIMARY KEY(targetOid, quotatype) +) DISTRIBUTED BY (targetOid, quotatype); + +CREATE TABLE diskquota.target ( + rowId serial, + quotatype int, --REFERENCES disquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +); + +CREATE TABLE diskquota.table_size( + tableid oid, + size bigint, + segid smallint, + PRIMARY KEY(tableid, segid) +) DISTRIBUTED BY (tableid, segid); + +CREATE TABLE diskquota.state( + state int, + PRIMARY KEY(state) +) DISTRIBUTED BY (state); + +-- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly +SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); + +CREATE TYPE diskquota.diskquota_active_table_type AS ( + "TABLE_OID" oid, + "TABLE_SIZE" int8, + "GP_SEGMENT_ID" smallint +); + +CREATE TYPE diskquota.rejectmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); + +CREATE TYPE diskquota.rejectmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[], + RELAM oid +); + +CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS 
'$libdir/diskquota-2.2.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.2.so', 'show_rejectmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.2.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.2.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.2.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; + +-- view part +CREATE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; + +CREATE VIEW 
diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +CREATE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +CREATE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; + +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS 
target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = pg_tablespace.oid + AND quotatype = 4; + +-- view end + +-- prepare to boot +INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; + +-- re-dispatch pause status to false. in case user pause-drop-recreate. +-- refer to see test case 'test_drop_after_pause' +SELECT FROM diskquota.resume(); + + +--- Starting the worker has to be the last step. +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.3--2.2.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.3--2.2.sql new file mode 100644 index 00000000000..35dd1b29b76 --- /dev/null +++ b/gpcontrib/diskquota/control/ddl/diskquota--2.3--2.2.sql @@ -0,0 +1,45 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.2.so + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.2.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.2.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 
'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'show_worker_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.2.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.2.so', 'show_relation_cache' LANGUAGE C; + +DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.2.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.3.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.3.sql new file mode 100644 index 00000000000..bf2e7411fa6 --- /dev/null +++ b/gpcontrib/diskquota/control/ddl/diskquota--2.3.sql @@ -0,0 +1,322 @@ +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit + +CREATE SCHEMA diskquota; + +-- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = role_oid/schema_oid; +-- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; +CREATE TABLE diskquota.quota_config( + targetOid oid, + quotatype int, + quotalimitMB int8, + segratio float4 DEFAULT 0, + PRIMARY KEY(targetOid, quotatype) +) WITH (appendonly=false) DISTRIBUTED BY (targetOid, quotatype); + +CREATE TABLE diskquota.target ( + rowId serial, + quotatype int, --REFERENCES disquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +) WITH (appendonly=false); + +CREATE TABLE diskquota.table_size( + tableid oid, + size bigint, + segid smallint, + PRIMARY KEY(tableid, segid) +) WITH (appendonly=false) DISTRIBUTED BY (tableid, segid); + +CREATE TABLE diskquota.state( + state int, + PRIMARY KEY(state) +) WITH (appendonly=false) DISTRIBUTED BY (state); + +-- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly +SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); + +CREATE TYPE diskquota.diskquota_active_table_type AS ( + "TABLE_OID" oid, + "TABLE_SIZE" int8, + "GP_SEGMENT_ID" smallint +); + +CREATE TYPE diskquota.rejectmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); + +CREATE TYPE diskquota.rejectmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[], + RELAM oid +); + +CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.3.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.3.so', 'show_rejectmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT 
AS '$libdir/diskquota-2.3.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.3.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.3.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.3.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; + +-- view part +CREATE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; + +CREATE VIEW diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +CREATE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = 
diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +CREATE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; + +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = 
pg_tablespace.oid + AND quotatype = 4; + +-- view end + +-- re-dispatch pause status to false. in case user pause-drop-recreate. +-- refer to see test case 'test_drop_after_pause' +SELECT FROM diskquota.resume(); + + +--- Starting the worker has to be the last step. +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/gpcontrib/diskquota/control/ddl/diskquota.control b/gpcontrib/diskquota/control/ddl/diskquota.control new file mode 100644 index 00000000000..67dc913740d --- /dev/null +++ b/gpcontrib/diskquota/control/ddl/diskquota.control @@ -0,0 +1,5 @@ +# diskquota extension +comment = 'disk usage enforcement for database objects' +default_version = '2.3' +module_pathname = 'do-not-use-this' +relocatable = true diff --git a/gpcontrib/diskquota/control/test/diskquota_test--1.0.sql b/gpcontrib/diskquota/control/test/diskquota_test--1.0.sql new file mode 100644 index 00000000000..2a86e965417 --- /dev/null +++ b/gpcontrib/diskquota/control/test/diskquota_test--1.0.sql @@ -0,0 +1,37 @@ +CREATE SCHEMA diskquota_test; + +-- test function +CREATE FUNCTION diskquota_test.wait(sql text) RETURNS bool +AS $$ +DECLARE +res bool := false; +count integer := 10; +BEGIN + WHILE count > 0 LOOP + EXECUTE sql into res; + IF res THEN + RETURN res; + ELSE + count = count - 1; + EXECUTE 'select pg_sleep(1);'; + END IF; + END LOOP; + RETURN res; +END; +$$ LANGUAGE plpgsql; + +CREATE TYPE diskquota_test.db_status AS ( + "dbid" oid, + "datname" text, + "status" text, + "epoch" int8, + "paused" bool +); +CREATE FUNCTION diskquota_test.db_status() RETURNS setof diskquota_test.db_status AS '$libdir/diskquota-2.3.so', 'db_status' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota_test.cur_db_status() RETURNS diskquota_test.db_status AS $$ +SELECT * from diskquota_test.db_status() where datname = current_database(); +$$ LANGUAGE SQL; + +CREATE FUNCTION diskquota_test.check_cur_db_status(text) RETURNS boolean AS $$ +SELECT $1 = db.status from diskquota_test.db_status() as db where db.datname = current_database(); +$$ LANGUAGE SQL; diff --git a/gpcontrib/diskquota/control/test/diskquota_test.control b/gpcontrib/diskquota/control/test/diskquota_test.control new file mode 100644 index 00000000000..11a91927fc0 --- /dev/null +++ b/gpcontrib/diskquota/control/test/diskquota_test.control @@ -0,0 +1,5 @@ +# diskquota test extension +comment = 'extension to test diskquota' +default_version = '1.0' +module_pathname = 'do-not-use-this' +relocatable = true diff --git a/gpcontrib/diskquota/doc/rfc_001_hard_limit.md b/gpcontrib/diskquota/doc/rfc_001_hard_limit.md new file mode 100644 index 00000000000..de8357bbe20 --- /dev/null +++ b/gpcontrib/diskquota/doc/rfc_001_hard_limit.md @@ -0,0 +1,103 @@ +# [RFC 001] Hard Limit for Diskquota + +This document describes the design of the hard limit feature for Diskquota 2.0. + +## Motivation + +Diskquota 1.0 only supports so-call "soft limit", meaning that Diskquota will not interrupt any running query even though the amount of data the query writes exceeds some quota. + +Common types of queries that can write a large amount of data include +- `CREATE TABLE AS` +- `CREATE INDEX` +- `VACUUM FULL` + +Running one single query of such types can take up all the space of a disk, which can cause issues, such as a [Disk Full Failure](https://www.postgresql.org/docs/current/disk-full.html) that crashes the whole database system at worst. 
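+For illustration, here is a minimal sketch of the soft-limit behaviour (hypothetical schema and table names; the quota value syntax is assumed to be the usual `'<n> MB'` form accepted by `diskquota.set_schema_quota`):
+
+```sql
+-- Hypothetical names; assumes the '<n> MB' quota syntax.
+CREATE SCHEMA s1;
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+
+-- Under the soft limit this single statement still runs to completion even
+-- though it writes far more than 1 MB; only later writes into s1 are rejected,
+-- after the background worker notices the overage on its next refresh.
+CREATE TABLE s1.big AS SELECT i FROM generate_series(1, 1000000) AS i;
+
+-- Inspect the configured quota and the recorded usage.
+SELECT schema_name, quota_in_mb, nspsize_in_bytes
+  FROM diskquota.show_fast_schema_quota_view;
+```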
+ +Therefore, to mitigate the risk of having disk full issues, we plan to introduce "hard limit" in Diskquota 2.0, which enables Diskquota to terminate an in-progress query if the amount of data it writes exceeds some quota. + +Due to the difficulty of observing the intermediate states of an in-progress query in Greenplum, implementing hard limit is not easy. Specifically, there are two major challenges in the way: +1. Observing intermediate states of a query under Greenplum's MVCC mechanism. +2. Ensuring data consistency after seeing uncommitted changes. + +The rest of this doc will analyze the challenges, propose possible approaches to tackle them, and introduce the design decisions with the rationales behind them. + +## Challenge 1: Observing Intermediate States + +Diskquota cares about which relations, including tables, indexes, and more, receive new data. Those relations are called "**active**" relations. Diskquota uses background workers (bgworkers) to collect active relations periodically and then calculates their sizes using an OS system call like `stat()`. + +Active relations can be produced in two ways: +- Case 1: By writing new data to existing relations, e.g., using `INSERT` or `COPY FROM`. In this case, Diskquota does not need to observe any intermediate state during execution because the information of the active relations is committed and is visible to the background worker. +- Case 2: By creating new relations with data, e.g., using `CREATE TABLE AS` or `CREATE INDEX`. This is the hard part. In this case, the information of the active relations has not been committed yet during execution. Therefore, the information is not visible to the bgworkers when they scan the catalog tables under MVCC. + +For Case 2, to enable the bgworkers to observe the active relations created by an in-progress query, there are two options: +1. **The `SNAPSHOT_DIRTY` approach:** Disregarding MVCC and scanning the catalog tables using `SNAPSHOT_DIRTY`. In this way, the bgworkers can see uncommitted information of the active relations by doing a table scan. +2. **The pub-sub approach:** Publishing the information of newly created active relations to a shared memory area using hooks when executing a query. For example, we can use the `object_access_hook` to write the information in the relation cache of a relation that is being created to the shared memory area. The bgworkers can then retrieve the information from the shared memory area periodically. + +## Challenge 2: Ensuring Data Consistency + +Since bgworkers are allowed to observe uncommitted states, extra work is required for both options to ensure the bgworkers never see inconsistent snapshots. +- For the `SNAPSHOT_DIRTY` approach, it is required to determine which version should take effect, given that there may be multiple versions of one tuple, including versions created by aborted transactions. +- For the pub-sub approach, it is required to sync the information in the shared memory area against the latest committed version of the catalogs. + +The `SNAPSHOT_DIRTY` approach is more complicated and more error-prone than the pub-sub approach since it requires Diskquota to do visibility checks on its own. Therefore, we choose the pub-sub approach to implement hard limit. + +Even though taking the pub-sub approach frees us from the complicated visibility check process, keeping the shared memory area and the catalogs in sync is still non-trivial.
Note that the information of a relation in the catalogs can either be updated by altering the relation, or be deleted by dropping the relation. A natural idea is to monitor each of these operations, e.g., using the `object_access_hook`, and replay it to the shared memory area. However, this does not solve the consistency issue because these operations can be aborted. Due to the MVCC mechanism, nothing needs to be done to the catalogs when aborting such operations, and no hook can be used to roll back the changes to the shared memory area at that time. + +### Aligning with the Catalogs + +Given that it is useless to replay each modification operation to the shared memory area, we choose not to replay any operation at all but to align the entries in the shared memory area against tuples in the catalogs. + +Specifically, for each entry in the shared memory area, search the catalogs for the tuple with the same key under MVCC, then +- if a tuple is found in the catalogs, that tuple must have been written by the latest committed transaction and therefore must be no later than the transaction that wrote the entry to the shared memory area. Therefore, the tuple in the catalogs prevails and the shared memory entry is deleted. +- otherwise, there are still two cases: + 1. **Tuple Uncommitted:** the transaction that wrote the entry to the shared memory area is the latest one and has not yet been committed. In this case, Diskquota should use the information in the shared memory entry since it is the only source. + 2. **Tuple Deleted:** the tuple in the catalogs has been deleted by a committed transaction and the shared memory area has not been cleaned. We must prevent this case from happening because it is hard to distinguish it from the uncommitted case. Fortunately, Greenplum provides an `unlink` hook that gets called at the end of a transaction to delete files of relations. Diskquota can use the `unlink` hook to delete entries corresponding to relations being deleted from the shared memory area. + +The alignment process is summarized as the following two pieces of pseudo code: +- Each time the Diskquota bgworker retrieves information of active relations, do + ```python + for entry in shared memory area: + tuple = SearchSysCache(entry.key) + if HeapTupleIsValid(tuple): + del entry from shared memory area + ``` +- Each time the `unlink` hook gets called for a `relfilenode`, do + ```python + entry = Search shared memory area by relfilenode + del entry from shared memory area + ``` + +With alignment, entries in the shared memory area only represent uncommitted relations, and tuples in the catalogs are used for committed relations. There is no intersection between the two sets, which guarantees that the Diskquota bgworker will always see a consistent snapshot. + +### Limitations and Workarounds + +While the pub-sub approach with alignment enables Diskquota to observe uncommitted active relations and guarantees data consistency, it does have some inherent limitations. + +One of the most notable limitations is that it does not support hard limit for any operation that modifies existing tuples in the catalogs, such as +- `ALTER TABLE` +- `DROP TABLE` +- `TRUNCATE` + +Such operations will not be visible to Diskquota until the transaction is committed. For example, suppose a user changes the tablespace of a table `t` using +```sql +ALTER TABLE t SET TABLESPACE new_tablespace; +``` + +From Diskquota's perspective, table `t` still belongs to the old tablespace while it is being copied to the new tablespace.
As a result, the size of table `t` will be limited by the quota on the *old* tablespace instead of the *new* tablespace until the `ALTER TABLE` command is completed. + +The root cause of this limitation is that such modification operations will not take effect until the transaction is committed. Specifically, +- Due to MVCC, they will not take effect **in the catalogs** until committed. +- Due to the alignment mechanism, they will not take effect **in the shared memory area** either, given that table `t` is already visible from the catalogs to Diskquota and the corresponding shared memory entry will be deleted when the bgworker retrieves active relations. + +One way to overcome this limitation is to enhance the **soft limit** mechanism to calculate the resulting quota usage of such catalog modification operations and reject those that would cause the quota to be exceeded before execution. This is also not trivial to implement but is in our plan. + +For now, as a workaround, in order to make the catalog modification operations hard-limited based on the new information of relations instead of the old information, the user can use the `CREATE AS` command to create a new relation with the new information and then drop the old one. Because Diskquota can see relations that have not yet been committed, the `CREATE AS` command can be, and will be, hard-limited based on the new information. + +In the above example of changing the tablespace, in order to count the size of table `t` in the quota usage of the new tablespace, the user can replace the `ALTER TABLE` command with the following `CREATE`-`DROP`-`RENAME` transaction: +```sql +BEGIN; +CREATE TABLE t_1 TABLESPACE new_tablespace AS SELECT * FROM t; +DROP TABLE t; +ALTER TABLE t_1 RENAME TO t; +COMMIT; +``` diff --git a/gpcontrib/diskquota/src/diskquota.c b/gpcontrib/diskquota/src/diskquota.c new file mode 100644 index 00000000000..d94dc49ee66 --- /dev/null +++ b/gpcontrib/diskquota/src/diskquota.c @@ -0,0 +1,1984 @@ +/* ------------------------------------------------------------------------- + * + * diskquota.c + * + * Diskquota is used to limit the amount of disk space that a schema or a role + * can use. Diskquota is based on the background worker framework. It contains a + * launcher process which is responsible for starting/refreshing the diskquota + * worker processes which monitor given databases. + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc.
or its affiliates + * + * IDENTIFICATION + * diskquota/diskquota.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "diskquota.h" +#include "gp_activetable.h" + +#include "funcapi.h" +#include "pgstat.h" +#include "access/xact.h" +#include "cdb/cdbgang.h" +#include "cdb/cdbvars.h" +#include "commands/dbcommands.h" +#include "executor/spi.h" +#include "libpq/libpq-be.h" +#include "miscadmin.h" +#include "port/atomics.h" +#include "storage/ipc.h" +#include "storage/proc.h" +#include "storage/sinval.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/faultinjector.h" +#include "utils/guc.h" +#include "utils/memutils.h" +#include "utils/ps_status.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" +#include "utils/timestamp.h" +#include "utils/formatting.h" +#include "tcop/pquery.h" + +PG_MODULE_MAGIC; + +#define DISKQUOTA_DB "diskquota" +#define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" + +/* clang-format off */ +#if !defined(DISKQUOTA_VERSION) || \ + !defined(DISKQUOTA_MAJOR_VERSION) || \ + !defined(DISKQUOTA_PATCH_VERSION) || \ + !defined(DISKQUOTA_MINOR_VERSION) || \ + !defined(DISKQUOTA_BINARY_NAME) + #error Version not found. Please check if the VERSION file exists. +#endif +/* clang-format on */ + +#include <unistd.h> // for useconds_t +extern int usleep(useconds_t usec); // in <unistd.h> + +/* flags set by signal handlers */ +static volatile sig_atomic_t got_sighup = false; +static volatile sig_atomic_t got_sigterm = false; +static volatile sig_atomic_t got_sigusr1 = false; +static volatile sig_atomic_t got_sigusr2 = false; + +/* GUC variables */ +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; +int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ +bool diskquota_hardlimit = false; +int diskquota_max_workers = 10; +int diskquota_max_table_segments = 0; +int diskquota_max_monitored_databases = 0; +int diskquota_max_quota_probes = 0; + +DiskQuotaLocks diskquota_locks; +ExtensionDDLMessage *extension_ddl_message = NULL; + +// Only accessed in the diskquota worker; different for each worker.
+// a pointer to DiskquotaLauncherShmem->workerEntries in shared memory +static DiskQuotaWorkerEntry *volatile MyWorkerInfo = NULL; + +// how many database diskquota are monitoring on +static int num_db = 0; + +/* how many TableSizeEntry are maintained in all the table_size_map in shared memory*/ +pg_atomic_uint32 *diskquota_table_size_entry_num; + +/* how many QuotaInfoEntry are maintained in all the quota_info_map in shared memory*/ +pg_atomic_uint32 *diskquota_quota_info_entry_num; + +static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; + +#define MIN_SLEEPTIME 100 /* milliseconds */ +#define BGWORKER_LOG_TIME 3600000 /* milliseconds */ + +/* + * bgworker handles, in launcher local memory, + * bgworker_handles[i] is the handle of DiskquotaLauncherShmem->[i] + * the actually useable reference is DiskquotaLauncherShmem->{freeWorkers, runningWorkers} + * + * size: GUC diskquota_max_workers + */ +BackgroundWorkerHandle **bgworker_handles; + +typedef enum +{ + SUCCESS, + INVALID_DB, + NO_FREE_WORKER, + UNKNOWN, +} StartWorkerState; +/* functions of disk quota*/ +void _PG_init(void); +void _PG_fini(void); +void disk_quota_worker_main(Datum); +void disk_quota_launcher_main(Datum); + +static void disk_quota_sigterm(SIGNAL_ARGS); +static void disk_quota_sighup(SIGNAL_ARGS); +static void define_guc_variables(void); +static StartWorkerState start_worker(DiskquotaDBEntry *dbEntry); +static void create_monitor_db_table(void); +static void add_dbid_to_database_list(Oid dbid); +static void del_dbid_from_database_list(Oid dbid); +static void process_extension_ddl_message(void); +static void do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_extension_ddl_message); +static void terminate_all_workers(void); +static void on_add_db(Oid dbid, MessageResult *code); +static void on_del_db(Oid dbid, MessageResult *code); +static bool is_valid_dbid(Oid dbid); +extern void invalidate_database_rejectmap(Oid dbid); +static void FreeWorkerOnExit(int code, Datum arg); +static void FreeWorker(DiskQuotaWorkerEntry *worker); +static void init_database_list(void); +static DiskquotaDBEntry *next_db(DiskquotaDBEntry *curDB); +static DiskQuotaWorkerEntry *next_worker(void); +static DiskquotaDBEntry *add_db_entry(Oid dbid); +static void release_db_entry(Oid dbid); +static char *get_db_name(Oid dbid); +static void reset_worker(DiskQuotaWorkerEntry *dq_worker); +static void vacuum_db_entry(DiskquotaDBEntry *db); +static void init_bgworker_handles(void); +static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); +static void free_bgworker_handle(uint32 worker_id); +static bool is_altering_extension_to_default_version(char *version); +static bool check_alter_extension(void); + +/* + * diskquota_launcher_shmem_size + * Compute space needed for diskquota launcher related shared memory + */ +Size +diskquota_launcher_shmem_size(void) +{ + Size size; + + size = MAXALIGN(sizeof(DiskquotaLauncherShmemStruct)); + // hidden memory for DiskQuotaWorkerEntry + size = add_size(size, mul_size(diskquota_max_workers, sizeof(struct DiskQuotaWorkerEntry))); + // hidden memory for dbArray + size = add_size(size, mul_size(diskquota_max_monitored_databases, sizeof(struct DiskquotaDBEntry))); + return size; +} + +/* + * Check whether altering the extension to the default version. 
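+ *
+ * Implementation note: the function below runs a read-only SPI query against
+ * pg_available_extensions and compares its default_version column with the
+ * version string passed in by the caller, returning true only on an exact
+ * match.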
+ */ +static bool +is_altering_extension_to_default_version(char *version) +{ + int spi_ret; + bool ret = false; + SPI_connect(); + spi_ret = SPI_execute("select default_version from pg_available_extensions where name ='diskquota'", true, 0); + if (spi_ret != SPI_OK_SELECT) + elog(ERROR, "[diskquota] failed to select diskquota default version during diskquota update."); + if (SPI_processed > 0) + { + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + bool isnull; + + dat = SPI_getbinval(tup, SPI_tuptable->tupdesc, 1, &isnull); + if (!isnull) + { + char *default_version = DatumGetCString(dat); + if (strcmp(version, default_version) == 0) ret = true; + } + } + SPI_finish(); + return ret; +} + +static bool +check_alter_extension(void) +{ + if (ActivePortal == NULL) return false; + /* QD: When the sourceTag is T_AlterExtensionStmt, then return true */ + if (ActivePortal->sourceTag == T_AlterExtensionStmt) return true; + + /* + * QE: The sourceTag won't be T_AlterExtensionStmt, we should check the sourceText. + * If the sourceText contains 'alter extension diskquota update', we consider it is + * a alter extension query. + */ + char *query = asc_tolower(ActivePortal->sourceText, strlen(ActivePortal->sourceText)); + char *pos = query; + bool ret = true; + static char *regs[] = {"alter", "extension", "diskquota", "update"}; + int i; + + /* Check whether the sql statement is alter extension. */ + for (i = 0; i < sizeof(regs) / sizeof(char *); i++) + { + pos = strstr(pos, regs[i]); + if (pos == 0) + { + ret = false; + break; + } + } + + /* + * If the current version is the final version, which is altered, + * we need to throw an error to the user. + */ + if (ret) + { + /* + * If version is set in alter extension statement, then compare the current version + * with the version in this statement. Otherwise, compare the current version with + * the default version of diskquota. + */ + pos = strstr(pos, "to"); + if (pos) + ret = strstr(pos, DISKQUOTA_VERSION) != 0; + else + ret = is_altering_extension_to_default_version(DISKQUOTA_VERSION); + } + + pfree(query); + return ret; +} + +/* + * Entrypoint of diskquota module. + * + * Init shared memory and hooks. + * Define GUCs. + * start diskquota launcher process. + */ +void +_PG_init(void) +{ + /* diskquota.so must be in shared_preload_libraries to init SHM. */ + if (!process_shared_preload_libraries_in_progress) + { + /* + * To support the continuous upgrade/downgrade, we should skip the library + * check in _PG_init() during upgrade/downgrade. + */ + if (IsNormalProcessingMode() && check_alter_extension()) + { + ereport(LOG, (errmsg("[diskquota] altering diskquota version to " DISKQUOTA_VERSION "."))); + return; + } + ereport(ERROR, (errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME + " not in shared_preload_libraries. abort."))); + } + else + { + ereport(INFO, (errmsg("booting diskquota-" DISKQUOTA_VERSION))); + } + + BackgroundWorker worker; + memset(&worker, 0, sizeof(BackgroundWorker)); + + /* values are used in later calls */ + define_guc_variables(); + + init_disk_quota_shmem(); + init_disk_quota_enforcement(); + init_active_table_hook(); + + /* start disk quota launcher only on master */ + if (!IS_QUERY_DISPATCHER()) + { + return; + } + + /* set up common data for diskquota launcher worker */ + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_start_time = BgWorkerStart_RecoveryFinished; + /* launcher process should be restarted after pm reset. 
*/ + worker.bgw_restart_time = BGW_DEFAULT_RESTART_INTERVAL; + snprintf(worker.bgw_library_name, BGW_MAXLEN, DISKQUOTA_BINARY_NAME); + snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_launcher_main"); + worker.bgw_notify_pid = 0; + + snprintf(worker.bgw_name, BGW_MAXLEN, "[diskquota] - launcher"); + + RegisterBackgroundWorker(&worker); +} + +void +_PG_fini(void) +{} + +/* + * Signal handler for SIGTERM + * Set a flag to let the main loop to terminate, and set our latch to wake + * it up. + */ +static void +disk_quota_sigterm(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sigterm = true; + if (MyProc) SetLatch(&MyProc->procLatch); + + errno = save_errno; +} + +/* + * Signal handler for SIGHUP + * Set a flag to tell the main loop to reread the config file, and set + * our latch to wake it up. + */ +static void +disk_quota_sighup(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sighup = true; + if (MyProc) SetLatch(&MyProc->procLatch); + + errno = save_errno; +} + +/* + * Signal handler for SIGUSR1 + * Set a flag to tell the launcher to handle extension ddl message + */ +static void +disk_quota_sigusr1(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sigusr1 = true; + + if (MyProc) SetLatch(&MyProc->procLatch); + + errno = save_errno; +} + +/* + * Signal handler for SIGUSR2 + * Set a flag to tell the launcher to handle extension ddl message + */ +static void +disk_quota_sigusr2(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sigusr2 = true; + + if (MyProc) SetLatch(&MyProc->procLatch); + + errno = save_errno; +} +/* + * Define GUC variables used by diskquota + */ +static void +define_guc_variables(void) +{ +#if DISKQUOTA_DEBUG + const int min_naptime = 0; +#else + const int min_naptime = 1; +#endif + + DefineCustomIntVariable("diskquota.naptime", "Duration between each check (in seconds).", NULL, &diskquota_naptime, + 2, min_naptime, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); + + DefineCustomIntVariable("diskquota.max_active_tables", "Max number of active tables monitored by disk-quota.", NULL, + &diskquota_max_active_tables, 300 * 1024, 1, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); + + DefineCustomIntVariable("diskquota.worker_timeout", "Duration between each check (in seconds).", NULL, + &diskquota_worker_timeout, 60, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); + DefineCustomBoolVariable("diskquota.hard_limit", "Set this to 'on' to enable disk-quota hardlimit.", NULL, + &diskquota_hardlimit, false, PGC_SIGHUP, 0, NULL, NULL, NULL); + DefineCustomIntVariable( + "diskquota.max_workers", + "Max number of backgroud workers to run diskquota extension, should be less than max_worker_processes.", + NULL, &diskquota_max_workers, 10, 1, 20, PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomIntVariable("diskquota.max_table_segments", "Max number of tables segments on the cluster.", NULL, + &diskquota_max_table_segments, 10 * 1024 * 1024, INIT_NUM_TABLE_SIZE_ENTRIES * 1024, + INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomIntVariable("diskquota.max_monitored_databases", "Max number of database on the cluster.", NULL, + &diskquota_max_monitored_databases, 50, 1, 1024, PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomIntVariable("diskquota.max_quota_probes", "Max number of quotas on the cluster.", NULL, + &diskquota_max_quota_probes, 1024 * 1024, 1024 * INIT_QUOTA_MAP_ENTRIES, INT_MAX, + PGC_POSTMASTER, 0, NULL, NULL, NULL); +} + +/* ---- Functions for disk quota worker process ---- */ + +/* + * Disk quota worker process will refresh disk quota model 
periodically. + * Refresh logic is defined in quotamodel.c + */ +void +disk_quota_worker_main(Datum main_arg) +{ + char dbname[NAMEDATALEN]; + + MyWorkerInfo = (DiskQuotaWorkerEntry *)DatumGetPointer(MyBgworkerEntry->bgw_main_arg); + Assert(MyWorkerInfo != NULL); + + memcpy(dbname, MyWorkerInfo->dbname.data, NAMEDATALEN); + + /* Disable ORCA to avoid fallback */ + optimizer = false; + + ereport(DEBUG1, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); + /* Establish signal handlers before unblocking signals. */ + pqsignal(SIGHUP, disk_quota_sighup); + pqsignal(SIGTERM, disk_quota_sigterm); + pqsignal(SIGUSR1, disk_quota_sigusr1); + + if (!MyWorkerInfo->dbEntry->inited) + { + MyWorkerInfo->dbEntry->last_log_time = GetCurrentTimestamp(); + ereport(LOG, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); + } + /* To avoid last_log_time from being uninitialized. */ + if (MyWorkerInfo->dbEntry->last_log_time > GetCurrentTimestamp()) + MyWorkerInfo->dbEntry->last_log_time = GetCurrentTimestamp(); + /* + * The shmem exit hook is registered after registering disk_quota_sigterm. + * So if the SIGTERM arrives before this statement, the shmem exit hook + * won't be called. + * + * TODO: launcher to free the unused worker? + */ + on_shmem_exit(FreeWorkerOnExit, 0); + + /* We're now ready to receive signals */ + BackgroundWorkerUnblockSignals(); + + BackgroundWorkerInitializeConnection(dbname, NULL, 0); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, + 0, true); + + /* diskquota worker should has Gp_role as dispatcher */ + Gp_role = GP_ROLE_DISPATCH; + + /* + * Initialize diskquota related local hash map and refresh model + * immediately + */ + init_disk_quota_model(MyWorkerInfo->dbEntry->id); + + // FIXME: version check should be run for each starting bgworker? + // check current binary version and SQL DLL version are matched + int times = 0; + while (!got_sigterm) + { + CHECK_FOR_INTERRUPTS(); + + int major = -1, minor = -1; + int has_error = worker_spi_get_extension_version(&major, &minor) != 0; + + if (major == DISKQUOTA_MAJOR_VERSION && minor == DISKQUOTA_MINOR_VERSION) break; + if (has_error) + { + static char _errfmt[] = "find issues in pg_class.pg_extension check server log. waited %d seconds", + _errmsg[sizeof(_errfmt) + sizeof("2147483647" /* INT_MAX */) + 1] = {}; + snprintf(_errmsg, sizeof(_errmsg), _errfmt, times * diskquota_naptime); + + { + char _psbuf[256]; + if (_errmsg[0] != '\0') + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s %s", dbname, _errmsg); + else + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s", dbname); + set_ps_display(_psbuf); + } + } + else + { + { + char _psbuf[256]; + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s v" DISKQUOTA_VERSION " is not matching with current SQL. stop working", + dbname); + set_ps_display(_psbuf); + } + } + + ereportif(!has_error && times == 0, WARNING, + (errmsg("[diskquota] worker for \"%s\" detected the installed version is \"%d.%d\", " + "but current version is %s. 
abort due to version not match", + dbname, major, minor, DISKQUOTA_VERSION), + errhint("run alter extension diskquota update to \"%d.%d\"", DISKQUOTA_MAJOR_VERSION, + DISKQUOTA_MINOR_VERSION))); + + int rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); + ResetLatch(&MyProc->procLatch); + if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", dbname))); + proc_exit(-1); + } + + times++; + } + + /* + * Set ps display name of the worker process of diskquota, so we can + * distinguish them quickly. Note: never mind parameter name of the + * function `init_ps_display`, we only want the ps name looks like + * 'bgworker: [diskquota] ...' + */ + { + char _psbuf[256]; + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s", dbname); + set_ps_display(_psbuf); + } + + /* suppose the database is ready, if not, then set it to false */ + bool is_ready = true; + /* Waiting for diskquota state become ready */ + while (!got_sigterm) + { + int rc; + /* If the database has been inited before, no need to check the ready state again */ + if (MyWorkerInfo->dbEntry->inited) break; + + CHECK_FOR_INTERRUPTS(); + + /* + * Check whether the state is in ready mode. The state would be + * unknown, when you `create extension diskquota` at the first time. + * After running UDF init_table_size_table() The state will changed to + * be ready. + */ + if (check_diskquota_state_is_ready()) + { + is_ready = true; + break; + } + + if (is_ready) + { + update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_UNREADY); + is_ready = false; + } + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); + ResetLatch(&MyProc->procLatch); + + // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true + if (!diskquota_naptime) usleep(1); + + /* Emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", dbname))); + proc_exit(1); + } + + /* In case of a SIGHUP, just reload the configuration. */ + if (got_sighup) + { + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + } + } + + if (!MyWorkerInfo->dbEntry->inited) update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_RUNNING); + + bool is_gang_destroyed = false; + TimestampTz loop_start_timestamp = 0; + TimestampTz loop_end_timestamp; + TimestampTz log_time; + long sleep_time = diskquota_naptime * 1000; + long secs; + int usecs; + + while (!got_sigterm) + { + int rc; + + /* + * The log printed from the bgworker does not contain the database name + * but contains the bgworker's pid. We should print the database name + * every BGWORKER_LOG_TIME to ensure that we can find the database name + * by the bgworker's pid in the log file. + */ + log_time = GetCurrentTimestamp(); + if (TimestampDifferenceExceeds(MyWorkerInfo->dbEntry->last_log_time, log_time, BGWORKER_LOG_TIME)) + { + ereport(LOG, (errmsg("[diskquota] disk quota worker process is monitoring database:%s", dbname))); + MyWorkerInfo->dbEntry->last_log_time = log_time; + } + + /* + * If the bgworker receives a signal, the latch will be set ahead of the diskquota.naptime. + * To avoid too frequent diskquota refresh caused by receiving the signal, we use + * loop_start_timestamp and loop_end_timestamp to maintain the elapsed time since the last + * diskquota refresh. 
If the latch is set ahead of diskquota.naptime, + * refresh_disk_quota_model() should be skipped. + */ + loop_end_timestamp = GetCurrentTimestamp(); + TimestampDifference(loop_start_timestamp, loop_end_timestamp, &secs, &usecs); + sleep_time += secs * 1000 + usecs / 1000; + if (sleep_time >= diskquota_naptime * 1000) + { + SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); + if (!diskquota_is_paused()) + { + /* Refresh quota model with init mode */ + refresh_disk_quota_model(!MyWorkerInfo->dbEntry->inited); + MyWorkerInfo->dbEntry->inited = true; + is_gang_destroyed = false; + } + else if (!is_gang_destroyed) + { + DisconnectAndDestroyAllGangs(false); + is_gang_destroyed = true; + } + worker_increase_epoch(MyWorkerInfo->dbEntry->dbid); + + sleep_time = 0; + } + loop_start_timestamp = GetCurrentTimestamp(); + + if (DiskquotaLauncherShmem->isDynamicWorker) + { + break; + } + CHECK_FOR_INTERRUPTS(); + + /* + * Background workers mustn't call usleep() or any direct equivalent: + * instead, they may wait on their process latch, which sleeps as + * necessary, but is awakened if postmaster dies. That way the + * background process goes away immediately in an emergency. + */ + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000 - sleep_time); + ResetLatch(&MyProc->procLatch); + + // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true + if (!diskquota_naptime) usleep(1); + + /* Emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", dbname))); + proc_exit(1); + } + + /* In case of a SIGHUP, just reload the configuration. */ + if (got_sighup) + { + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + } + } + + if (got_sigterm) + ereport(LOG, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); + ereport(DEBUG1, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); +#if DISKQUOTA_DEBUG + TimestampDifference(MyWorkerInfo->dbEntry->last_run_time, GetCurrentTimestamp(), &secs, &usecs); + MyWorkerInfo->dbEntry->cost = secs * 1000L + usecs / 1000L; +#endif + proc_exit(0); +} + +static inline bool +isAbnormalLoopTime(int diff_sec) +{ + int max_time; + if (diskquota_naptime > 6) + max_time = diskquota_naptime * 2; + else + max_time = diskquota_naptime + 6; + return diff_sec > max_time; +} + +/* ---- Functions for launcher process ---- */ +/* + * Launcher process manages the worker processes based on + * GUC diskquota.monitor_databases in configuration file. + */ +void +disk_quota_launcher_main(Datum main_arg) +{ + time_t loop_begin, loop_end; + + /* the launcher should exit when the master boots in utility mode */ + if (Gp_role != GP_ROLE_DISPATCH) + { + proc_exit(0); + } + + MemoryContextSwitchTo(TopMemoryContext); + init_bgworker_handles(); + + /* establish signal handlers before unblocking signals. */ + pqsignal(SIGHUP, disk_quota_sighup); + pqsignal(SIGTERM, disk_quota_sigterm); + pqsignal(SIGUSR1, disk_quota_sigusr1); + pqsignal(SIGUSR2, disk_quota_sigusr2); + /* we're now ready to receive signals */ + BackgroundWorkerUnblockSignals(); + + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); + extension_ddl_message->launcher_pid = MyProcPid; + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + /* + * connect to our database 'diskquota'. 
launcher process will exit if + * 'diskquota' database is not existed. + */ + + BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL, 0); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, + 0, true); + + /* + * use table diskquota_namespace.database_list to store diskquota enabled + * database. + */ + create_monitor_db_table(); + + init_database_list(); + DisconnectAndDestroyAllGangs(false); + + loop_end = time(NULL); + + struct timeval nap; + nap.tv_sec = diskquota_naptime; + nap.tv_usec = 0; + /* main loop: do this until the SIGTERM handler tells us to terminate. */ + ereport(LOG, (errmsg("[diskquota launcher] start main loop"))); + DiskquotaDBEntry *curDB = NULL; + Oid curDBId = 0; + bool advance_one_db = true; + bool timeout = false; + int try_times = 0; + while (!got_sigterm) + { + int rc; + CHECK_FOR_INTERRUPTS(); + /* pick a db to run */ + if (advance_one_db) + { + curDB = next_db(curDB); + timeout = false; + try_times = 0; + if (curDB != NULL) + { + curDBId = curDB->dbid; + elog(DEBUG1, "[diskquota] next db to run:%u", curDBId); + } + else + elog(DEBUG1, "[diskquota] no db to run"); + } + /* + * Modify wait time + * + * If there is no db needed to run or has exceeded the next_run_time, + * just sleep to wait a db or a free worker. + * + * Otherwise check the next_run_time to determin how much time to wait + */ + if (timeout || curDB == NULL) + { + nap.tv_sec = diskquota_naptime > 0 ? diskquota_naptime : 1; + nap.tv_usec = 0; + } + else + { + TimestampTz curTime = GetCurrentTimestamp(); + long sec; + int usec; + TimestampDifference(curTime, curDB->next_run_time, &sec, &usec); + nap.tv_sec = sec; + nap.tv_usec = usec; + + /* if the sleep time is too short, just skip the sleeping */ + if (nap.tv_sec == 0 && nap.tv_usec < MIN_SLEEPTIME * 1000L) + { + nap.tv_usec = 0; + } + + /* if the sleep time is too long, advance the next_run_time */ + if (nap.tv_sec > diskquota_naptime) + { + nap.tv_sec = diskquota_naptime; + nap.tv_usec = 0; + curDB->next_run_time = TimestampTzPlusMilliseconds(curTime, diskquota_naptime * 1000L); + } + } + + bool sigusr1 = false; + bool sigusr2 = false; + + /* + * background workers mustn't call usleep() or any direct equivalent: + * instead, they may wait on their process latch, which sleeps as + * necessary, but is awakened if postmaster dies. That way the + * background process goes away immediately in an emergency. + */ + + if (nap.tv_sec != 0 || nap.tv_usec != 0) + { + elog(DEBUG1, "[diskquota] naptime sec:%ld, usec:%ld", nap.tv_sec, nap.tv_usec); + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + (nap.tv_sec * 1000L) + (nap.tv_usec / 1000L)); + ResetLatch(&MyProc->procLatch); + + /* Emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota launcher] launcher is being terminated by postmaster death."))); + proc_exit(1); + } + } + /* process extension ddl message */ + if (got_sigusr2) + { + elog(DEBUG1, "[diskquota] got sigusr2"); + got_sigusr2 = false; + process_extension_ddl_message(); + sigusr2 = true; + } + + /* in case of a SIGHUP, just reload the configuration. */ + if (got_sighup) + { + elog(DEBUG1, "[diskquota] got sighup"); + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + } + + /* + * When the bgworker for diskquota worker starts or stops, + * postmsater prosess will send sigusr1 to launcher as + * worker.bgw_notify_pid has been set to launcher pid. 
+ */ + if (got_sigusr1) + { + elog(DEBUG1, "[diskquota] got sigusr1"); + got_sigusr1 = false; + sigusr1 = true; + } + + /* + * Try to starts a bgworker for the curDB + * + */ + + /* + * When db list is empty, curDB is NULL. + * When curDB->in_use is false means dbEtnry has been romoved + * When curDB->dbid doesn't equtal curDBId, it means the slot has + * been used by another db + * + * For the above conditions, we just skip this loop and try to fetch + * next db to run. + */ + if (curDB == NULL || !curDB->in_use || curDB->dbid != curDBId) + { + advance_one_db = true; + continue; + } + + /* + * Try to start a worker to run the db if has exceeded the next_run_time. + * if start_worker fails, advance_one_db will be set to false, so in the + * next loop will run the db again. + */ + if (TimestampDifferenceExceeds(curDB->next_run_time, GetCurrentTimestamp(), MIN_SLEEPTIME)) + { + StartWorkerState ret = start_worker(curDB); + /* when start_worker successfully or db is invalid, pick up next db to run */ + advance_one_db = (ret == SUCCESS || ret == INVALID_DB) ? true : false; + if (!advance_one_db) + { + /* has exceeded the next_run_time of current db */ + timeout = true; + /* when start_worker return is not 2(no free worker), increase the try_times*/ + if (ret != NO_FREE_WORKER) try_times++; + /* only try to start bgworker for a database at most 3 times */ + if (try_times >= 3) advance_one_db = true; + } + } + else + { + advance_one_db = false; + } + + loop_begin = loop_end; + loop_end = time(NULL); + if (isAbnormalLoopTime(loop_end - loop_begin)) + { + ereport(WARNING, (errmsg("[diskquota launcher] loop takes too much time %d/%d", + (int)(loop_end - loop_begin), diskquota_naptime))); + } + } + + /* terminate all the diskquota worker processes before launcher exit */ + ereport(LOG, (errmsg("[diskquota launcher] launcher is being terminated by SIGTERM."))); + terminate_all_workers(); + proc_exit(0); +} + +/* + * Create table to record the list of monitored databases + * we need a place to store the database with diskquota enabled + * (via CREATE EXTENSION diskquota). Currently, we store them into + * heap table in diskquota_namespace schema of diskquota database. + * When database restarted, diskquota launcher will start worker processes + * for these databases. + */ +static void +create_monitor_db_table(void) +{ + const char *sql; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; + + /* + * Create function diskquota.diskquota_fetch_table_stat in launcher + * We need this function to distribute dbid to segments when creating + * a diskquota extension. + */ + sql = "create schema if not exists diskquota_namespace;" + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);" + "DROP SCHEMA IF EXISTS " LAUNCHER_SCHEMA + " CASCADE;" + "CREATE SCHEMA " LAUNCHER_SCHEMA + ";" + "CREATE TYPE " LAUNCHER_SCHEMA + ".diskquota_active_table_type AS (TABLE_OID oid, TABLE_SIZE int8, GP_SEGMENT_ID " + "smallint);" + "CREATE FUNCTION " LAUNCHER_SCHEMA ".diskquota_fetch_table_stat(int4, oid[]) RETURNS setof " LAUNCHER_SCHEMA + ".diskquota_active_table_type AS '$libdir/" DISKQUOTA_BINARY_NAME + ".so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE;"; + + StartTransactionCommand(); + + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota launcher process should + * tolerate this kind of errors. 
+ */ + PG_TRY(); + { + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota launcher] unable to connect to execute internal query. return code: %d.", + ret_code))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + + /* debug_query_string need to be set for SPI_execute utility functions. */ + debug_query_string = sql; + + ret_code = SPI_execute(sql, false, 0); + if (ret_code != SPI_OK_UTILITY) + { + int saved_errno = errno; + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql: \"%s\", reason: %s, ret_code: %d.", + sql, strerror(saved_errno), ret_code))); + } + } + PG_CATCH(); + { + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + debug_query_string = NULL; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); + + debug_query_string = NULL; +} + +/* + * When launcher started, it will start all worker processes of + * diskquota-enabled databases from diskquota_namespace.database_list + */ +static void +init_database_list(void) +{ + TupleDesc tupdesc; + int num = 0; + int ret; + int i; + + /* + * Don't catch errors in start_workers_from_dblist. Since this is the + * startup worker for diskquota launcher. If error happens, we just let + * launcher exits. + */ + StartTransactionCommand(); + PushActiveSnapshot(GetTransactionSnapshot()); + + ret = SPI_connect(); + if (ret != SPI_OK_CONNECT) + { + int saved_errno = errno; + ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, reason: %s, return code: %d.", + strerror(saved_errno), ret))); + } + ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); + if (ret != SPI_OK_SELECT) + { + int saved_errno = errno; + ereport(ERROR, + (errmsg("[diskquota launcher] 'select diskquota_namespace.database_list', reason: %s, return code: %d.", + strerror(saved_errno), ret))); + } + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 1 || tupdesc->attrs[0].atttypid != OIDOID) + { + ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", + tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0].atttypid))); + ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, launcher will exit. 
")));
+ }
+ for (i = 0; i < SPI_processed; i++)
+ {
+ HeapTuple tup;
+ Oid dbid;
+ Datum dat;
+ bool isnull;
+ DiskquotaDBEntry *dbEntry;
+
+ tup = SPI_tuptable->vals[i];
+ dat = SPI_getbinval(tup, tupdesc, 1, &isnull);
+ if (isnull) ereport(ERROR, (errmsg("[diskquota launcher] dbid can't be null in table database_list")));
+ dbid = DatumGetObjectId(dat);
+ if (!is_valid_dbid(dbid))
+ {
+ ereport(LOG, (errmsg("[diskquota launcher] database(oid:%u) in table database_list is not a valid database",
+ dbid)));
+ continue;
+ }
+ dbEntry = add_db_entry(dbid);
+ if (dbEntry == NULL) continue;
+ num++;
+ /*
+ * diskquota only supports monitoring at most diskquota_max_monitored_databases
+ * databases
+ */
+ if (num >= diskquota_max_monitored_databases)
+ {
+ ereport(LOG, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) "
+ "will not enable diskquota",
+ dbid)));
+ break;
+ }
+ }
+ num_db = num;
+ /* update_monitor_db_mpp needs to execute SQL, so it cannot be called in the loop above */
+ for (int i = 0; i < diskquota_max_monitored_databases; i++)
+ {
+ DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i];
+ if (dbEntry->in_use)
+ {
+ update_monitor_db_mpp(dbEntry->dbid, ADD_DB_TO_MONITOR, LAUNCHER_SCHEMA);
+ }
+ }
+ SPI_finish();
+ PopActiveSnapshot();
+ CommitTransactionCommand();
+ /* TODO: clean invalid database */
+ if (num_db > diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = true;
+}
+
+/*
+ * This function is called by the launcher process to handle messages from backend
+ * processes which call CREATE/DROP EXTENSION diskquota. It must be able to catch errors,
+ * and return an error code back to the backend process.
+ */
+static void
+process_extension_ddl_message()
+{
+ MessageResult code = ERR_UNKNOWN;
+ ExtensionDDLMessage local_extension_ddl_message;
+
+ LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED);
+ memcpy(&local_extension_ddl_message, extension_ddl_message, sizeof(ExtensionDDLMessage));
+ LWLockRelease(diskquota_locks.extension_ddl_message_lock);
+
+ /* create/drop extension message must be valid */
+ if (local_extension_ddl_message.req_pid == 0 || local_extension_ddl_message.launcher_pid != MyProcPid) return;
+
+ ereport(LOG,
+ (errmsg("[diskquota launcher]: received create/drop extension diskquota message, extension launcher")));
+
+ do_process_extension_ddl_message(&code, local_extension_ddl_message);
+
+ /* Send create/drop extension diskquota result back to QD */
+ LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE);
+ memset(extension_ddl_message, 0, sizeof(ExtensionDDLMessage));
+ extension_ddl_message->launcher_pid = MyProcPid;
+ extension_ddl_message->result = (int)code;
+ LWLockRelease(diskquota_locks.extension_ddl_message_lock);
+}
+
+/*
+ * Process 'create extension' and 'drop extension' message.
+ * For 'create extension' message, store dbid into table
+ * 'database_list' and start the diskquota worker process.
+ * For 'drop extension' message, remove dbid from table
+ * 'database_list' and stop the diskquota worker process.
+ */
+static void
+do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_extension_ddl_message)
+{
+ int old_num_db = num_db;
+ bool connected = false;
+ bool pushed_active_snap = false;
+ bool ret = true;
+
+ StartTransactionCommand();
+
+ /*
+ * Catch errors during SPI functions, for example a segment may be down
+ * and current SPI execute will fail. 
diskquota launcher process should + * tolerate this kind of errors. + */ + PG_TRY(); + { + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query. return code: %d.", ret_code))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + + switch (local_extension_ddl_message.cmd) + { + case CMD_CREATE_EXTENSION: + on_add_db(local_extension_ddl_message.dbid, code); + num_db++; + *code = ERR_OK; + break; + case CMD_DROP_EXTENSION: + on_del_db(local_extension_ddl_message.dbid, code); + if (num_db > 0) num_db--; + *code = ERR_OK; + break; + default: + ereport(LOG, (errmsg("[diskquota launcher]:received unsupported message cmd=%d", + local_extension_ddl_message.cmd))); + *code = ERR_UNKNOWN; + break; + } + } + PG_CATCH(); + { + error_context_stack = NULL; + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + num_db = old_num_db; + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); + /* update something in memory after transaction committed */ + if (ret) + { + PG_TRY(); + { + /* update_monitor_db_mpp runs sql to distribute dbid to segments */ + StartTransactionCommand(); + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + Oid dbid = local_extension_ddl_message.dbid; + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query. return code: %d.", ret_code))); + } + switch (local_extension_ddl_message.cmd) + { + case CMD_CREATE_EXTENSION: + if (num_db > diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = true; + add_db_entry(dbid); + /* TODO: how about this failed? 
*/ + update_monitor_db_mpp(dbid, ADD_DB_TO_MONITOR, LAUNCHER_SCHEMA); + break; + case CMD_DROP_EXTENSION: + if (num_db <= diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = false; + /* terminate bgworker in release_db_entry rountine */ + release_db_entry(dbid); + update_monitor_db_mpp(dbid, REMOVE_DB_FROM_BEING_MONITORED, LAUNCHER_SCHEMA); + /* clear the out-of-quota rejectmap in shared memory */ + invalidate_database_rejectmap(dbid); + break; + default: + ereport(LOG, (errmsg("[diskquota launcher]:received unsupported message cmd=%d", + local_extension_ddl_message.cmd))); + break; + } + SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); + CommitTransactionCommand(); + } + PG_CATCH(); + { + error_context_stack = NULL; + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + } + DisconnectAndDestroyAllGangs(false); +} + +/* + * Handle create extension diskquota + * if we know the exact error which caused failure, + * we set it, and error out + */ +static void +on_add_db(Oid dbid, MessageResult *code) +{ + if (num_db >= diskquota_max_monitored_databases) + { + *code = ERR_EXCEED; + ereport(ERROR, (errmsg("[diskquota launcher] too many databases to monitor"))); + } + if (!is_valid_dbid(dbid)) + { + *code = ERR_INVALID_DBID; + ereport(ERROR, (errmsg("[diskquota launcher] invalid database oid"))); + } + + /* + * add dbid to diskquota_namespace.database_list set *code to + * ERR_ADD_TO_DB if any error occurs + */ + PG_TRY(); + { + add_dbid_to_database_list(dbid); + } + PG_CATCH(); + { + *code = ERR_ADD_TO_DB; + PG_RE_THROW(); + } + PG_END_TRY(); +} + +/* + * Handle message: drop extension diskquota + * do: + * 1. kill the associated worker process + * 2. delete dbid from diskquota_namespace.database_list + * 3. invalidate reject-map entries and monitored_dbid_cache from shared memory + */ +static void +on_del_db(Oid dbid, MessageResult *code) +{ + if (!is_valid_dbid(dbid)) + { + *code = ERR_INVALID_DBID; + ereport(ERROR, (errmsg("[diskquota launcher] invalid database oid"))); + } + + /* + * delete dbid from diskquota_namespace.database_list set *code to + * ERR_DEL_FROM_DB if any error occurs + */ + PG_TRY(); + { + del_dbid_from_database_list(dbid); + } + PG_CATCH(); + { + *code = ERR_DEL_FROM_DB; + PG_RE_THROW(); + } + PG_END_TRY(); +} + +/* + * Add the database id into table 'database_list' in + * database 'diskquota' to store the diskquota enabled + * database info. 
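+ * If the dbid is already present in database_list, this function only emits
+ * a WARNING and leaves the table unchanged, so repeated calls are harmless.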
+ */ +static void +add_dbid_to_database_list(Oid dbid) +{ + int ret; + + Oid argt[1] = {OIDOID}; + Datum argv[1] = {ObjectIdGetDatum(dbid)}; + + ret = SPI_execute_with_args("select * from diskquota_namespace.database_list where dbid = $1", 1, argt, argv, NULL, + true, 0); + + if (ret != SPI_OK_SELECT) + { + int saved_errno = errno; + ereport(ERROR, (errmsg("[diskquota launcher] error occured while checking database_list, " + " code: %d, reason: %s.", + ret, strerror(saved_errno)))); + } + + if (SPI_processed == 1) + { + ereport(WARNING, (errmsg("[diskquota launcher] database id %d is already actived, " + "skip database_list update", + dbid))); + return; + } + + ret = SPI_execute_with_args("insert into diskquota_namespace.database_list values($1)", 1, argt, argv, NULL, false, + 0); + + if (ret != SPI_OK_INSERT || SPI_processed != 1) + { + int saved_errno = errno; + ereport(ERROR, (errmsg("[diskquota launcher] error occured while updating database_list, " + " code: %d, reason: %s.", + ret, strerror(saved_errno)))); + } + + return; +} + +/* + * Delete database id from table 'database_list' in + * database 'diskquota'. + */ +static void +del_dbid_from_database_list(Oid dbid) +{ + int ret; + + /* errors will be cached in outer function */ + ret = SPI_execute_with_args("delete from diskquota_namespace.database_list where dbid = $1", 1, + (Oid[]){ + OIDOID, + }, + (Datum[]){ + ObjectIdGetDatum(dbid), + }, + NULL, false, 0); + if (ret != SPI_OK_DELETE) + { + int saved_errno = errno; + ereport(ERROR, (errmsg("[diskquota launcher] del_dbid_from_database_list: reason: %s, ret_code: %d.", + strerror(saved_errno), ret))); + } +} + +/* + * When launcher exits, it should also terminate all the workers. + */ +static void +terminate_all_workers(void) +{ + dlist_iter iterdb; + DiskQuotaWorkerEntry *worker; + BackgroundWorkerHandle *handle; + LWLockAcquire(diskquota_locks.workerlist_lock, LW_SHARED); + dlist_foreach(iterdb, &DiskquotaLauncherShmem->runningWorkers) + { + worker = dlist_container(DiskQuotaWorkerEntry, node, iterdb.cur); + handle = get_bgworker_handle(worker->id); + if (handle != NULL) TerminateBackgroundWorker(handle); + } + LWLockRelease(diskquota_locks.workerlist_lock); +} + +/* + * Dynamically launch an disk quota worker process. + * This function is called when launcher process + * schedules a database's diskquota worker to run. + * + * return: + * SUCCESS means starting the bgworker sucessfully. + * INVALID_DB means the database is invalid + * NO_FREE_WORKER means there is no avaliable free workers + * UNKNOWN means registering or starting the bgworker + * failed, maybe there is no free bgworker, or + * forking a process failed and so on. 
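+ *
+ * Note: the launcher's main loop retries a database at most 3 times when
+ * start_worker fails, but a NO_FREE_WORKER result does not count against
+ * that retry limit.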
+ */ + +static StartWorkerState +start_worker(DiskquotaDBEntry *dbEntry) +{ + BackgroundWorker worker; + bool ret; + DiskQuotaWorkerEntry *dq_worker; + MemoryContext old_ctx; + char *dbname = NULL; + int result = SUCCESS; + + dq_worker = next_worker(); + if (dq_worker == NULL) + { + elog(DEBUG1, "[diskquota] no free workers"); + result = NO_FREE_WORKER; + return result; + } + /* free the BackgroundWorkerHandle used by last database */ + free_bgworker_handle(dq_worker->id); + + dbEntry->workerId = dq_worker->id; + dq_worker->dbEntry = dbEntry; + +#if DISKQUOTA_DEBUG + dbEntry->last_run_time = GetCurrentTimestamp(); +#endif + + /* register a dynamic bgworker and wait for it to start */ + memset(&worker, 0, sizeof(BackgroundWorker)); + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_start_time = BgWorkerStart_RecoveryFinished; + + /* + * diskquota worker should not restart by bgworker framework. If + * postmaster reset, all the bgworkers will be terminated and diskquota + * launcher is restarted by postmaster. All the diskquota workers should + * be started by launcher process again. + */ + worker.bgw_restart_time = BGW_NEVER_RESTART; + sprintf(worker.bgw_library_name, DISKQUOTA_BINARY_NAME); + sprintf(worker.bgw_function_name, "disk_quota_worker_main"); + dbname = get_db_name(dbEntry->dbid); + if (dbname == NULL) + { + result = INVALID_DB; + goto Failed; + } + /* We do not need to get lock here, since this entry is not used by other process. */ + namestrcpy(&(dq_worker->dbname), dbname); + + snprintf(worker.bgw_name, sizeof(worker.bgw_name), "diskquota bgworker %d", dbEntry->dbid); + pfree(dbname); + + /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ + worker.bgw_notify_pid = MyProcPid; + worker.bgw_main_arg = (Datum)PointerGetDatum(dq_worker); + + old_ctx = MemoryContextSwitchTo(TopMemoryContext); + ret = RegisterDynamicBackgroundWorker(&worker, &(bgworker_handles[dq_worker->id])); + MemoryContextSwitchTo(old_ctx); + if (!ret) + { + elog(WARNING, "Create bgworker failed"); + result = UNKNOWN; + goto Failed; + } + BgwHandleStatus status; + pid_t pid; + status = WaitForBackgroundWorkerStartup(bgworker_handles[dq_worker->id], &pid); + if (status == BGWH_STOPPED) + { + ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not start background process"), + errhint("More details may be available in the server log."))); + result = UNKNOWN; + goto Failed; + } + if (status == BGWH_POSTMASTER_DIED) + { + ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("cannot start background processes without postmaster"), + errhint("Kill all remaining database processes and restart the database."))); + result = UNKNOWN; + goto Failed; + } + + Assert(status == BGWH_STARTED); + return result; +Failed: + + elog(DEBUG1, "[diskquota] diskquota, starts diskquota failed"); + FreeWorker(dq_worker); + return result; +} + +/* + * Check whether db oid is valid. + */ +static bool +is_valid_dbid(Oid dbid) +{ + HeapTuple tuple; + + if (dbid == InvalidOid) return false; + tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); + if (!HeapTupleIsValid(tuple)) return false; + ReleaseSysCache(tuple); + return true; +} + +static const char * +diskquota_status_check_soft_limit() +{ + // should run on coordinator only. 
+ Assert(IS_QUERY_DISPATCHER());
+
+ bool found, paused;
+ MonitorDBEntry entry;
+ LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+ {
+ entry = hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found);
+ paused = found ? entry->paused : false;
+ }
+ LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+
+ // if the worker is not booted, i.e. 'CREATE EXTENSION' has not been called, diskquota is paused
+ if (!found) return "paused";
+
+ // if worker booted, check 'worker_map->is_paused'
+ return paused ? "paused" : "on";
+}
+
+static const char *
+diskquota_status_check_hard_limit()
+{
+ // should run on coordinator only.
+ Assert(IS_QUERY_DISPATCHER());
+
+ bool hardlimit = diskquota_hardlimit;
+
+ bool paused = false;
+ paused = diskquota_is_paused();
+ // if the worker is booted and 'is_paused == true' and hardlimit is enabled,
+ // hard limits should also be paused
+ if (paused && hardlimit) return "paused";
+
+ return hardlimit ? "on" : "off";
+}
+
+static const char *
+diskquota_status_binary_version()
+{
+ return DISKQUOTA_VERSION;
+}
+
+static const char *
+diskquota_status_schema_version()
+{
+ static char ret_version[64];
+ int ret = SPI_connect();
+ Assert(ret == SPI_OK_CONNECT);
+
+ ret = SPI_execute("select extversion from pg_extension where extname = 'diskquota'", true, 0);
+
+ if (ret != SPI_OK_SELECT || SPI_processed != 1)
+ {
+ ereport(WARNING,
+ (errmsg("[diskquota] failed to read installed version, lines %ld code = %d", SPI_processed, ret)));
+ goto fail;
+ }
+
+ bool is_null = false;
+ Datum version_datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null);
+ Assert(is_null == false);
+
+ char *version = TextDatumGetCString(version_datum);
+ if (version == NULL || *version == '\0')
+ {
+ ereport(WARNING, (errmsg("[diskquota] 'extversion' is empty in pg_extension. 
may catalog corrupted"))); + goto fail; + } + + /* copy and ensure null termination */ + snprintf(ret_version, sizeof(ret_version), "%s", version); + + SPI_finish(); + return ret_version; + +fail: + SPI_finish(); + return ""; +} + +PG_FUNCTION_INFO_V1(diskquota_status); +Datum +diskquota_status(PG_FUNCTION_ARGS) +{ + typedef struct Context + { + int index; + } Context; + + typedef struct FeatureStatus + { + const char *name; + const char *(*status)(void); + } FeatureStatus; + + static const FeatureStatus fs[] = { + {.name = "soft limits", .status = diskquota_status_check_soft_limit}, + {.name = "hard limits", .status = diskquota_status_check_hard_limit}, + {.name = "current binary version", .status = diskquota_status_binary_version}, + {.name = "current schema version", .status = diskquota_status_schema_version}, + }; + + FuncCallContext *funcctx; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + funcctx = SRF_FIRSTCALL_INIT(); + + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + { + tupdesc = DiskquotaCreateTemplateTupleDesc(2); + TupleDescInitEntry(tupdesc, 1, "name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, 2, "status", TEXTOID, -1, 0); + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + Context *context = (Context *)palloc(sizeof(Context)); + context->index = 0; + funcctx->user_fctx = context; + } + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + Context *context = (Context *)funcctx->user_fctx; + + if (context->index >= sizeof(fs) / sizeof(FeatureStatus)) + { + SRF_RETURN_DONE(funcctx); + } + + bool nulls[2] = {false, false}; + Datum v[2] = { + DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].name)), + DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].status())), + }; + ReturnSetInfo *rsi = (ReturnSetInfo *)fcinfo->resultinfo; + HeapTuple tuple = heap_form_tuple(rsi->expectedDesc, v, nulls); + + context->index++; + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); +} + +static void +FreeWorker(DiskQuotaWorkerEntry *worker) +{ + if (worker != NULL) + { + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + if (worker->dbEntry != NULL) + { + bool in_use = worker->dbEntry->in_use; + if (in_use && worker->dbEntry->workerId == worker->id) + { + worker->dbEntry->workerId = INVALID_WORKER_ID; + worker->dbEntry->next_run_time = + TimestampTzPlusMilliseconds(GetCurrentTimestamp(), diskquota_naptime * 1000L); + } + } + LWLockRelease(diskquota_locks.dblist_lock); + LWLockAcquire(diskquota_locks.workerlist_lock, LW_EXCLUSIVE); + dlist_delete(&worker->node); + worker->dbEntry = NULL; + dlist_push_head(&DiskquotaLauncherShmem->freeWorkers, &worker->node); + elog(DEBUG1, "[diskquota] free worker %d", worker->id); + LWLockRelease(diskquota_locks.workerlist_lock); + } +} + +static void +FreeWorkerOnExit(int code, Datum arg) +{ + if (MyWorkerInfo != NULL) + { + FreeWorker(MyWorkerInfo); + } +} + +void +init_launcher_shmem() +{ + bool found; + DiskquotaLauncherShmem = (DiskquotaLauncherShmemStruct *)ShmemInitStruct("Diskquota launcher Data", + diskquota_launcher_shmem_size(), &found); + memset(DiskquotaLauncherShmem, 0, diskquota_launcher_shmem_size()); + if (!found) + { + dlist_init(&DiskquotaLauncherShmem->freeWorkers); + dlist_init(&DiskquotaLauncherShmem->runningWorkers); + + // a pointer to the start address of hidden memory + uint8_t *hidden_memory_prt = (uint8_t *)DiskquotaLauncherShmem + MAXALIGN(sizeof(DiskquotaLauncherShmemStruct)); + + // get DiskQuotaWorkerEntry from the hidden memory 
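+ // (The hidden region is laid out as diskquota_max_workers DiskQuotaWorkerEntry
+ // slots followed by diskquota_max_monitored_databases DiskquotaDBEntry slots;
+ // see diskquota_launcher_shmem_size().)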
+ DiskQuotaWorkerEntry *worker = (DiskQuotaWorkerEntry *)hidden_memory_prt; + hidden_memory_prt += mul_size(diskquota_max_workers, sizeof(DiskQuotaWorkerEntry)); + + // get dbArray from the hidden memory + DiskquotaDBEntry *dbArray = (DiskquotaDBEntry *)hidden_memory_prt; + hidden_memory_prt += mul_size(diskquota_max_monitored_databases, sizeof(struct DiskquotaDBEntry)); + + // get the dbArrayTail from the hidden memory + DiskquotaDBEntry *dbArrayTail = (DiskquotaDBEntry *)hidden_memory_prt; + + /* add all worker to the free worker list */ + for (int i = 0; i < diskquota_max_workers; i++) + { + memset(&worker[i], 0, sizeof(DiskQuotaWorkerEntry)); + worker[i].id = i; + dlist_push_head(&DiskquotaLauncherShmem->freeWorkers, &worker[i].node); + } + + DiskquotaLauncherShmem->dbArray = dbArray; + DiskquotaLauncherShmem->dbArrayTail = dbArrayTail; + + for (int i = 0; i < diskquota_max_monitored_databases; i++) + { + memset(&DiskquotaLauncherShmem->dbArray[i], 0, sizeof(DiskquotaDBEntry)); + DiskquotaLauncherShmem->dbArray[i].id = i; + DiskquotaLauncherShmem->dbArray[i].workerId = INVALID_WORKER_ID; + } + } + /* init TableSizeEntry counter */ + diskquota_table_size_entry_num = + ShmemInitStruct("diskquota TableSizeEntry counter", sizeof(pg_atomic_uint32), &found); + if (!found) pg_atomic_init_u32(diskquota_table_size_entry_num, 0); + + /* init QuotaInfoEntry counter */ + diskquota_quota_info_entry_num = + ShmemInitStruct("diskquota QuotaInfoEntry counter", sizeof(pg_atomic_uint32), &found); + if (!found) pg_atomic_init_u32(diskquota_quota_info_entry_num, 0); +} + +/* + * Look for an unused slot. If we find one, grab it. + * + * We always look for the slot from the lower-numbers slots + * firstly, so that we can recycle the slots instead of using + * the unused slots in order to recycle the shared memory + * allocated before. 
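+ *
+ * If an in-use slot already holds the given dbid, that existing entry is
+ * returned instead of allocating a new slot.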
+ */ +static DiskquotaDBEntry * +add_db_entry(Oid dbid) +{ + DiskquotaDBEntry *result = NULL; + + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + /* if there is already dbEntry's dbid equals dbid, returning the existing one */ + for (int i = 0; i < diskquota_max_monitored_databases; i++) + { + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; + if (!dbEntry->in_use && result == NULL) + { + dbEntry->dbid = dbid; + dbEntry->in_use = true; + dbEntry->next_run_time = GetCurrentTimestamp(); + result = dbEntry; + } + else if (dbEntry->in_use && dbEntry->dbid == dbid) + { + result = dbEntry; + break; + } + } + if (result == NULL) + ereport(WARNING, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) " + "will not enable diskquota", + dbid))); + if (result != NULL) elog(DEBUG1, "[diskquota] add db entry: id: %d, %u", result->id, dbid); + + LWLockRelease(diskquota_locks.dblist_lock); + return result; +} + +static void +release_db_entry(Oid dbid) +{ + DiskquotaDBEntry *db = NULL; + for (int i = 0; i < diskquota_max_monitored_databases; i++) + { + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; + if (dbEntry->in_use && dbEntry->dbid == dbid) + { + db = dbEntry; + break; + } + } + if (db == NULL) + { + return; + } + + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + if (db->workerId != INVALID_WORKER_ID) + { + BackgroundWorkerHandle *handle = get_bgworker_handle(db->workerId); + TerminateBackgroundWorker(handle); + } + vacuum_disk_quota_model(db->id); + /* should be called at last to set in_use to false */ + vacuum_db_entry(db); + LWLockRelease(diskquota_locks.dblist_lock); +} + +/* + * Pick next db to run. + * If the curDB is NULL, pick the head db to run. + * If the dbList empty, return NULL. + * If the picked db is in running status, skip it, pick the next one to run. 
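+ * The scan starts at the slot after curDB (curDB->id + 1) and wraps around
+ * dbArray, so every monitored database eventually gets a turn.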
+ */ +static DiskquotaDBEntry * +next_db(DiskquotaDBEntry *curDB) +{ + DiskquotaDBEntry *result = NULL; + int nextSlot = 0; + if (curDB != NULL) + { + nextSlot = curDB->id + 1; + } + + /* + * SearchSysCache should be run in a transaction + */ + StartTransactionCommand(); + LWLockAcquire(diskquota_locks.dblist_lock, LW_SHARED); + for (int i = 0; i < diskquota_max_monitored_databases; i++) + { + if (nextSlot >= diskquota_max_monitored_databases) nextSlot = 0; + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[nextSlot]; + nextSlot++; + if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid) continue; + /* TODO: should release the invalid db related things */ + if (!is_valid_dbid(dbEntry->dbid)) continue; + result = dbEntry; + break; + } + LWLockRelease(diskquota_locks.dblist_lock); + CommitTransactionCommand(); + return result; +} + +static DiskQuotaWorkerEntry * +next_worker(void) +{ + DiskQuotaWorkerEntry *dq_worker = NULL; + dlist_node *wnode; + + /* acquire worker from worker list */ + LWLockAcquire(diskquota_locks.workerlist_lock, LW_EXCLUSIVE); + if (dlist_is_empty(&DiskquotaLauncherShmem->freeWorkers)) goto out; + wnode = dlist_pop_head_node(&DiskquotaLauncherShmem->freeWorkers); + dq_worker = dlist_container(DiskQuotaWorkerEntry, node, wnode); + reset_worker(dq_worker); + dlist_push_head(&DiskquotaLauncherShmem->runningWorkers, &dq_worker->node); + elog(DEBUG1, "[diskquota] gets a worker %d", dq_worker->id); +out: + LWLockRelease(diskquota_locks.workerlist_lock); + return dq_worker; +} + +static char * +get_db_name(Oid dbid) +{ + char *dbname = NULL; + MemoryContext old_ctx; + if (dbid == InvalidOid) + { + elog(WARNING, "database oid is invalid"); + return NULL; + } + + StartTransactionCommand(); + (void)GetTransactionSnapshot(); + old_ctx = MemoryContextSwitchTo(TopMemoryContext); + dbname = get_database_name(dbid); + MemoryContextSwitchTo(old_ctx); + CommitTransactionCommand(); + return dbname; +} + +static void +reset_worker(DiskQuotaWorkerEntry *dq_worker) +{ + if (dq_worker == NULL) return; + dq_worker->dbEntry = NULL; +} + +/* + * id can not be changed + */ +static void +vacuum_db_entry(DiskquotaDBEntry *db) +{ + if (db == NULL) return; + db->dbid = InvalidOid; + db->inited = false; + db->workerId = INVALID_WORKER_ID; + db->in_use = false; +} + +static void +init_bgworker_handles(void) +{ + bgworker_handles = (BackgroundWorkerHandle **)(palloc(sizeof(BackgroundWorkerHandle *) * diskquota_max_workers)); + for (int i = 0; i < diskquota_max_workers; i++) + { + bgworker_handles[i] = NULL; + } + return; +} + +static BackgroundWorkerHandle * +get_bgworker_handle(uint32 worker_id) +{ + if (worker_id >= 0) + return bgworker_handles[worker_id]; + else + return NULL; +} + +static void +free_bgworker_handle(uint32 worker_id) +{ + BackgroundWorkerHandle **handle = &bgworker_handles[worker_id]; + if (*handle != NULL) + { + WaitForBackgroundWorkerShutdown(*handle); + pfree(*handle); + *handle = NULL; + } +} diff --git a/gpcontrib/diskquota/src/diskquota.h b/gpcontrib/diskquota/src/diskquota.h new file mode 100644 index 00000000000..f45dd852548 --- /dev/null +++ b/gpcontrib/diskquota/src/diskquota.h @@ -0,0 +1,310 @@ +/* ------------------------------------------------------------------------- + * + * diskquota.h + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. 
or its affiliates + * + * IDENTIFICATION + * diskquota/diskquota.c + * + * ------------------------------------------------------------------------- + */ +#ifndef DISK_QUOTA_H +#define DISK_QUOTA_H + +#include "c.h" +#include "postgres.h" +#include "port/atomics.h" + +#include "catalog/pg_class.h" +#include "lib/ilist.h" +#include "lib/stringinfo.h" +#include "fmgr.h" +#include "storage/lock.h" +#include "storage/lwlock.h" +#include "storage/relfilenode.h" +#include "postmaster/bgworker.h" + +#include "utils/hsearch.h" +#include "utils/relcache.h" +#include "utils/timestamp.h" + +#include + +/* init number of TableSizeEntry in table_size_map */ +#define INIT_NUM_TABLE_SIZE_ENTRIES 128 +/* max number of TableSizeEntry in table_size_map */ +#define MAX_NUM_TABLE_SIZE_ENTRIES (diskquota_max_table_segments / SEGMENT_SIZE_ARRAY_LENGTH) +/* length of segment size array in TableSizeEntry */ +#define SEGMENT_SIZE_ARRAY_LENGTH 100 +/* max number of keys in QuotaInfoEntryKey */ +#define MAX_NUM_KEYS_QUOTA_MAP 8 +/* init number of QuotaInfoEntry in quota_info_map */ +#define INIT_QUOTA_MAP_ENTRIES 128 +#define AVG_QUOTA_MAP_ENTRIES (diskquota_max_quota_probes / diskquota_max_monitored_databases) +/* max number of QuotaInfoEntry in quota_info_map */ +#define MAX_QUOTA_MAP_ENTRIES (AVG_QUOTA_MAP_ENTRIES < 1024 ? 1024 : AVG_QUOTA_MAP_ENTRIES) + +typedef enum +{ + DISKQUOTA_TAG_HASH = 0, + DISKQUOTA_OID_HASH, + DISKQUOTA_STRING_HASH, +} DiskquotaHashFunction; + +/* max number of monitored database with diskquota enabled */ +#define LAUNCHER_SCHEMA "diskquota_utility" +#define EXTENSION_SCHEMA "diskquota" +extern int diskquota_worker_timeout; + +#define TableIsHeap(relstorage, relam) \ + ((bool)(relam != 0 && relam != AO_ROW_TABLE_AM_OID && relam != AO_COLUMN_TABLE_AM_OID)) +#define TableIsAoRows(relstorage, relam) ((bool)(relam == AO_ROW_TABLE_AM_OID)) +#define TableIsAoCols(relstorage, relam) ((bool)(relam == AO_COLUMN_TABLE_AM_OID)) +#define DiskquotaCreateTemplateTupleDesc(natts) CreateTemplateTupleDesc(natts); +#define DiskquotaWaitLatch(latch, wakeEvents, timeout) WaitLatch(latch, wakeEvents, timeout, WAIT_EVENT_PG_SLEEP) +#define DiskquotaGetRelstorage(classForm) (0) + +typedef enum +{ + NAMESPACE_QUOTA = 0, + ROLE_QUOTA, + NAMESPACE_TABLESPACE_QUOTA, + ROLE_TABLESPACE_QUOTA, + /* + * TABLESPACE_QUOTA + * used in `quota_config` table, + * when set_per_segment_quota("xx",1.0) is called + * to set per segment quota to '1.0', the config + * will be: + * quotatype = 4 (TABLESPACE_QUOTA) + * quotalimitMB = 0 (invalid quota confined) + * segratio = 1.0 + */ + TABLESPACE_QUOTA, + + NUM_QUOTA_TYPES, +} QuotaType; + +/* + * table disk size and corresponding schema, owner and tablespace + */ +typedef struct QuotaInfoEntryKey +{ + QuotaType type; + Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; + int16 segid; +} QuotaInfoEntryKey; + +typedef struct QuotaInfoEntry +{ + QuotaInfoEntryKey key; + int64 size; + int64 limit; +} QuotaInfoEntry; + +typedef enum +{ + FETCH_ACTIVE_OID, /* fetch active table list */ + FETCH_ACTIVE_SIZE, /* fetch size for active tables */ + ADD_DB_TO_MONITOR, + REMOVE_DB_FROM_BEING_MONITORED, + PAUSE_DB_TO_MONITOR, + RESUME_DB_TO_MONITOR, +} FetchTableStatType; + +typedef enum +{ + DISKQUOTA_UNKNOWN_STATE, + DISKQUOTA_READY_STATE +} DiskQuotaState; + +struct DiskQuotaLocks +{ + LWLock *active_table_lock; + LWLock *reject_map_lock; + LWLock *extension_ddl_message_lock; + LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ + LWLock *monitored_dbid_cache_lock; + LWLock 
*relation_cache_lock; + /* dblist_lock is used to protect a DiskquotaDBEntry's content */ + LWLock *dblist_lock; + LWLock *workerlist_lock; + LWLock *altered_reloid_cache_lock; +}; +typedef struct DiskQuotaLocks DiskQuotaLocks; +#define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void *)) + +/* + * MessageBox is used to store a message for communication between + * the diskquota launcher process and backends. + * When backend create an extension, it send a message to launcher + * to start the diskquota worker process and write the corresponding + * + * dbOid into diskquota database_list table in postgres database. + * When backend drop an extension, it will send a message to launcher + * to stop the diskquota worker process and remove the dbOid from diskquota + * database_list table as well. + */ +struct ExtensionDDLMessage +{ + int launcher_pid; /* diskquota launcher pid */ + int req_pid; /* pid of the QD process which create/drop + * diskquota extension */ + int cmd; /* message command type, see MessageCommand */ + int result; /* message result writen by launcher, see + * MessageResult */ + int dbid; /* dbid of create/drop diskquota + * extensionstatement */ +}; + +enum MessageCommand +{ + CMD_CREATE_EXTENSION = 1, + CMD_DROP_EXTENSION, +}; + +enum MessageResult +{ + ERR_PENDING = 0, + ERR_OK, + /* the number of database exceeds the maximum */ + ERR_EXCEED, + /* add the dbid to diskquota_namespace.database_list failed */ + ERR_ADD_TO_DB, + /* delete dbid from diskquota_namespace.database_list failed */ + ERR_DEL_FROM_DB, + /* cann't start worker process */ + ERR_START_WORKER, + /* invalid dbid */ + ERR_INVALID_DBID, + ERR_UNKNOWN, +}; + +typedef struct ExtensionDDLMessage ExtensionDDLMessage; +typedef enum MessageCommand MessageCommand; +typedef enum MessageResult MessageResult; + +extern DiskQuotaLocks diskquota_locks; +extern ExtensionDDLMessage *extension_ddl_message; + +typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; +typedef struct DiskquotaDBEntry DiskquotaDBEntry; + +/* + * disk quota worker info used by launcher to manage the worker processes + * used in DiskquotaLauncherShmem->{freeWorkers, runningWorkers} + */ +struct DiskQuotaWorkerEntry +{ + dlist_node node; // the double linked list header + + int id; // starts from 0, -1 means invalid + NameData dbname; // the database name. It does not need to be reset, when dbEntry == NULL, dbname is not valid. + DiskquotaDBEntry *dbEntry; // pointer to shared memory. DiskquotaLauncherShmem->dbArray +}; + +typedef struct +{ + dlist_head freeWorkers; // a list of DiskQuotaWorkerEntry + dlist_head runningWorkers; // a list of DiskQuotaWorkerEntry + DiskquotaDBEntry *dbArray; // size == diskquota_max_monitored_databases + DiskquotaDBEntry *dbArrayTail; + volatile bool isDynamicWorker; + /* + DiskQuotaWorkerEntry worker[diskquota_max_workers]; // the hidden memory to store WorkerEntry + DiskquotaDBEntry dbentry[diskquota_max_monitored_databases]; // the hidden memory for dbentry + */ +} DiskquotaLauncherShmemStruct; + +/* In shmem, only used on master */ +struct DiskquotaDBEntry +{ + int id; // the index of DiskquotaLauncherShmem->dbArray, start from 0 + Oid dbid; // the database oid in postgres catalog + +#define INVALID_WORKER_ID -1 + int workerId; // the id of the worker which is running for the (current DB?), 0 means no worker for it. + TimestampTz next_run_time; + TimestampTz last_run_time; + int16 cost; // ms + + bool inited; // this entry is inited, will set to true after the worker finish the frist run. 
+ bool in_use; // this slot is in using. AKA dbid != 0 + + TimestampTz last_log_time; // the last time log current database info. +}; + +typedef enum MonitorDBStatus +{ +#define DB_STATUS(id, str) id, +#include "diskquota_enum.h" +#undef DB_STATUS + DB_STATUS_MAX +} MonitorDBStatus; +/* used in monitored_dbid_cache, in shmem, both on master and segments */ + +typedef struct MonitorDBEntryStruct *MonitorDBEntry; +struct MonitorDBEntryStruct +{ + Oid dbid; // the key + pg_atomic_uint32 status; // enum MonitorDBStatus + bool paused; + bool is_readiness_logged; /* true if we have logged the error message for not ready */ + pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ +}; +extern HTAB *disk_quota_worker_map; + +/* drop extension hook */ +extern void register_diskquota_object_access_hook(void); + +/* enforcement interface*/ +extern void init_disk_quota_enforcement(void); +extern void invalidate_database_rejectmap(Oid dbid); + +/* quota model interface*/ +extern void init_disk_quota_shmem(void); +extern void init_disk_quota_model(uint32 id); +extern void refresh_disk_quota_model(bool force); +extern bool check_diskquota_state_is_ready(void); +extern bool quota_check_common(Oid reloid, RelFileNode *relfilenode); + +/* quotaspi interface */ +extern void init_disk_quota_hook(void); + +extern Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS); +extern int diskquota_naptime; +extern int diskquota_max_active_tables; +extern bool diskquota_hardlimit; + +extern int SEGCOUNT; +extern int worker_spi_get_extension_version(int *major, int *minor); +extern void truncateStringInfo(StringInfo str, int nchars); +extern List *get_rel_oid_list(bool is_init); +extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam); +extern Relation diskquota_relation_open(Oid relid); +extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname); +extern List *diskquota_get_index_list(Oid relid); +extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); +extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); + +extern bool worker_increase_epoch(Oid dbid); +extern unsigned int worker_get_epoch(Oid dbid); +extern bool diskquota_is_paused(void); +extern bool do_check_diskquota_state_is_ready(void); +extern bool diskquota_is_readiness_logged(void); +extern void diskquota_set_readiness_logged(void); +extern Size diskquota_launcher_shmem_size(void); +extern void init_launcher_shmem(void); +extern void vacuum_disk_quota_model(uint32 id); +extern void update_monitor_db(Oid dbid, FetchTableStatType action); +extern void update_monitor_db_mpp(Oid dbid, FetchTableStatType action, const char *schema); +extern void diskquota_stop_worker(void); +extern void update_monitordb_status(Oid dbid, uint32 status); +extern HTAB *diskquota_hash_create(const char *tabname, long nelem, HASHCTL *info, int flags, + DiskquotaHashFunction hashFunction); +extern HTAB *DiskquotaShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags, + DiskquotaHashFunction hash_function); +extern void refresh_monitored_dbid_cache(void); +#endif diff --git a/gpcontrib/diskquota/src/diskquota_enum.h b/gpcontrib/diskquota/src/diskquota_enum.h new file mode 100644 index 00000000000..28923b9833a --- /dev/null +++ b/gpcontrib/diskquota/src/diskquota_enum.h @@ -0,0 +1,8 @@ +#ifdef DB_STATUS +DB_STATUS(DB_STATUS_UNKNOWN = 0, "UNKNOWN") +DB_STATUS(DB_INIT, "INIT") 
+DB_STATUS(DB_ERROR, "ERROR") +DB_STATUS(DB_UNREADY, "UNREADY") +DB_STATUS(DB_PAUSED, "PAUSED") +DB_STATUS(DB_RUNNING, "RUNNING") +#endif diff --git a/gpcontrib/diskquota/src/diskquota_utility.c b/gpcontrib/diskquota/src/diskquota_utility.c new file mode 100644 index 00000000000..6bb54de64b4 --- /dev/null +++ b/gpcontrib/diskquota/src/diskquota_utility.c @@ -0,0 +1,1654 @@ +/* ------------------------------------------------------------------------- + * + * diskquota_utility.c + * + * Diskquota utility contains some help functions for diskquota. + * set_schema_quota and set_role_quota is used by user to set quota limit. + * init_table_size_table is used to initialize table 'diskquota.table_size' + * diskquota_start_worker is used when 'create extension' DDL. It will start + * the corresponding worker process immediately. + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates + * + * IDENTIFICATION + * diskquota/diskquota_utility.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include + +#include "access/aomd.h" +#include "access/xact.h" +#include "access/heapam.h" +#include "access/genam.h" +#include "common/hashfn.h" +#include "catalog/gp_indexing.h" +#include "catalog/namespace.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_namespace.h" +#include "catalog/indexing.h" +#include "commands/dbcommands.h" +#include "commands/extension.h" +#include "commands/tablespace.h" +#include "executor/spi.h" +#include "nodes/makefuncs.h" +#include "pgstat.h" +#include "storage/proc.h" +#include "utils/snapmgr.h" +#include "utils/builtins.h" +#include "utils/faultinjector.h" +#include "utils/fmgroids.h" +#include "utils/formatting.h" +#include "utils/numeric.h" +#include "libpq-fe.h" +#include "funcapi.h" + +#include +#include +#include + +#include "diskquota.h" +#include "gp_activetable.h" + +/* disk quota helper function */ + +PG_FUNCTION_INFO_V1(init_table_size_table); +PG_FUNCTION_INFO_V1(diskquota_start_worker); +PG_FUNCTION_INFO_V1(diskquota_pause); +PG_FUNCTION_INFO_V1(diskquota_resume); +PG_FUNCTION_INFO_V1(set_schema_quota); +PG_FUNCTION_INFO_V1(set_role_quota); +PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); +PG_FUNCTION_INFO_V1(set_role_tablespace_quota); +PG_FUNCTION_INFO_V1(set_per_segment_quota); +PG_FUNCTION_INFO_V1(relation_size_local); +PG_FUNCTION_INFO_V1(pull_all_table_size); + +/* timeout count to wait response from launcher process, in 1/10 sec */ +#define WAIT_TIME_COUNT 1200 +/* + * three types values for "quota" column in "quota_config" table: + * 1) more than 0: valid value + * 2) 0: meaningless value, rejected by diskquota UDF + * 3) less than 0: to delete the quota config in the table + * + * the values for segratio column are the same as quota column + * + * In quota_config table, + * 1) when quota type is "TABLESPACE_QUOTA", + * the quota column value is always INVALID_QUOTA + * 2) when quota type is "NAMESPACE_TABLESPACE_QUOTA" or "ROLE_TABLESPACE_QUOTA" + * and no segratio configed for the tablespace, the segratio value is + * INVALID_SEGRATIO. + * 3) when quota type is "NAMESPACE_QUOTA" or "ROLE_QUOTA", the segratio is + * always INVALID_SEGRATIO. 
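+ *
+ * For illustration only (hypothetical tablespace name and values): after
+ * SELECT diskquota.set_per_segment_quota('spc1', 2.0);
+ * quota_config holds a row for that tablespace with quotatype = 4
+ * (TABLESPACE_QUOTA), quota = 0 (INVALID_QUOTA) and segratio = 2.0.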
+ */ +#define INVALID_SEGRATIO 0.0 +#define INVALID_QUOTA 0 + +#define report_ddl_err(ddl_msg, prefix) \ + do \ + { \ + MessageResult ddl_result_ = (MessageResult)ddl_msg->result; \ + const char *ddl_err_; \ + const char *ddl_hint_; \ + ddl_err_code_to_err_message(ddl_result_, &ddl_err_, &ddl_hint_); \ + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("%s: %s", prefix, ddl_err_), \ + ddl_hint_ ? errhint("%s", ddl_hint_) : 0)); \ + } while (0) + +static bool is_database_empty(void); +static void ddl_err_code_to_err_message(MessageResult code, const char **err_msg, const char **hint_msg); +static int64 get_size_in_mb(char *str); +static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, float4 segratio, Oid spcoid); +static int set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); +static float4 get_per_segment_ratio(Oid spcoid); +static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); +static void check_role(Oid roleoid, char *rolname, int64 quota_limit_mb); + +/* ---- Help Functions to set quota limit. ---- */ +/* + * Initialize table diskquota.table_size. + * calculate table size by UDF pg_table_size + * This function is called by user, errors should not + * be catch, and should be sent back to user + */ +Datum +init_table_size_table(PG_FUNCTION_ARGS) +{ + int ret; + + RangeVar *rv; + Relation rel; + /* + * If error happens in init_table_size_table, just return error messages + * to the client side. So there is no need to catch the error. + */ + + /* ensure table diskquota.state exists */ + rv = makeRangeVar("diskquota", "state", -1); + rel = table_openrv_extended(rv, AccessShareLock, true); + if (!rel) + { + /* configuration table is missing. */ + elog(ERROR, + "table \"diskquota.state\" is missing in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)); + } + table_close(rel, NoLock); + + /* + * Why don't use insert into diskquota.table_size select from pg_table_size here? + * + * insert into foo select oid, pg_table_size(oid), -1 from pg_class where + * oid >= 16384 and (relkind='r' or relkind='m'); + * ERROR: This query is not currently supported by GPDB. (entry db 127.0.0.1:6000 pid=61114) + * + * Some functions are peculiar in that they do their own dispatching. + * Such as pg_table_size. + * They do not work on entry db since we do not support dispatching + * from entry-db currently. + */ + SPI_connect(); + + /* delete all the table size info in table_size if exist. */ + ret = SPI_execute("truncate table diskquota.table_size", false, 0); + if (ret != SPI_OK_UTILITY) elog(ERROR, "cannot truncate table_size table: error code %d", ret); + + ret = SPI_execute( + "INSERT INTO " + " diskquota.table_size " + "WITH all_size AS " + " (" + " SELECT diskquota.pull_all_table_size() AS a FROM gp_dist_random('gp_id')" + " ) " + "SELECT (a).* FROM all_size", + false, 0); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); + + /* size is the sum of size on master and on all segments when segid == -1. 
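+ E.g. (hypothetical numbers): a table using 100 bytes on each of two segments
+ and nothing on the coordinator ends up with rows (oid, 100, 0), (oid, 100, 1)
+ plus the aggregated row (oid, 200, -1).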
*/ + ret = SPI_execute( + "INSERT INTO " + " diskquota.table_size " + "WITH total_size AS " + " (" + " SELECT * from diskquota.pull_all_table_size()" + " UNION ALL " + " SELECT tableid, size, segid FROM diskquota.table_size" + " ) " + "SELECT tableid, sum(size) as size, -1 as segid FROM total_size GROUP BY tableid;", + false, 0); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); + + /* set diskquota state to ready. */ + ret = SPI_execute_with_args("update diskquota.state set state = $1", 1, + (Oid[]){ + INT4OID, + }, + (Datum[]){ + Int32GetDatum(DISKQUOTA_READY_STATE), + }, + NULL, false, 0); + if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update state table: error code %d", ret); + + SPI_finish(); + PG_RETURN_VOID(); +} + +static HTAB * +calculate_all_table_size() +{ + Relation classRel; + HeapTuple tuple; + TableScanDesc relScan; + Oid relid; + Oid prelid; + Size tablesize; + RelFileNodeBackend rnode; + TableEntryKey keyitem; + HTAB *local_table_size_map; + HASHCTL hashctl; + DiskQuotaActiveTableEntry *entry; + bool found; + char relstorage; + + memset(&hashctl, 0, sizeof(hashctl)); + hashctl.keysize = sizeof(TableEntryKey); + hashctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + hashctl.hcxt = CurrentMemoryContext; + + local_table_size_map = + diskquota_hash_create("local_table_size_map", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); + classRel = table_open(RelationRelationId, AccessShareLock); + relScan = table_beginscan_catalog(classRel, 0, NULL); + + while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) + { + Form_pg_class classForm = (Form_pg_class)GETSTRUCT(tuple); + if (classForm->relkind != RELKIND_RELATION && classForm->relkind != RELKIND_MATVIEW && + classForm->relkind != RELKIND_INDEX && classForm->relkind != RELKIND_AOSEGMENTS && + classForm->relkind != RELKIND_AOBLOCKDIR && classForm->relkind != RELKIND_AOVISIMAP && + classForm->relkind != RELKIND_TOASTVALUE) + continue; + + relid = classForm->oid; + /* ignore system table */ + if (relid < FirstNormalObjectId) continue; + + rnode.node.dbNode = MyDatabaseId; + rnode.node.relNode = classForm->relfilenode; + rnode.node.spcNode = OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace; + rnode.backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId; + relstorage = DiskquotaGetRelstorage(classForm); + + tablesize = calculate_relation_size_all_forks(&rnode, relstorage, classForm->relam); + + keyitem.reloid = relid; + keyitem.segid = GpIdentity.segindex; + + prelid = diskquota_parse_primary_table_oid(classForm->relnamespace, classForm->relname.data); + if (OidIsValid(prelid)) + { + keyitem.reloid = prelid; + } + + entry = hash_search(local_table_size_map, &keyitem, HASH_ENTER, &found); + if (!found) + { + entry->tablesize = 0; + } + entry->tablesize += tablesize; + } + table_endscan(relScan); + table_close(classRel, AccessShareLock); + + return local_table_size_map; +} + +Datum +pull_all_table_size(PG_FUNCTION_ARGS) +{ + DiskQuotaActiveTableEntry *entry; + FuncCallContext *funcctx; + struct PullAllTableSizeCtx + { + HASH_SEQ_STATUS iter; + HTAB *local_table_size_map; + } * table_size_ctx; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext oldcontext; + + /* Create a function context for cross-call persistence. 
*/ + funcctx = SRF_FIRSTCALL_INIT(); + + /* Switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + tupdesc = DiskquotaCreateTemplateTupleDesc(3); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "TABLEID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "SIZE", INT8OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "SEGID", INT2OID, -1 /*typmod*/, 0 /*attdim*/); + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + /* Create a local hash table and fill it with entries from shared memory. */ + table_size_ctx = (struct PullAllTableSizeCtx *)palloc(sizeof(struct PullAllTableSizeCtx)); + table_size_ctx->local_table_size_map = calculate_all_table_size(); + + /* Setup first calling context. */ + hash_seq_init(&(table_size_ctx->iter), table_size_ctx->local_table_size_map); + funcctx->user_fctx = (void *)table_size_ctx; + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + table_size_ctx = (struct PullAllTableSizeCtx *)funcctx->user_fctx; + + while ((entry = hash_seq_search(&(table_size_ctx->iter))) != NULL) + { + Datum result; + Datum values[3]; + bool nulls[3]; + HeapTuple tuple; + + values[0] = ObjectIdGetDatum(entry->reloid); + values[1] = Int64GetDatum(entry->tablesize); + values[2] = Int16GetDatum(entry->segid); + + memset(nulls, false, sizeof(nulls)); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + + SRF_RETURN_DONE(funcctx); +} +/* + * Trigger to start diskquota worker when create extension diskquota. + * This function is called at backend side, and will send message to + * diskquota launcher. Launcher process is responsible for starting the real + * diskquota worker process. + */ +Datum +diskquota_start_worker(PG_FUNCTION_ARGS) +{ + int rc, launcher_pid; + + /* + * Lock on extension_ddl_lock to avoid multiple backend create diskquota + * extension at the same time. + */ + LWLockAcquire(diskquota_locks.extension_ddl_lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); + extension_ddl_message->req_pid = MyProcPid; + extension_ddl_message->cmd = CMD_CREATE_EXTENSION; + extension_ddl_message->result = ERR_PENDING; + extension_ddl_message->dbid = MyDatabaseId; + launcher_pid = extension_ddl_message->launcher_pid; + /* setup sig handler to diskquota launcher process */ + rc = kill(launcher_pid, SIGUSR2); + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + if (rc == 0) + { + int count = WAIT_TIME_COUNT; + + while (count-- > 0) + { + CHECK_FOR_INTERRUPTS(); + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L); + if (rc & WL_POSTMASTER_DEATH) break; + ResetLatch(&MyProc->procLatch); + + ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check + ERROR, + (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid), + errhint("The diskquota launcher process has been terminated for some reasons. 
Consider to " + "restart the cluster to start it."))); + + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); + if (extension_ddl_message->result != ERR_PENDING) + { + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + break; + } + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + } + } + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); + if (extension_ddl_message->result != ERR_OK) + { + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); + report_ddl_err(extension_ddl_message, "[diskquota] failed to create diskquota extension"); + } + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); + + /* notify DBA to run init_table_size_table() when db is not empty */ + if (!is_database_empty()) + { + ereport(WARNING, (errmsg("[diskquota] diskquota is not ready because current database is not empty"), + errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota"))); + } + PG_RETURN_VOID(); +} + +/* + * this function is called by user. + * pause diskquota in current or specific database. + * After this function being called, diskquota doesn't emit an error when the disk usage limit is exceeded. + */ +Datum +diskquota_pause(PG_FUNCTION_ARGS) +{ + if (!superuser()) + { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to pause diskquota"))); + } + + Oid dbid = MyDatabaseId; + if (PG_NARGS() == 1) + { + dbid = PG_GETARG_OID(0); + } + if (IS_QUERY_DISPATCHER()) + { + // pause current worker + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); + } + update_monitor_db_mpp(dbid, PAUSE_DB_TO_MONITOR, EXTENSION_SCHEMA); + SPI_finish(); + } + PG_RETURN_VOID(); +} + +/* + * this function is called by user. + * active diskquota in current or specific database + */ +Datum +diskquota_resume(PG_FUNCTION_ARGS) +{ + if (!superuser()) + { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to resume diskquota"))); + } + + Oid dbid = MyDatabaseId; + if (PG_NARGS() == 1) + { + dbid = PG_GETARG_OID(0); + } + + // active current worker + if (IS_QUERY_DISPATCHER()) + { + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); + } + update_monitor_db_mpp(dbid, RESUME_DB_TO_MONITOR, EXTENSION_SCHEMA); + SPI_finish(); + } + + PG_RETURN_VOID(); +} + +/* + * Check whether database is empty (no user table created) + */ +static bool +is_database_empty(void) +{ + int ret; + TupleDesc tupdesc; + bool is_empty = false; + + /* + * If error happens in is_database_empty, just return error messages to + * the client side. So there is no need to catch the error. 
+ */ + SPI_connect(); + + ret = SPI_execute( + "INSERT INTO diskquota.state SELECT (count(relname) = 0)::int " + "FROM " + " pg_class AS c, " + " pg_namespace AS n " + "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota' " + "and relkind not in ('v', 'c', 'f') " + "returning state", + false, 0); + if (ret != SPI_OK_INSERT_RETURNING) + { + int saved_errno = errno; + elog(ERROR, "cannot select pg_class and pg_namespace table and update diskquota.state, reason: %s.", + strerror(saved_errno)); + } + + tupdesc = SPI_tuptable->tupdesc; + /* check sql return value whether database is empty */ + if (SPI_processed > 0) + { + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (!isnull) + { + /* check whether condition `count(relname) = 0` is true */ + is_empty = DatumGetBool(dat); + } + } + + /* + * And finish our transaction. + */ + SPI_finish(); + return is_empty; +} + +void +diskquota_stop_worker(void) +{ + int rc, launcher_pid; + + if (!IS_QUERY_DISPATCHER()) + { + return; + } + + /* + * Lock on extension_ddl_lock to avoid multiple backend create diskquota + * extension at the same time. + */ + LWLockAcquire(diskquota_locks.extension_ddl_lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); + extension_ddl_message->req_pid = MyProcPid; + extension_ddl_message->cmd = CMD_DROP_EXTENSION; + extension_ddl_message->result = ERR_PENDING; + extension_ddl_message->dbid = MyDatabaseId; + launcher_pid = extension_ddl_message->launcher_pid; + rc = kill(launcher_pid, SIGUSR2); + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + if (rc == 0) + { + int count = WAIT_TIME_COUNT; + + while (count-- > 0) + { + CHECK_FOR_INTERRUPTS(); + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L); + if (rc & WL_POSTMASTER_DEATH) break; + ResetLatch(&MyProc->procLatch); + + ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check + ERROR, + (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid), + errhint("The diskquota launcher process has been terminated for some reasons. Consider to " + "restart the cluster to start it."))); + + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); + if (extension_ddl_message->result != ERR_PENDING) + { + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + break; + } + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + } + } + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); + if (extension_ddl_message->result != ERR_OK) + { + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); + report_ddl_err(extension_ddl_message, "[diskquota] failed to drop diskquota extension"); + } + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); +} + +/* + * For extension DDL('create extension/drop extension') + * Using this function to convert error code from diskquota + * launcher to error message and return it to client. 
+ */ +static void +ddl_err_code_to_err_message(MessageResult code, const char **err_msg, const char **hint_msg) +{ + *hint_msg = NULL; + switch (code) + { + case ERR_PENDING: + *err_msg = "no response from diskquota launcher, check whether launcher process exists"; + *hint_msg = "Create \"diskquota\" database and restart the cluster."; + break; + case ERR_OK: + *err_msg = "succeeded"; + break; + case ERR_EXCEED: + *err_msg = "too many databases to monitor"; + break; + case ERR_ADD_TO_DB: + *err_msg = "add dbid to database_list failed"; + break; + case ERR_DEL_FROM_DB: + *err_msg = "delete dbid from database_list failed"; + break; + case ERR_START_WORKER: + *err_msg = "start diskquota worker failed"; + break; + case ERR_INVALID_DBID: + *err_msg = "invalid dbid"; + break; + default: + *err_msg = "unknown error"; + break; + } +} + +static Datum +__get_oid_auto_case_convert(Oid (*f)(const char *name, bool missing_ok), const char *name) +{ + char *b = NULL; + int l = strlen(name); + Oid ret = InvalidOid; + + if (l > 2 && name[0] == '"' && name[l - 1] == '"') + { + // object name wrapped by '"'. eg: "foo" + // l - 2 is the length without quotes, +1 for null terminator + b = palloc(l - 1); + memcpy(b, name + 1, l - 2); + b[l - 2] = '\0'; + } + else + { + // lower the object name if not wrapped by '"' + b = str_tolower(name, strlen(name), DEFAULT_COLLATION_OID); + } + + ret = f(b, false); + + pfree(b); + return ret; +} + +/* + * Set disk quota limit for role. + */ +Datum +set_role_quota(PG_FUNCTION_ARGS) +{ + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; + + if (!superuser()) + { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); + } + + rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + roleoid = __get_oid_auto_case_convert(get_role_oid, rolname); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + if (quota_limit_mb == 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); + } + check_role(roleoid, rolname, quota_limit_mb); + + SPI_connect(); + set_quota_config_internal(roleoid, quota_limit_mb, ROLE_QUOTA, INVALID_SEGRATIO, InvalidOid); + SPI_finish(); + PG_RETURN_VOID(); +} + +/* + * Set disk quota limit for schema. + */ +Datum +set_schema_quota(PG_FUNCTION_ARGS) +{ + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; + + if (!superuser()) + { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); + } + + nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + namespaceoid = __get_oid_auto_case_convert(get_namespace_oid, nspname); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + if (quota_limit_mb == 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); + } + SPI_connect(); + set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA, INVALID_SEGRATIO, InvalidOid); + SPI_finish(); + PG_RETURN_VOID(); +} + +/* + * Set disk quota limit for tablepace role. + */ +Datum +set_role_tablespace_quota(PG_FUNCTION_ARGS) +{ + /* + * Write the quota limit info into target and quota_config table under + * 'diskquota' schema of the current database. 
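+ *
+ * Illustrative usage, assuming the SQL wrappers installed by the extension
+ * script keep these C function names ('dba' and 'fastspace' are placeholder
+ * role/tablespace names):
+ *
+ *     SELECT diskquota.set_role_tablespace_quota('dba', 'fastspace', '10 GB');
+ *
+ * set_role_quota('dba', '10 GB') and set_schema_quota('s1', '10 GB') work the
+ * same way for plain role/schema quotas; a negative size such as '-1 MB'
+ * removes the quota again.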
+ */ + Oid spcoid; + char *spcname; + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; + int row_id; + + if (!superuser()) + { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); + } + + rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + roleoid = __get_oid_auto_case_convert(get_role_oid, rolname); + + spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); + spcoid = __get_oid_auto_case_convert(get_tablespace_oid, spcname); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + if (quota_limit_mb == 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); + } + check_role(roleoid, rolname, quota_limit_mb); + + SPI_connect(); + row_id = set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); + set_quota_config_internal(row_id, quota_limit_mb, ROLE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); + SPI_finish(); + PG_RETURN_VOID(); +} + +/* + * Set disk quota limit for tablepace schema. + */ +Datum +set_schema_tablespace_quota(PG_FUNCTION_ARGS) +{ + /* + * Write the quota limit info into target and quota_config table under + * 'diskquota' schema of the current database. + */ + Oid spcoid; + char *spcname; + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; + int row_id; + + if (!superuser()) + { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); + } + + nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + namespaceoid = __get_oid_auto_case_convert(get_namespace_oid, nspname); + + spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); + spcoid = __get_oid_auto_case_convert(get_tablespace_oid, spcname); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + if (quota_limit_mb == 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); + } + + SPI_connect(); + row_id = set_target_internal(namespaceoid, spcoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); + set_quota_config_internal(row_id, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); + SPI_finish(); + PG_RETURN_VOID(); +} + +/* + * set_quota_config_intenral - insert/update/delete quota_config table + * + * If the segratio is valid, query the segratio from + * the table "quota_config" by spcoid. + * + * DELETE doesn't need the segratio + */ +static void +set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, float4 segratio, Oid spcoid) +{ + int ret; + + /* Report error if diskquota is not ready. */ + do_check_diskquota_state_is_ready(); + + /* + * If error happens in set_quota_config_internal, just return error messages to + * the client side. So there is no need to catch the error. 
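+ *
+ * Sketch of the resulting behaviour:
+ *   - to_delete_quota() is true (negative limit, or a negative segratio for
+ *     TABLESPACE_QUOTA): delete the matching quota_config row if present;
+ *   - no existing row: insert one, looking up the tablespace's per-segment
+ *     ratio when the caller did not pass a valid one (except for plain
+ *     ROLE_QUOTA/NAMESPACE_QUOTA);
+ *   - existing row: update quotalimitMB, and segratio too when a valid ratio
+ *     was supplied.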
+ */ + + ret = SPI_execute_with_args("select true from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, + (Oid[]){ + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, true, 0); + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select quota setting table: error code %d", ret); + + if (to_delete_quota(type, quota_limit_mb, segratio)) + { + if (SPI_processed > 0) + { + ret = SPI_execute_with_args("delete from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, + (Oid[]){ + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); + if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); + } + // else do nothing + } + // to upsert quota_config + else + { + if (SPI_processed == 0) + { + if (segratio == INVALID_SEGRATIO && !(type == ROLE_QUOTA || type == NAMESPACE_QUOTA)) + segratio = get_per_segment_ratio(spcoid); + ret = SPI_execute_with_args("insert into diskquota.quota_config values($1, $2, $3, $4)", 4, + (Oid[]){ + OIDOID, + INT4OID, + INT8OID, + FLOAT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + Int64GetDatum(quota_limit_mb), + Float4GetDatum(segratio), + }, + NULL, false, 0); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + } + else + { + // no need to update segratio + if (segratio == INVALID_SEGRATIO) + { + ret = SPI_execute_with_args( + "update diskquota.quota_config set quotalimitMB = $1 where targetoid= $2 and quotatype = $3", 3, + (Oid[]){ + INT8OID, + OIDOID, + INT4OID, + }, + (Datum[]){ + Int64GetDatum(quota_limit_mb), + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); + } + else + { + ret = SPI_execute_with_args( + "update diskquota.quota_config set quotalimitMb = $1, segratio = $2 where targetoid= $3 and " + "quotatype = $4", + 4, + (Oid[]){ + INT8OID, + FLOAT4OID, + OIDOID, + INT4OID, + }, + (Datum[]){ + Int64GetDatum(quota_limit_mb), + Float4GetDatum(segratio), + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); + } + if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update quota setting table, error code %d", ret); + } + } + + return; +} + +static int +set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type) +{ + int ret; + int row_id = -1; + bool is_null = false; + Datum v; + + /* + * If error happens in set_target_internal, just return error messages to + * the client side. So there is no need to catch the error. 
+ */ + + ret = SPI_execute_with_args( + "select t.rowId from diskquota.quota_config as q, diskquota.target as t" + " where t.primaryOid = $1" + " and t.tablespaceOid = $2" + " and t.quotaType = $3" + " and t.quotaType = q.quotaType" + " and t.rowId = q.targetOid", + 3, + (Oid[]){ + OIDOID, + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + Int32GetDatum(type), + }, + NULL, true, 0); + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target setting table: error code %d", ret); + + if (SPI_processed > 0) + { + is_null = false; + v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + row_id = DatumGetInt32(v); + } + + /* if the schema or role's quota has not been set before */ + if (SPI_processed == 0 && quota_limit_mb > 0) + { + ret = SPI_execute_with_args( + "insert into diskquota.target (quotatype, primaryOid, tablespaceOid) values($1, $2, $3) returning " + "rowId", + 3, + (Oid[]){ + INT4OID, + OIDOID, + OIDOID, + }, + (Datum[]){ + Int32GetDatum(type), + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); + if (ret != SPI_OK_INSERT_RETURNING) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + + is_null = false; + v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + row_id = DatumGetInt32(v); + } + else if (SPI_processed > 0 && quota_limit_mb < 0) + { + ret = SPI_execute_with_args( + "delete from diskquota.target where primaryOid = $1 and tablespaceOid = $2 returning rowId", 2, + (Oid[]){ + OIDOID, + OIDOID, + }, + (Datum[]){ + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); + if (ret != SPI_OK_DELETE_RETURNING) + elog(ERROR, "cannot delete item from target setting table, error code %d", ret); + + is_null = false; + v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + row_id = DatumGetInt32(v); + } + /* No need to update the target table */ + + return row_id; +} + +/* + * Convert a human-readable size to a size in MB. + */ +static int64 +get_size_in_mb(char *str) +{ + char *strptr, *endptr; + char saved_char; + Numeric num; + int64 result; + bool have_digits = false; + + /* Skip leading whitespace */ + strptr = str; + while (isspace((unsigned char)*strptr)) strptr++; + + /* Check that we have a valid number and determine where it ends */ + endptr = strptr; + + /* Part (1): sign */ + if (*endptr == '-' || *endptr == '+') endptr++; + + /* Part (2): main digit string */ + if (isdigit((unsigned char)*endptr)) + { + have_digits = true; + do endptr++; + while (isdigit((unsigned char)*endptr)); + } + + /* Part (3): optional decimal point and fractional digits */ + if (*endptr == '.') + { + endptr++; + if (isdigit((unsigned char)*endptr)) + { + have_digits = true; + do endptr++; + while (isdigit((unsigned char)*endptr)); + } + } + + /* Complain if we don't have a valid number at this point */ + if (!have_digits) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid size: \"%s\"", str))); + + /* Part (4): optional exponent */ + if (*endptr == 'e' || *endptr == 'E') + { + long exponent; + char *cp; + + /* + * Note we might one day support EB units, so if what follows 'E' + * isn't a number, just treat it all as a unit to be parsed. 
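+ *
+ * For example, '1e3' advances endptr past the exponent so the numeric parser
+ * below sees 1000 (in MB), while for '20EB' strtol consumes nothing and the
+ * trailing 'EB' is later rejected as an unknown unit.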
+ */ + exponent = strtol(endptr + 1, &cp, 10); + (void)exponent; /* Silence -Wunused-result warnings */ + if (cp > endptr + 1) endptr = cp; + } + + /* + * Parse the number, saving the next character, which may be the first + * character of the unit string. + */ + saved_char = *endptr; + *endptr = '\0'; + + num = DatumGetNumeric( + DirectFunctionCall3(numeric_in, CStringGetDatum(strptr), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); + + *endptr = saved_char; + + /* Skip whitespace between number and unit */ + strptr = endptr; + while (isspace((unsigned char)*strptr)) strptr++; + + /* Handle possible unit */ + if (*strptr != '\0') + { + int64 multiplier = 0; + + /* Trim any trailing whitespace */ + endptr = str + strlen(str) - 1; + + while (isspace((unsigned char)*endptr)) endptr--; + + endptr++; + *endptr = '\0'; + + /* Parse the unit case-insensitively */ + if (pg_strcasecmp(strptr, "mb") == 0) + multiplier = ((int64)1); + + else if (pg_strcasecmp(strptr, "gb") == 0) + multiplier = ((int64)1024); + + else if (pg_strcasecmp(strptr, "tb") == 0) + multiplier = ((int64)1024) * 1024; + else if (pg_strcasecmp(strptr, "pb") == 0) + multiplier = ((int64)1024) * 1024 * 1024; + else + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid size: \"%s\"", str), + errdetail("Invalid size unit: \"%s\".", strptr), + errhint("Valid units are \"MB\", \"GB\", \"TB\", and \"PB\"."))); + + if (multiplier > 1) + { + Numeric mul_num; + + mul_num = DatumGetNumeric(DirectFunctionCall1(int8_numeric, Int64GetDatum(multiplier))); + + num = DatumGetNumeric(DirectFunctionCall2(numeric_mul, NumericGetDatum(mul_num), NumericGetDatum(num))); + } + } + + result = DatumGetInt64(DirectFunctionCall1(numeric_int8, NumericGetDatum(num))); + + return result; +} + +/* + * Function to set disk quota ratio for per-segment + */ +Datum +set_per_segment_quota(PG_FUNCTION_ARGS) +{ + int ret; + Oid spcoid; + char *spcname; + float4 ratio; + + ereportif(!superuser(), ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); + + spcname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + spcoid = __get_oid_auto_case_convert(get_tablespace_oid, spcname); + + ratio = PG_GETARG_FLOAT4(1); + + ereportif(ratio == 0, ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("per segment quota ratio can not be set to 0"))); + + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("unable to connect to execute internal query"))); + } + /* + * lock table quota_config table in exlusive mode + * + * Firstly insert the segratio with TABLESPACE_QUOTA + * row into the table(ROWSHARE lock), then udpate the + * segratio for TABLESPACE_SHCEMA/ROLE_QUOTA rows + * (EXLUSIZE lock), if we don't lock the table in + * exlusive mode first, deadlock will heappen. + */ + ret = SPI_execute("LOCK TABLE diskquota.quota_config IN EXCLUSIVE MODE", false, 0); + if (ret != SPI_OK_UTILITY) elog(ERROR, "cannot lock quota_config table, error code %d", ret); + + /* + * insert/update/detele tablespace ratio config in the quota_config table + * for TABLESPACE_QUOTA, it doesn't store any quota info, just used to + * store the ratio for the tablespace. 
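+ *
+ * Illustrative usage, assuming the SQL wrapper keeps the C function name
+ * ('fastspace' is a placeholder tablespace name):
+ *
+ *     SELECT diskquota.set_per_segment_quota('fastspace', 2.0);
+ *
+ * A negative ratio such as -1 clears the per-segment setting again, as
+ * handled below.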
+ */ + set_quota_config_internal(spcoid, INVALID_QUOTA, TABLESPACE_QUOTA, ratio, InvalidOid); + + /* + * UPDATEA NAMESPACE_TABLESPACE_PERSEG_QUOTA AND ROLE_TABLESPACE_PERSEG_QUOTA config for this tablespace + */ + + /* set to invalid ratio value if the tablespace per segment quota deleted */ + if (ratio < 0) + { + ratio = INVALID_SEGRATIO; + } + + ret = SPI_execute_with_args( + "UPDATE diskquota.quota_config AS q set segratio = $1 FROM diskquota.target AS t WHERE " + "q.targetOid = t.rowId AND (t.quotaType = $2 OR t.quotaType = $3) AND t.quotaType = " + "q.quotaType And t.tablespaceOid = $4", + 4, + (Oid[]){ + FLOAT4OID, + INT4OID, + INT4OID, + OIDOID, + }, + (Datum[]){ + Float4GetDatum(ratio), + Int32GetDatum(NAMESPACE_TABLESPACE_QUOTA), + Int32GetDatum(ROLE_TABLESPACE_QUOTA), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); + if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update item from quota setting table, error code %d", ret); + + /* + * And finish our transaction. + */ + SPI_finish(); + PG_RETURN_VOID(); +} + +int +worker_spi_get_extension_version(int *major, int *minor) +{ + StartTransactionCommand(); + int ret = SPI_connect(); + Assert(ret = SPI_OK_CONNECT); + PushActiveSnapshot(GetTransactionSnapshot()); + + ret = SPI_execute("select extversion from pg_extension where extname = 'diskquota'", true, 0); + + if (SPI_processed == 0) + { + ret = -1; + goto out; + } + + if (ret != SPI_OK_SELECT || SPI_processed != 1) + { + ereport(WARNING, + (errmsg("[diskquota] when reading installed version lines %ld code = %d", SPI_processed, ret))); + ret = -1; + goto out; + } + + bool is_null = false; + Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + + char *version = TextDatumGetCString(v); + if (version == NULL) + { + ereport(WARNING, + (errmsg("[diskquota] 'extversion' is empty in pg_class.pg_extension. catalog might be corrupted"))); + ret = -1; + goto out; + } + + ret = sscanf(version, "%d.%d", major, minor); + + if (ret != 2) + { + ereport(WARNING, (errmsg("[diskquota] 'extversion' is '%s' in pg_class.pg_extension which is not valid format. " + "catalog might be corrupted", + version))); + ret = -1; + goto out; + } + + ret = 0; + +out: + SPI_finish(); + PopActiveSnapshot(); + CommitTransactionCommand(); + + return ret; +} + +/* + * Get the list of oids of the tables which diskquota + * needs to care about in the database. + * Firstly the all the table oids which relkind is 'r' + * or 'm' and not system table. On init stage, oids from + * diskquota.table_size are added to invalidate them. + * Then, fetch the indexes of those tables. + */ + +List * +get_rel_oid_list(bool is_init) +{ + List *oidlist = NIL; + int ret; + +#define SELECT_FROM_PG_CATALOG_PG_CLASS "select oid from pg_catalog.pg_class where oid >= $1 and relkind in ('r', 'm')" + + ret = SPI_execute_with_args(is_init ? SELECT_FROM_PG_CATALOG_PG_CLASS + " union distinct" + " select tableid from diskquota.table_size where segid = -1" + : SELECT_FROM_PG_CATALOG_PG_CLASS, + 1, + (Oid[]){ + OIDOID, + }, + (Datum[]){ + ObjectIdGetDatum(FirstNormalObjectId), + }, + NULL, false, 0); + +#undef SELECT_FROM_PG_CATALOG_PG_CLASS + + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot fetch in pg_class. 
error code %d", ret); + + TupleDesc tupdesc = SPI_tuptable->tupdesc; + for (int i = 0; i < SPI_processed; i++) + { + HeapTuple tup; + bool isnull; + Oid oid; + ListCell *l; + + tup = SPI_tuptable->vals[i]; + oid = DatumGetObjectId(SPI_getbinval(tup, tupdesc, 1, &isnull)); + if (!isnull) + { + List *indexIds; + oidlist = lappend_oid(oidlist, oid); + indexIds = diskquota_get_index_list(oid); + if (indexIds != NIL) + { + foreach (l, indexIds) + { + oidlist = lappend_oid(oidlist, lfirst_oid(l)); + } + } + list_free(indexIds); + } + } + return oidlist; +} + +typedef struct +{ + char *relation_path; + int64 size; +} RelationFileStatCtx; + +static bool +relation_file_stat(int segno, void *ctx) +{ + RelationFileStatCtx *stat_ctx = (RelationFileStatCtx *)ctx; + char file_path[MAXPGPATH] = {0}; + if (segno == 0) + snprintf(file_path, MAXPGPATH, "%s", stat_ctx->relation_path); + else + snprintf(file_path, MAXPGPATH, "%s.%u", stat_ctx->relation_path, segno); + struct stat fst; + SIMPLE_FAULT_INJECTOR("diskquota_before_stat_relfilenode"); + if (stat(file_path, &fst) < 0) + { + if (errno != ENOENT) + { + int saved_errno = errno; + ereport(WARNING, (errcode_for_file_access(), + errmsg("[diskquota] could not stat file %s: %s", file_path, strerror(saved_errno)))); + } + return false; + } + stat_ctx->size += fst.st_size; + return true; +} + +/* + * calculate size of (all forks of) a relation in transaction + * This function is following calculate_relation_size() + */ +int64 +calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam) +{ + int64 totalsize = 0; + ForkNumber forkNum; + unsigned int segno = 0; + + if (TableIsHeap(relstorage, relam)) + { + for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++) + { + RelationFileStatCtx ctx = {0}; + ctx.relation_path = relpathbackend(rnode->node, rnode->backend, forkNum); + ctx.size = 0; + for (segno = 0;; segno++) + { + if (!relation_file_stat(segno, &ctx)) break; + } + totalsize += ctx.size; + } + return totalsize; + } + else if (TableIsAoRows(relstorage, relam) || TableIsAoCols(relstorage, relam)) + { + RelationFileStatCtx ctx = {0}; + ctx.relation_path = relpathbackend(rnode->node, rnode->backend, MAIN_FORKNUM); + ctx.size = 0; + /* + * Since the extension file with (segno=0, column=1) is not traversed by + * ao_foreach_extent_file(), we need to handle the size of it additionally. + * See comments in ao_foreach_extent_file() for details. + */ + relation_file_stat(0, &ctx); + ao_foreach_extent_file(relation_file_stat, &ctx); + return ctx.size; + } + else + { + return 0; + } +} + +Datum +relation_size_local(PG_FUNCTION_ARGS) +{ + Oid reltablespace = PG_GETARG_OID(0); + Oid relfilenode = PG_GETARG_OID(1); + char relpersistence = PG_GETARG_CHAR(2); + char relstorage = PG_GETARG_CHAR(3); + Oid relam = PG_GETARG_OID(4); + RelFileNodeBackend rnode = {0}; + int64 size = 0; + + rnode.node.dbNode = MyDatabaseId; + rnode.node.relNode = relfilenode; + rnode.node.spcNode = OidIsValid(reltablespace) ? reltablespace : MyDatabaseTableSpace; + rnode.backend = relpersistence == RELPERSISTENCE_TEMP ? 
TempRelBackendId : InvalidBackendId; + + size = calculate_relation_size_all_forks(&rnode, relstorage, relam); + + PG_RETURN_INT64(size); +} + +Relation +diskquota_relation_open(Oid relid) +{ + Relation rel; + bool success_open = false; + int32 SavedInterruptHoldoffCount = InterruptHoldoffCount; + + PG_TRY(); + { + rel = RelationIdGetRelation(relid); + if (rel) success_open = true; + } + PG_CATCH(); + { + InterruptHoldoffCount = SavedInterruptHoldoffCount; + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + + return success_open ? rel : NULL; +} + +List * +diskquota_get_index_list(Oid relid) +{ + Relation indrel; + SysScanDesc indscan; + ScanKeyData skey; + HeapTuple htup; + List *result = NIL; + + /* Prepare to scan pg_index for entries having indrelid = this rel. */ + ScanKeyInit(&skey, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, relid); + + indrel = table_open(IndexRelationId, AccessShareLock); + indscan = systable_beginscan(indrel, IndexIndrelidIndexId, true, NULL, 1, &skey); + + while (HeapTupleIsValid(htup = systable_getnext(indscan))) + { + Form_pg_index index = (Form_pg_index)GETSTRUCT(htup); + + /* + * Ignore any indexes that are currently being dropped. This will + * prevent them from being searched, inserted into, or considered in + * HOT-safety decisions. It's unsafe to touch such an index at all + * since its catalog entries could disappear at any instant. + */ + if (!index->indislive) continue; + + /* Add index's OID to result list in the proper order */ + result = lappend_oid(result, index->indexrelid); + } + + systable_endscan(indscan); + + table_close(indrel, AccessShareLock); + + return result; +} + +/* + * Get auxiliary relations oid by searching the pg_appendonly table. 
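+ *
+ * Callers may pass NULL for any output oid they do not need, e.g.:
+ *
+ *     Oid segrelid = InvalidOid;
+ *     diskquota_get_appendonly_aux_oid_list(reloid, &segrelid, NULL, NULL);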
+ */ +void +diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid) +{ + ScanKeyData skey; + SysScanDesc scan; + TupleDesc tupDesc; + Relation aorel; + HeapTuple htup; + Datum auxoid; + bool isnull; + + ScanKeyInit(&skey, Anum_pg_appendonly_relid, BTEqualStrategyNumber, F_OIDEQ, reloid); + aorel = table_open(AppendOnlyRelationId, AccessShareLock); + tupDesc = RelationGetDescr(aorel); + scan = systable_beginscan(aorel, AppendOnlyRelidIndexId, true /*indexOk*/, NULL /*snapshot*/, 1 /*nkeys*/, &skey); + while (HeapTupleIsValid(htup = systable_getnext(scan))) + { + if (segrelid) + { + auxoid = heap_getattr(htup, Anum_pg_appendonly_segrelid, tupDesc, &isnull); + if (!isnull) *segrelid = DatumGetObjectId(auxoid); + } + + if (blkdirrelid) + { + auxoid = heap_getattr(htup, Anum_pg_appendonly_blkdirrelid, tupDesc, &isnull); + if (!isnull) *blkdirrelid = DatumGetObjectId(auxoid); + } + + if (visimaprelid) + { + auxoid = heap_getattr(htup, Anum_pg_appendonly_visimaprelid, tupDesc, &isnull); + if (!isnull) *visimaprelid = DatumGetObjectId(auxoid); + } + } + + systable_endscan(scan); + table_close(aorel, AccessShareLock); +} + +Oid +diskquota_parse_primary_table_oid(Oid namespace, char *relname) +{ + switch (namespace) + { + case PG_TOAST_NAMESPACE: + if (strncmp(relname, "pg_toast", 8) == 0) return atoi(&relname[9]); + break; + case PG_AOSEGMENT_NAMESPACE: { + if (strncmp(relname, "pg_aoseg", 8) == 0) + return atoi(&relname[9]); + else if (strncmp(relname, "pg_aovisimap", 12) == 0) + return atoi(&relname[13]); + else if (strncmp(relname, "pg_aocsseg", 10) == 0) + return atoi(&relname[11]); + else if (strncmp(relname, "pg_aoblkdir", 11) == 0) + return atoi(&relname[12]); + } + break; + } + return InvalidOid; +} + +static float4 +get_per_segment_ratio(Oid spcoid) +{ + int ret; + float4 segratio = INVALID_SEGRATIO; + + if (!OidIsValid(spcoid)) return segratio; + + /* + * using row share lock to lock TABLESPACE_QUTAO + * row to avoid concurrently updating the segratio + */ + ret = SPI_execute_with_args( + "select segratio from diskquota.quota_config where targetoid = $1 and quotatype = $2 for share", 2, + (Oid[]){ + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(spcoid), + Int32GetDatum(TABLESPACE_QUOTA), + }, + NULL, false, 0); + if (ret != SPI_OK_SELECT) + { + elog(ERROR, "cannot get per segment ratio for the tablepace: error code %d", ret); + } + + if (SPI_processed == 1) + { + TupleDesc tupdesc = SPI_tuptable->tupdesc; + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (!isnull) + { + segratio = DatumGetFloat4(dat); + } + } + return segratio; +} + +/* + * For quota type: TABLESPACE_QUOTA, it only stores + * segratio not quota info. So when segratio is + * negtive, we can just delete it. 
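+ *
+ * For the other quota types a negative quota_limit_mb (for example a size
+ * string of '-1 MB' passed to the set_* functions) requests deletion in the
+ * same way.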
+ */ +static bool +to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio) +{ + if (quota_limit_mb < 0) + return true; + else if (segratio < 0 && type == TABLESPACE_QUOTA) + return true; + return false; +} + +static void +check_role(Oid roleoid, char *rolname, int64 quota_limit_mb) +{ + /* reject setting quota for super user, but deletion is allowed */ + if (roleoid == BOOTSTRAP_SUPERUSERID && quota_limit_mb >= 0) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Can not set disk quota for system owner: %s", rolname))); +} + +HTAB * +diskquota_hash_create(const char *tabname, long nelem, HASHCTL *info, int flags, DiskquotaHashFunction hashFunction) +{ + return hash_create(tabname, nelem, info, flags | HASH_BLOBS); +} + +HTAB * +DiskquotaShmemInitHash(const char *name, /* table string name for shmem index */ + long init_size, /* initial table size */ + long max_size, /* max size of the table */ + HASHCTL *infoP, /* info about key and bucket size */ + int hash_flags, /* info about infoP */ + DiskquotaHashFunction hashFunction) +{ + return ShmemInitHash(name, init_size, max_size, infoP, hash_flags | HASH_BLOBS); +} diff --git a/gpcontrib/diskquota/src/enforcement.c b/gpcontrib/diskquota/src/enforcement.c new file mode 100644 index 00000000000..4568db3934c --- /dev/null +++ b/gpcontrib/diskquota/src/enforcement.c @@ -0,0 +1,91 @@ +/* ------------------------------------------------------------------------- + * + * enforcment.c + * + * This code registers enforcement hooks to cancel the query which exceeds + * the quota limit. + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates + * + * IDENTIFICATION + * diskquota/enforcement.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "cdb/cdbdisp.h" +#include "executor/executor.h" + +#include "diskquota.h" + +#define CHECKED_OID_LIST_NUM 64 + +static bool quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation); + +static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook; + +/* + * Initialize enforcement hooks. + */ +void +init_disk_quota_enforcement(void) +{ + /* enforcement hook before query is loading data */ + prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; + ExecutorCheckPerms_hook = quota_check_ExecCheckRTPerms; +} + +/* + * Enforcement hook function before query is loading data. Throws an error if + * you try to INSERT, UPDATE or COPY into a table, and the quota has been exceeded. + */ +static bool +quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) +{ + ListCell *l; + + foreach (l, rangeTable) + { + List *indexIds; + ListCell *oid; + RangeTblEntry *rte = (RangeTblEntry *)lfirst(l); + + /* see ExecCheckRTEPerms() */ + if (rte->rtekind != RTE_RELATION) continue; + + /* + * Only check quota on inserts. UPDATEs may well increase space usage + * too, but we ignore that for now. + */ + if ((rte->requiredPerms & ACL_INSERT) == 0 && (rte->requiredPerms & ACL_UPDATE) == 0) continue; + + /* + * Given table oid, check whether the quota limit of table's schema or + * table's owner are reached. This function will ereport(ERROR) when + * quota limit exceeded. 
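+ *
+ * For illustration: once a schema's quota is exhausted, a later INSERT or
+ * COPY into any table of that schema fails here with ERROR before new data
+ * is loaded.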
+ */ + quota_check_common(rte->relid, NULL /*relfilenode*/); + /* Check the indexes of the this relation */ + indexIds = diskquota_get_index_list(rte->relid); + PG_TRY(); + { + if (indexIds != NIL) + { + foreach (oid, indexIds) + { + quota_check_common(lfirst_oid(oid), NULL /*relfilenode*/); + } + } + } + PG_CATCH(); + { + list_free(indexIds); + PG_RE_THROW(); + } + PG_END_TRY(); + list_free(indexIds); + } + return true; +} diff --git a/gpcontrib/diskquota/src/gp_activetable.c b/gpcontrib/diskquota/src/gp_activetable.c new file mode 100644 index 00000000000..85309b9ce0d --- /dev/null +++ b/gpcontrib/diskquota/src/gp_activetable.c @@ -0,0 +1,1201 @@ +/* ------------------------------------------------------------------------- + * + * gp_activetable.c + * + * This code is responsible for detecting active table for databases + * quotamodel will call gp_fetch_active_tables() to fetch the active tables + * and their size information in each loop. + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates + * + * IDENTIFICATION + * diskquota/gp_activetable.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/htup_details.h" +#include "access/relation.h" +#include "access/xact.h" +#include "catalog/catalog.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_extension.h" +#include "cdb/cdbdisp_query.h" +#include "cdb/cdbdispatchresult.h" +#include "cdb/cdbvars.h" +#include "commands/dbcommands.h" +#include "commands/extension.h" +#include "executor/spi.h" +#include "funcapi.h" +#include "libpq-fe.h" +#include "storage/smgr.h" +#include "utils/faultinjector.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/inval.h" +#include "utils/array.h" + +#include "gp_activetable.h" +#include "diskquota.h" +#include "relation_cache.h" + +PG_FUNCTION_INFO_V1(diskquota_fetch_table_stat); + +/* The results set cache for SRF call*/ +typedef struct DiskQuotaSetOFCache +{ + HTAB *result; + HASH_SEQ_STATUS pos; +} DiskQuotaSetOFCache; + +HTAB *active_tables_map = NULL; // Set + +/* + * monitored_dbid_cache is a allow list for diskquota + * to know which databases it need to monitor. + * + * dbid will be added to it when creating diskquota extension + * dbid will be removed from it when droping diskquota extension + */ +HTAB *altered_reloid_cache = NULL; // Set + +/* active table hooks which detect the disk file size change. 
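+ * smgr create/extend/truncate events mark the relfilenode as active, unlink
+ * removes it again, and the object_access hook tracks newly created or
+ * altered relations as well as DROP EXTENSION diskquota.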
*/ +static file_create_hook_type prev_file_create_hook = NULL; +static file_extend_hook_type prev_file_extend_hook = NULL; +static file_truncate_hook_type prev_file_truncate_hook = NULL; +static file_unlink_hook_type prev_file_unlink_hook = NULL; +static object_access_hook_type prev_object_access_hook = NULL; + +static void active_table_hook_smgrcreate(RelFileNodeBackend rnode); +static void active_table_hook_smgrextend(RelFileNodeBackend rnode); +static void active_table_hook_smgrtruncate(RelFileNodeBackend rnode); +static void active_table_hook_smgrunlink(RelFileNodeBackend rnode); +static void object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); + +static HTAB *get_active_tables_stats(ArrayType *array); +static HTAB *get_active_tables_oid(void); +static HTAB *pull_active_list_from_seg(void); +static void pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_array); +static StringInfoData convert_map_to_string(HTAB *active_list); +static void load_table_size(HTAB *local_table_stats_map); +static void report_active_table_helper(const RelFileNodeBackend *relFileNode); +static void remove_from_active_table_map(const RelFileNodeBackend *relFileNode); +static void report_relation_cache_helper(Oid relid); +static void report_altered_reloid(Oid reloid); +static Oid get_dbid(ArrayType *array); + +void init_active_table_hook(void); +void init_shm_worker_active_tables(void); +void init_lock_active_tables(void); +HTAB *gp_fetch_active_tables(bool is_init); + +/* + * Init active_tables_map shared memory + */ +void +init_shm_worker_active_tables(void) +{ + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + active_tables_map = DiskquotaShmemInitHash("active_tables", diskquota_max_active_tables, + diskquota_max_active_tables, &ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(Oid); + altered_reloid_cache = DiskquotaShmemInitHash("altered_reloid_cache", diskquota_max_active_tables, + diskquota_max_active_tables, &ctl, HASH_ELEM, DISKQUOTA_OID_HASH); +} + +/* + * Register disk file size change hook to detect active table. + */ +void +init_active_table_hook(void) +{ + prev_file_create_hook = file_create_hook; + file_create_hook = active_table_hook_smgrcreate; + + prev_file_extend_hook = file_extend_hook; + file_extend_hook = active_table_hook_smgrextend; + + prev_file_truncate_hook = file_truncate_hook; + file_truncate_hook = active_table_hook_smgrtruncate; + + prev_file_unlink_hook = file_unlink_hook; + file_unlink_hook = active_table_hook_smgrunlink; + + prev_object_access_hook = object_access_hook; + object_access_hook = object_access_hook_QuotaStmt; +} + +/* + * File create hook is used to monitor a new file create event + */ +static void +active_table_hook_smgrcreate(RelFileNodeBackend rnode) +{ + if (prev_file_create_hook) (*prev_file_create_hook)(rnode); + + SIMPLE_FAULT_INJECTOR("diskquota_after_smgrcreate"); + report_active_table_helper(&rnode); +} + +/* + * File extend hook is used to monitor file size extend event + * it could be extending a page for heap table or just monitoring + * file write for an append-optimize table. 
+ */ +static void +active_table_hook_smgrextend(RelFileNodeBackend rnode) +{ + if (prev_file_extend_hook) (*prev_file_extend_hook)(rnode); + + report_active_table_helper(&rnode); + quota_check_common(InvalidOid /*reloid*/, &rnode.node); +} + +/* + * File truncate hook is used to monitor a new file truncate event + */ +static void +active_table_hook_smgrtruncate(RelFileNodeBackend rnode) +{ + if (prev_file_truncate_hook) (*prev_file_truncate_hook)(rnode); + + report_active_table_helper(&rnode); +} + +static void +active_table_hook_smgrunlink(RelFileNodeBackend rnode) +{ + if (prev_file_unlink_hook) (*prev_file_unlink_hook)(rnode); + + /* + * Since we do not remove the relfilenode if it does not map to any valid + * relation oid, we need to do the cleaning here to avoid memory leak + */ + remove_from_active_table_map(&rnode); + remove_cache_entry(InvalidOid, rnode.node.relNode); +} + +static void +object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg) +{ + if (prev_object_access_hook) (*prev_object_access_hook)(access, classId, objectId, subId, arg); + + /* if is 'drop extension diskquota' */ + if (classId == ExtensionRelationId && access == OAT_DROP) + { + if (get_extension_oid("diskquota", true) == objectId) + { + invalidate_database_rejectmap(MyDatabaseId); + diskquota_stop_worker(); + } + return; + } + + /* TODO: do we need to use "&&" instead of "||"? */ + if (classId != RelationRelationId || subId != 0) + { + return; + } + + if (objectId < FirstNormalObjectId) + { + return; + } + + switch (access) + { + case OAT_POST_CREATE: + report_relation_cache_helper(objectId); + break; + case OAT_POST_ALTER: + SIMPLE_FAULT_INJECTOR("object_access_post_alter"); + report_altered_reloid(objectId); + break; + default: + break; + } +} + +static void +report_altered_reloid(Oid reloid) +{ + /* + * We don't collect altered relations' reloid on mirrors + * and QD. + */ + if (IsRoleMirror() || IS_QUERY_DISPATCHER()) return; + + LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_EXCLUSIVE); + hash_search(altered_reloid_cache, &reloid, HASH_ENTER, NULL); + LWLockRelease(diskquota_locks.altered_reloid_cache_lock); +} + +static void +report_relation_cache_helper(Oid relid) +{ + bool found; + Relation rel; + char relkind; + + /* We do not collect the active table in mirror segments */ + if (IsRoleMirror()) + { + return; + } + + /* + * Do not collect active table info when the database is not under monitoring. + * this operation is read-only and does not require absolutely exact. + * read the cache with out shared lock. + */ + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + if (!found) + { + return; + } + + rel = diskquota_relation_open(relid); + if (rel == NULL) + { + return; + } + + relkind = rel->rd_rel->relkind; + + RelationClose(rel); + + if (relkind != RELKIND_FOREIGN_TABLE && relkind != RELKIND_COMPOSITE_TYPE && relkind != RELKIND_VIEW) + update_relation_cache(relid); +} + +/* + * Common function for reporting active tables + * Currently, any file events(create, extend. 
truncate) are + * treated the same and report_active_table_helper just put + * the corresponding relFileNode into the active_tables_map + */ +static void +report_active_table_helper(const RelFileNodeBackend *relFileNode) +{ + DiskQuotaActiveTableFileEntry *entry; + DiskQuotaActiveTableFileEntry item; + bool found = false; + Oid dbid = relFileNode->node.dbNode; + + /* We do not collect the active table in mirror segments */ + if (IsRoleMirror()) + { + return; + } + + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + /* do not collect active table info when the database is not under monitoring. + * this operation is read-only and does not require absolutely exact. + * read the cache with out shared lock */ + hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + if (!found) + { + return; + } + found = false; + + MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); + item.dbid = relFileNode->node.dbNode; + item.relfilenode = relFileNode->node.relNode; + item.tablespaceoid = relFileNode->node.spcNode; + + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + entry = hash_search(active_tables_map, &item, HASH_ENTER_NULL, &found); + if (entry && !found) *entry = item; + + if (!found && entry == NULL) + { + /* + * We may miss the file size change of this relation at current + * refresh interval. + */ + ereport(WARNING, (errmsg("Share memory is not enough for active tables."))); + } + LWLockRelease(diskquota_locks.active_table_lock); +} + +/* + * Remove relfilenode from the active table map if exists. + */ +static void +remove_from_active_table_map(const RelFileNodeBackend *relFileNode) +{ + DiskQuotaActiveTableFileEntry item = {0}; + + item.dbid = relFileNode->node.dbNode; + item.relfilenode = relFileNode->node.relNode; + item.tablespaceoid = relFileNode->node.spcNode; + + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + hash_search(active_tables_map, &item, HASH_REMOVE, NULL); + LWLockRelease(diskquota_locks.active_table_lock); +} + +/* + * Interface of activetable module + * This function is called by quotamodel module. + * Disk quota worker process need to collect + * active table disk usage from all the segments. + * And aggregate the table size on each segment + * to get the real table size at cluster level. 
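+ *
+ * The returned hash table is keyed by (reloid, segid); on the initial call
+ * (is_init) it is warmed from diskquota.table_size instead of querying the
+ * segments.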
+ */ +HTAB * +gp_fetch_active_tables(bool is_init) +{ + HTAB *local_table_stats_map = NULL; + HASHCTL ctl; + HTAB *local_active_table_oid_maps; + StringInfoData active_oid_list; + + Assert(Gp_role == GP_ROLE_DISPATCH); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(TableEntryKey); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + + local_table_stats_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); + + if (is_init) + { + load_table_size(local_table_stats_map); + } + else + { + /* step 1: fetch active oids from all the segments */ + local_active_table_oid_maps = pull_active_list_from_seg(); + active_oid_list = convert_map_to_string(local_active_table_oid_maps); + + ereport(DEBUG1, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] active_old_list = %s", active_oid_list.data))); + + /* step 2: fetch active table sizes based on active oids */ + pull_active_table_size_from_seg(local_table_stats_map, active_oid_list.data); + + hash_destroy(local_active_table_oid_maps); + pfree(active_oid_list.data); + } + return local_table_stats_map; +} + +/* + * Function to get the table size from each segments + * There are 4 modes: + * + * - FETCH_ACTIVE_OID: gather active table oid from all the segments, since + * table may only be modified on a subset of the segments, we need to firstly + * gather the active table oid list from all the segments. + * + * - FETCH_ACTIVE_SIZE: calculate the active table size based on the active + * table oid list. + * + * - ADD_DB_TO_MONITOR: add MyDatabaseId to the monitored db cache so that + * active tables in the current database will be recorded. This is used each + * time a worker starts. + * + * - REMOVE_DB_FROM_BEING_MONITORED: remove MyDatabaseId from the monitored + * db cache so that active tables in the current database will be recorded. + * This is used when DROP EXTENSION. + */ +Datum +diskquota_fetch_table_stat(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + int32 mode = PG_GETARG_INT32(0); + AttInMetadata *attinmeta; + bool isFirstCall = true; + Oid dbid; + + HTAB *localCacheTable = NULL; + DiskQuotaSetOFCache *cache = NULL; + DiskQuotaActiveTableEntry *results_entry = NULL; + +#ifdef FAULT_INJECTOR + if (SIMPLE_FAULT_INJECTOR("ereport_warning_from_segment") == FaultInjectorTypeSkip) + { + ereport(WARNING, (errmsg("[Fault Injector] This is a warning reported from segment"))); + } +#endif + + /* Init the container list in the first call and get the results back */ + if (SRF_IS_FIRSTCALL()) + { + MemoryContext oldcontext; + TupleDesc tupdesc; + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query. 
return code: %d.", ret_code))); + } + SPI_finish(); + + /* create a function context for cross-call persistence */ + funcctx = SRF_FIRSTCALL_INIT(); + + /* switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + if (Gp_role == GP_ROLE_DISPATCH || Gp_role == GP_ROLE_UTILITY) + { + ereport(ERROR, (errmsg("This function must not be called on master or by user"))); + } + + switch (mode) + { + case FETCH_ACTIVE_OID: + localCacheTable = get_active_tables_oid(); + break; + case FETCH_ACTIVE_SIZE: + localCacheTable = get_active_tables_stats(PG_GETARG_ARRAYTYPE_P(1)); + break; + /*TODO: add another UDF to update the monitored_db_cache */ + case ADD_DB_TO_MONITOR: + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, ADD_DB_TO_MONITOR); + PG_RETURN_NULL(); + case REMOVE_DB_FROM_BEING_MONITORED: + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, REMOVE_DB_FROM_BEING_MONITORED); + PG_RETURN_NULL(); + case PAUSE_DB_TO_MONITOR: + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, PAUSE_DB_TO_MONITOR); + PG_RETURN_NULL(); + case RESUME_DB_TO_MONITOR: + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, RESUME_DB_TO_MONITOR); + PG_RETURN_NULL(); + default: + ereport(ERROR, (errmsg("Unused mode number %d, transaction will be aborted", mode))); + break; + } + + /* + * total number of active tables to be returned, each tuple contains + * one active table stat + */ + funcctx->max_calls = localCacheTable ? (uint32)hash_get_num_entries(localCacheTable) : 0; + + /* + * prepare attribute metadata for next calls that generate the tuple + */ + tupdesc = DiskquotaCreateTemplateTupleDesc(3); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "TABLE_OID", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "TABLE_SIZE", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "GP_SEGMENT_ID", INT2OID, -1, 0); + + attinmeta = TupleDescGetAttInMetadata(tupdesc); + funcctx->attinmeta = attinmeta; + + /* Prepare SetOf results HATB */ + cache = (DiskQuotaSetOFCache *)palloc(sizeof(DiskQuotaSetOFCache)); + cache->result = localCacheTable; + hash_seq_init(&(cache->pos), localCacheTable); + + MemoryContextSwitchTo(oldcontext); + } + else + { + isFirstCall = false; + } + + funcctx = SRF_PERCALL_SETUP(); + + if (isFirstCall) + { + funcctx->user_fctx = (void *)cache; + } + else + { + cache = (DiskQuotaSetOFCache *)funcctx->user_fctx; + } + + /* return the results back to SPI caller */ + while ((results_entry = (DiskQuotaActiveTableEntry *)hash_seq_search(&(cache->pos))) != NULL) + { + Datum result; + Datum values[3]; + bool nulls[3]; + HeapTuple tuple; + + memset(values, 0, sizeof(values)); + memset(nulls, false, sizeof(nulls)); + + values[0] = ObjectIdGetDatum(results_entry->reloid); + values[1] = Int64GetDatum(results_entry->tablesize); + values[2] = Int16GetDatum(results_entry->segid); + + tuple = heap_form_tuple(funcctx->attinmeta->tupdesc, values, nulls); + + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + + /* finished, do the clear staff */ + hash_destroy(cache->result); + pfree(cache); + SRF_RETURN_DONE(funcctx); +} + +static Oid +get_dbid(ArrayType *array) +{ + Assert(ARR_ELEMTYPE(array) == OIDOID); + char *ptr; + bool typbyval; + int16 typlen; + char typalign; + Oid dbid; + + get_typlenbyvalalign(ARR_ELEMTYPE(array), &typlen, &typbyval, &typalign); + ptr = ARR_DATA_PTR(array); + dbid = DatumGetObjectId(fetch_att(ptr, 
typbyval, typlen)); + return dbid; +} + +/* + * Call pg_table_size to calcualte the + * active table size on each segments. + */ +static HTAB * +get_active_tables_stats(ArrayType *array) +{ + int ndim = ARR_NDIM(array); + int *dims = ARR_DIMS(array); + int nitems; + int16 typlen; + bool typbyval; + char typalign; + char *ptr; + bits8 *bitmap; + int bitmask; + int i; + Oid relOid; + int segId; + HTAB *local_table = NULL; + HASHCTL ctl; + TableEntryKey key; + DiskQuotaActiveTableEntry *entry; + bool found; + + Assert(ARR_ELEMTYPE(array) == OIDOID); + + nitems = ArrayGetNItems(ndim, dims); + + get_typlenbyvalalign(ARR_ELEMTYPE(array), &typlen, &typbyval, &typalign); + + ptr = ARR_DATA_PTR(array); + bitmap = ARR_NULLBITMAP(array); + bitmask = 1; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(TableEntryKey); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + local_table = diskquota_hash_create("local table map", 1024, &ctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); + + for (i = 0; i < nitems; i++) + { + /* + * handle array containing NULL case for general inupt, but the active + * table oid array would not contain NULL in fact + */ + if (bitmap && (*bitmap & bitmask) == 0) + { + continue; + } + else + { + relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); + segId = GpIdentity.segindex; + key.reloid = relOid; + key.segid = segId; + + entry = (DiskQuotaActiveTableEntry *)hash_search(local_table, &key, HASH_ENTER, &found); + if (!found) + { + entry->reloid = relOid; + entry->segid = segId; + entry->tablesize = calculate_table_size(relOid); + } + + ptr = att_addlength_pointer(ptr, typlen, ptr); + ptr = (char *)att_align_nominal(ptr, typalign); + } + + /* advance bitmap pointer if any */ + if (bitmap) + { + bitmask <<= 1; + if (bitmask == 0x100) + { + bitmap++; + bitmask = 1; + } + } + } + + return local_table; +} + +/* + * SetLocktagRelationOid + * Set up a locktag for a relation, given only relation OID + */ +static inline void +SetLocktagRelationOid(LOCKTAG *tag, Oid relid) +{ + Oid dbid; + + if (IsSharedRelation(relid)) + dbid = InvalidOid; + else + dbid = MyDatabaseId; + + SET_LOCKTAG_RELATION(*tag, dbid, relid); +} + +static bool +is_relation_being_altered(Oid relid) +{ + LOCKTAG locktag; + SetLocktagRelationOid(&locktag, relid); + VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock, NULL); + bool being_altered = VirtualTransactionIdIsValid(*vxid_list); /* if vxid_list is empty */ + pfree(vxid_list); + return being_altered; +} + +/* + * Check whether the cached relfilenode is stale compared to the given one + * due to delayed cache invalidation messages. + * + * NOTE: It will return false if the relation is currently uncommitted. + */ +static bool +is_cached_relfilenode_stale(Oid relOid, RelFileNode rnode) +{ + /* + * Since we don't take any lock on relation, need to check for cache + * invalidation messages manually. + */ + AcceptInvalidationMessages(); + HeapTuple tp = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); + + /* + * Tuple is not valid if + * - The relation has not been committed yet, or + * - The relation has been deleted + */ + if (!HeapTupleIsValid(tp)) return false; + Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tp); + + /* + * If cache invalidation messages are not delievered in time, the + * relfilenode in the tuple of the relation is stale. In that case, + * the relfilenode in the relation tuple is not equal to the one in + * the active table map. 
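+ *
+ * Example: after a relation is rewritten and gets a new relfilenode, the
+ * syscache copy may still carry the old value until invalidations are
+ * processed; it then differs from rnode.relNode recorded in the active table
+ * map and the entry is kept for the next check.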
+ */ + Oid cached_relfilenode = reltup->relfilenode; + bool is_stale = cached_relfilenode != rnode.relNode; + heap_freetuple(tp); + return is_stale; +} + +/* + * Get local active table with table oid and table size info. + * This function first copies active table map from shared memory + * to local active table map with refilenode info. Then traverses + * the local map and find corresponding table oid and table file + * size. Finally stores them into local active table map and return. + */ +static HTAB * +get_active_tables_oid(void) +{ + HASHCTL ctl; + HTAB *local_active_table_file_map = NULL; + HTAB *local_active_table_stats_map = NULL; + HTAB *local_altered_reloid_cache = NULL; + HASH_SEQ_STATUS iter; + DiskQuotaActiveTableFileEntry *active_table_file_entry; + DiskQuotaActiveTableEntry *active_table_entry; + Oid *altered_reloid_entry; + + Oid relOid; + + refresh_monitored_dbid_cache(); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.hcxt = CurrentMemoryContext; + local_active_table_file_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(Oid); + ctl.hcxt = CurrentMemoryContext; + local_altered_reloid_cache = diskquota_hash_create("local_altered_reloid_cache", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); + + /* Move active table from shared memory to local active table map */ + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + + hash_seq_init(&iter, active_tables_map); + + /* copy active table from shared memory into local memory */ + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) + { + bool found; + DiskQuotaActiveTableFileEntry *entry; + + if (active_table_file_entry->dbid != MyDatabaseId) + { + continue; + } + + /* Add the active table entry into local hash table */ + entry = hash_search(local_active_table_file_map, active_table_file_entry, HASH_ENTER, &found); + if (entry) *entry = *active_table_file_entry; + hash_search(active_tables_map, active_table_file_entry, HASH_REMOVE, NULL); + } + // TODO: hash_seq_term(&iter); + LWLockRelease(diskquota_locks.active_table_lock); + + memset(&ctl, 0, sizeof(ctl)); + /* only use Oid as key here, segid is not needed */ + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + local_active_table_stats_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); + + remove_committed_relation_from_cache(); + + /* + * scan whole local map, get the oid of each table and calculate the size + * of them + */ + hash_seq_init(&iter, local_active_table_file_map); + + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) + { + bool found; + RelFileNode rnode; + Oid prelid; + + /* The session of db1 should not see the table inside db2. */ + if (active_table_file_entry->dbid != MyDatabaseId) continue; + + rnode.dbNode = active_table_file_entry->dbid; + rnode.relNode = active_table_file_entry->relfilenode; + rnode.spcNode = active_table_file_entry->tablespaceoid; + relOid = get_relid_by_relfilenode(rnode); + + /* If relfilenode is not prepared for some relation, just skip it. 
*/ + if (!OidIsValid(relOid)) continue; + + /* skip system catalog tables */ + if (relOid < FirstNormalObjectId) + { + hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); + } + else + { + prelid = get_primary_table_oid(relOid, true); + active_table_entry = hash_search(local_active_table_stats_map, &prelid, HASH_ENTER, &found); + if (active_table_entry && !found) + { + active_table_entry->reloid = prelid; + /* we don't care segid and tablesize here */ + active_table_entry->tablesize = 0; + active_table_entry->segid = -1; + } + /* + * Do NOT remove relation from the active table map if it is being + * altered or its cached relfilenode is stale so that we can check it + * again in the next epoch. + */ + if (!is_relation_being_altered(relOid) && !is_cached_relfilenode_stale(relOid, rnode)) + { + hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); + } + } + } + + // TODO: hash_seq_term(&iter); + + /* Adding the remaining relfilenodes back to the map in the shared memory */ + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, local_active_table_file_map); + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) + { + /* TODO: handle possible ERROR here so that the bgworker will not go down. */ + hash_search(active_tables_map, active_table_file_entry, HASH_ENTER, NULL); + } + /* TODO: hash_seq_term(&iter); */ + LWLockRelease(diskquota_locks.active_table_lock); + + LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_SHARED); + hash_seq_init(&iter, altered_reloid_cache); + while ((altered_reloid_entry = (Oid *)hash_seq_search(&iter)) != NULL) + { + bool found; + Oid altered_oid = *altered_reloid_entry; + if (OidIsValid(*altered_reloid_entry)) + { + active_table_entry = hash_search(local_active_table_stats_map, &altered_oid, HASH_ENTER, &found); + if (!found && active_table_entry) + { + active_table_entry->reloid = altered_oid; + /* We don't care segid and tablesize here. */ + active_table_entry->tablesize = 0; + active_table_entry->segid = -1; + } + } + hash_search(local_altered_reloid_cache, &altered_oid, HASH_ENTER, NULL); + } + LWLockRelease(diskquota_locks.altered_reloid_cache_lock); + + hash_seq_init(&iter, local_altered_reloid_cache); + while ((altered_reloid_entry = (Oid *)hash_seq_search(&iter)) != NULL) + { + if (OidIsValid(*altered_reloid_entry) && !is_relation_being_altered(*altered_reloid_entry)) + { + hash_search(local_altered_reloid_cache, altered_reloid_entry, HASH_REMOVE, NULL); + } + } + + LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, altered_reloid_cache); + while ((altered_reloid_entry = (Oid *)hash_seq_search(&iter)) != NULL) + { + bool found; + Oid altered_reloid = *altered_reloid_entry; + hash_search(local_altered_reloid_cache, &altered_reloid, HASH_FIND, &found); + if (!found) + { + hash_search(altered_reloid_cache, &altered_reloid, HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.altered_reloid_cache_lock); + + /* + * If cannot convert relfilenode to relOid, put them back to shared memory + * and wait for the next check. 
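+ *
+ * The re-insert below uses HASH_ENTER_NULL on purpose: for this shared
+ * hash table a full map makes hash_search() return NULL instead of raising
+ * an ERROR, so the background worker keeps running and the dropped entry
+ * is simply detected as active again the next time the table is written.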
+ */ + if (hash_get_num_entries(local_active_table_file_map) > 0) + { + bool found; + DiskQuotaActiveTableFileEntry *entry; + + hash_seq_init(&iter, local_active_table_file_map); + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) + { + entry = hash_search(active_tables_map, active_table_file_entry, HASH_ENTER_NULL, &found); + if (entry) *entry = *active_table_file_entry; + } + LWLockRelease(diskquota_locks.active_table_lock); + } + hash_destroy(local_active_table_file_map); + hash_destroy(local_altered_reloid_cache); + return local_active_table_stats_map; +} + +/* + * Load table size info from diskquota.table_size table. + * This is called when system startup, disk quota rejectmap + * and other shared memory will be warmed up by table_size table. + */ +static void +load_table_size(HTAB *local_table_stats_map) +{ + TupleDesc tupdesc; + int i; + bool found; + TableEntryKey key; + DiskQuotaActiveTableEntry *quota_entry; + SPIPlanPtr plan; + Portal portal; + char *sql = "select tableid, size, segid from diskquota.table_size"; + + if ((plan = SPI_prepare(sql, 0, NULL)) == NULL) + ereport(ERROR, (errmsg("[diskquota] SPI_prepare(\"%s\") failed", sql))); + if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL, true)) == NULL) + ereport(ERROR, (errmsg("[diskquota] SPI_cursor_open(\"%s\") failed", sql))); + + SPI_cursor_fetch(portal, true, 10000); + + if (SPI_tuptable == NULL) + { + ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_cursor_fetch failed"))); + } + + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 3 || ((tupdesc)->attrs[0]).atttypid != OIDOID || ((tupdesc)->attrs[1]).atttypid != INT8OID || + ((tupdesc)->attrs[2]).atttypid != INT2OID) + { + if (tupdesc->natts != 3) + { + ereport(WARNING, (errmsg("[diskquota] tupdesc->natts: %d", tupdesc->natts))); + } + else + { + ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", tupdesc->attrs[0].atttypid, + tupdesc->attrs[1].atttypid, tupdesc->attrs[2].atttypid))); + } + ereport(ERROR, (errmsg("[diskquota] table \"table_size\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); + } + + while (SPI_processed > 0) + { + /* push the table oid and size into local_table_stats_map */ + for (i = 0; i < SPI_processed; i++) + { + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + Oid reloid; + int64 size; + int16 segid; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) continue; + reloid = DatumGetObjectId(dat); + + dat = SPI_getbinval(tup, tupdesc, 2, &isnull); + if (isnull) continue; + size = DatumGetInt64(dat); + dat = SPI_getbinval(tup, tupdesc, 3, &isnull); + if (isnull) continue; + segid = DatumGetInt16(dat); + key.reloid = reloid; + key.segid = segid; + + quota_entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); + quota_entry->reloid = reloid; + quota_entry->tablesize = size; + quota_entry->segid = segid; + } + SPI_freetuptable(SPI_tuptable); + SPI_cursor_fetch(portal, true, 10000); + } + + SPI_freetuptable(SPI_tuptable); + SPI_cursor_close(portal); + SPI_freeplan(plan); +} + +/* + * Convert a hash map with oids into a string array + * This function is used to prepare the second array parameter + * of function diskquota_fetch_table_stat. 
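+ *
+ * For example, a map holding OIDs 16384 and 16385 is rendered as the text
+ * "{16384,16385}", which the caller splices into the dispatched command and
+ * casts back to oid[] on the segments, roughly as follows (illustrative
+ * sketch only, mirroring pull_active_table_size_from_seg below):
+ *
+ *     CdbPgResults   cdb_pgresults = {NULL, 0};
+ *     StringInfoData oid_array     = convert_map_to_string(local_active_table_oid_map);
+ *     StringInfoData sql;
+ *
+ *     initStringInfo(&sql);
+ *     appendStringInfo(&sql,
+ *                      "select * from diskquota.diskquota_fetch_table_stat(1, '%s'::oid[])",
+ *                      oid_array.data);
+ *     CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults);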
+ */ +static StringInfoData +convert_map_to_string(HTAB *local_active_table_oid_maps) +{ + HASH_SEQ_STATUS iter; + StringInfoData buffer; + DiskQuotaActiveTableEntry *entry; + uint32 count = 0; + uint32 nitems = hash_get_num_entries(local_active_table_oid_maps); + + initStringInfo(&buffer); + appendStringInfo(&buffer, "{"); + + hash_seq_init(&iter, local_active_table_oid_maps); + + while ((entry = (DiskQuotaActiveTableEntry *)hash_seq_search(&iter)) != NULL) + { + count++; + if (count != nitems) + { + appendStringInfo(&buffer, "%d,", entry->reloid); + } + else + { + appendStringInfo(&buffer, "%d", entry->reloid); + } + } + appendStringInfo(&buffer, "}"); + + return buffer; +} + +/* + * Get active table size from all the segments based on + * active table oid list. + * Function diskquota_fetch_table_stat is called to calculate + * the table size on the fly. + */ +static HTAB * +pull_active_list_from_seg(void) +{ + CdbPgResults cdb_pgresults = {NULL, 0}; + int i, j; + char *sql = NULL; + HTAB *local_active_table_oid_map = NULL; + HASHCTL ctl; + DiskQuotaActiveTableEntry *entry; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + local_active_table_oid_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); + + /* first get all oid of tables which are active table on any segment */ + sql = "select * from diskquota.diskquota_fetch_table_stat(0, '{}'::oid[])"; + + /* any errors will be catch in upper level */ + CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); + for (i = 0; i < cdb_pgresults.numResults; i++) + { + Oid reloid; + bool found; + + PGresult *pgresult = cdb_pgresults.pg_results[i]; + + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + { + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", + PQresultStatus(pgresult)))); + } + + /* push the active table oid into local_active_table_oid_map */ + for (j = 0; j < PQntuples(pgresult); j++) + { + reloid = atooid(PQgetvalue(pgresult, j, 0)); + + entry = (DiskQuotaActiveTableEntry *)hash_search(local_active_table_oid_map, &reloid, HASH_ENTER, &found); + + if (!found) + { + entry->reloid = reloid; + entry->tablesize = 0; + entry->segid = -1; + } + } + } + cdbdisp_clearCdbPgResults(&cdb_pgresults); + + return local_active_table_oid_map; +} + +/* + * Get active table list from all the segments. + * Since when loading data, there is case where only subset for + * segment doing the real loading. As a result, the same table + * maybe active on some segments while not active on others. We + * haven't store the table size for each segment on master(to save + * memory), so when re-calculate the table size, we need to sum the + * table size on all of the segments. 
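+ *
+ * For example, if a table is reported as 2 MB by segment 0 and 3 MB by
+ * segment 2, the per-segment entries keyed by (reloid, 0) and (reloid, 2)
+ * keep their individual sizes, while the rollup entry keyed by (reloid, -1)
+ * accumulates 5 MB as the per-segment results are processed.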
+ */ +static void +pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_array) +{ + CdbPgResults cdb_pgresults = {NULL, 0}; + StringInfoData sql_command; + int i; + int j; + + initStringInfo(&sql_command); + appendStringInfo(&sql_command, "select * from diskquota.diskquota_fetch_table_stat(1, '%s'::oid[])", + active_oid_array); + CdbDispatchCommand(sql_command.data, DF_NONE, &cdb_pgresults); + pfree(sql_command.data); + + SEGCOUNT = cdb_pgresults.numResults; + if (SEGCOUNT <= 0) + { + ereport(ERROR, (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); + } + + /* sum table size from each segment into local_table_stats_map */ + for (i = 0; i < cdb_pgresults.numResults; i++) + { + Size tableSize; + bool found; + Oid reloid; + int segId; + TableEntryKey key; + DiskQuotaActiveTableEntry *entry; + + PGresult *pgresult = cdb_pgresults.pg_results[i]; + + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + { + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", + PQresultStatus(pgresult)))); + } + + for (j = 0; j < PQntuples(pgresult); j++) + { + reloid = atooid(PQgetvalue(pgresult, j, 0)); + tableSize = (Size)atoll(PQgetvalue(pgresult, j, 1)); + key.reloid = reloid; + /* for diskquota extension version is 1.0, pgresult doesn't contain segid */ + if (PQnfields(pgresult) == 3) + { + /* get the segid, tablesize for each table */ + segId = atoi(PQgetvalue(pgresult, j, 2)); + key.segid = segId; + entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); + + if (!found) + { + /* receive table size info from the first segment */ + entry->reloid = reloid; + entry->segid = segId; + } + entry->tablesize = tableSize; + } + + /* when segid is -1, the tablesize is the sum of tablesize of master and all segments */ + key.segid = -1; + entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); + + if (!found) + { + /* receive table size info from the first segment */ + entry->reloid = reloid; + entry->tablesize = tableSize; + entry->segid = -1; + } + else + { + /* sum table size from all the segments */ + entry->tablesize = entry->tablesize + tableSize; + } + } + } + cdbdisp_clearCdbPgResults(&cdb_pgresults); + return; +} \ No newline at end of file diff --git a/gpcontrib/diskquota/src/gp_activetable.h b/gpcontrib/diskquota/src/gp_activetable.h new file mode 100644 index 00000000000..6b513fe97dc --- /dev/null +++ b/gpcontrib/diskquota/src/gp_activetable.h @@ -0,0 +1,53 @@ +/* ------------------------------------------------------------------------- + * + * gp_activetable.h + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. 
or its affiliates + * + * IDENTIFICATION + * diskquota/gp_activetable.h + * + * ------------------------------------------------------------------------- + */ +#ifndef ACTIVE_TABLE_H +#define ACTIVE_TABLE_H + +#include "c.h" +#include "utils/hsearch.h" + +/* Cache to detect the active table list */ +typedef struct DiskQuotaActiveTableFileEntry +{ + Oid dbid; + Oid relfilenode; + Oid tablespaceoid; +} DiskQuotaActiveTableFileEntry; + +typedef struct TableEntryKey +{ + Oid reloid; + int segid; +} TableEntryKey; + +typedef struct DiskQuotaActiveTableEntry +{ + Oid reloid; + int segid; + Size tablesize; +} DiskQuotaActiveTableEntry; + +extern HTAB *gp_fetch_active_tables(bool force); +extern void init_active_table_hook(void); +extern void init_shm_worker_active_tables(void); +extern void init_lock_active_tables(void); + +extern HTAB *active_tables_map; +extern HTAB *monitored_dbid_cache; +extern HTAB *altered_reloid_cache; + +#ifndef atooid +#define atooid(x) ((Oid)strtoul((x), NULL, 10)) +#endif + +#endif diff --git a/gpcontrib/diskquota/src/monitored_db.c b/gpcontrib/diskquota/src/monitored_db.c new file mode 100644 index 00000000000..fd590a9f718 --- /dev/null +++ b/gpcontrib/diskquota/src/monitored_db.c @@ -0,0 +1,368 @@ +#include "postgres.h" + +#include "diskquota.h" + +#include "funcapi.h" +#include "pgstat.h" +#include "port/atomics.h" +#include "commands/dbcommands.h" +#include "storage/proc.h" +#include "utils/builtins.h" + +PG_FUNCTION_INFO_V1(show_worker_epoch); +PG_FUNCTION_INFO_V1(db_status); +PG_FUNCTION_INFO_V1(wait_for_worker_new_epoch); + +HTAB *monitored_dbid_cache = NULL; // Map +const char *MonitorDBStatusToString[] = { +#define DB_STATUS(id, str) str, +#include "diskquota_enum.h" +#undef DB_STATUS +}; + +static bool check_for_timeout(TimestampTz start_time); +static MonitorDBEntry dump_monitored_dbid_cache(long *nitems); +// Returns the worker epoch for the current database. +// An epoch marks a new iteration of refreshing quota usage by a bgworker. +// An epoch is a 32-bit unsigned integer and there is NO invalid value. +// Therefore, the UDF must throw an error if something unexpected occurs. +Datum +show_worker_epoch(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT32(worker_get_epoch(MyDatabaseId)); +} + +Datum +db_status(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + struct StatusCtx + { + MonitorDBEntry entries; + long nitems; + int index; + } * status_ctx; + + if (SRF_IS_FIRSTCALL()) + { + MemoryContext oldcontext; + TupleDesc tupdesc; + + /* Create a function context for cross-call persistence. */ + funcctx = SRF_FIRSTCALL_INIT(); + + /* Switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + tupdesc = DiskquotaCreateTemplateTupleDesc(5); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "DBID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "DATNAME", TEXTOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "STATUS", TEXTOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "EPOCH", INT8OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "PAUSED", BOOLOID, -1 /*typmod*/, 0 /*attdim*/); + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + status_ctx = (struct StatusCtx *)palloc(sizeof(struct StatusCtx)); + + /* Setup first calling context. 
*/ + funcctx->user_fctx = (void *)status_ctx; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + status_ctx->nitems = hash_get_num_entries(monitored_dbid_cache); + /* + * As we need acquire lock monitored_dbid_cache_lock to access + * monitored_dbid_cache hash table, but it's unsafe to acquire lock + * in the function, when the function fails the lock can not be + * released correctly. So dump the hash table into a array in the + * local memory. The hash table is small, it doesn't consume much + * memory. + */ + status_ctx->entries = dump_monitored_dbid_cache(&status_ctx->nitems); + status_ctx->index = 0; + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + status_ctx = (struct StatusCtx *)funcctx->user_fctx; + + while (status_ctx->index < status_ctx->nitems) + { + MonitorDBEntry entry = &status_ctx->entries[status_ctx->index]; + status_ctx->index++; + Datum result; + Datum values[5]; + bool nulls[5]; + HeapTuple tuple; + + values[0] = ObjectIdGetDatum(entry->dbid); + values[1] = CStringGetTextDatum(get_database_name(entry->dbid)); + int status = Int32GetDatum(pg_atomic_read_u32(&(entry->status))); + status = status >= DB_STATUS_MAX ? DB_STATUS_UNKNOWN : status; + values[2] = CStringGetTextDatum(MonitorDBStatusToString[status]); + values[3] = UInt32GetDatum(pg_atomic_read_u32(&(entry->epoch))); + values[4] = BoolGetDatum(entry->paused); + + memset(nulls, false, sizeof(nulls)); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + pfree(status_ctx->entries); + SRF_RETURN_DONE(funcctx); +} + +// Checks if the bgworker for the current database works as expected. +// 1. If it returns successfully in `diskquota.naptime`, the bgworker works as expected. +// 2. If it does not terminate, there must be some issues with the bgworker. +// In this case, we must ensure this UDF can be interrupted by the user. +Datum +wait_for_worker_new_epoch(PG_FUNCTION_ARGS) +{ + TimestampTz start_time = GetCurrentTimestamp(); + uint32 current_epoch = worker_get_epoch(MyDatabaseId); + for (;;) + { + CHECK_FOR_INTERRUPTS(); + if (check_for_timeout(start_time)) start_time = GetCurrentTimestamp(); + uint32 new_epoch = worker_get_epoch(MyDatabaseId); + /* Unsigned integer underflow is OK */ + if (new_epoch - current_epoch >= 2u) + { + PG_RETURN_BOOL(true); + } + /* Sleep for naptime to reduce CPU usage */ + (void)DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime ? diskquota_naptime : 1); + ResetLatch(&MyProc->procLatch); + } + PG_RETURN_BOOL(false); +} + +bool +diskquota_is_paused() +{ + Assert(MyDatabaseId != InvalidOid); + bool paused = false; + bool found; + MonitorDBEntry entry; + + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + entry = hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found); + if (found) + { + paused = entry->paused; + } + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + return paused; +} + +bool +diskquota_is_readiness_logged() +{ + Assert(MyDatabaseId != InvalidOid); + bool is_readiness_logged; + + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + { + MonitorDBEntry hash_entry; + bool found; + + hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found); + is_readiness_logged = found ? 
hash_entry->is_readiness_logged : false; + } + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + + return is_readiness_logged; +} + +void +diskquota_set_readiness_logged() +{ + Assert(MyDatabaseId != InvalidOid); + + /* + * We actually need ROW EXCLUSIVE lock here. Given that the current worker + * is the the only process that modifies the entry, it is safe to only take + * the shared lock. + */ + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + { + MonitorDBEntry hash_entry; + bool found; + + hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found); + hash_entry->is_readiness_logged = true; + } + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); +} + +bool +worker_increase_epoch(Oid dbid) +{ + bool found = false; + MonitorDBEntry entry; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + + if (found) + { + pg_atomic_fetch_add_u32(&(entry->epoch), 1); + } + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + return found; +} + +uint32 +worker_get_epoch(Oid dbid) +{ + bool found = false; + uint32 epoch = 0; + MonitorDBEntry entry; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + if (found) + { + epoch = pg_atomic_read_u32(&(entry->epoch)); + } + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + if (!found) + { + ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] database \"%s\" not found for getting epoch", get_database_name(dbid)))); + } + return epoch; +} + +/* + * Function to update the db list on each segment + * Will print a WARNING to log if out of memory + */ +void +update_monitor_db(Oid dbid, FetchTableStatType action) +{ + bool found = false; + + // add/remove the dbid to monitoring database cache to filter out table not under + // monitoring in hook functions + + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_EXCLUSIVE); + if (action == ADD_DB_TO_MONITOR) + { + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_ENTER_NULL, &found); + if (entry == NULL) + { + ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); + } + entry->paused = false; + pg_atomic_init_u32(&(entry->epoch), 0); + pg_atomic_init_u32(&(entry->status), DB_INIT); + } + else if (action == REMOVE_DB_FROM_BEING_MONITORED) + { + hash_search(monitored_dbid_cache, &dbid, HASH_REMOVE, &found); + } + else if (action == PAUSE_DB_TO_MONITOR) + { + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + if (found) + { + entry->paused = true; + } + } + else if (action == RESUME_DB_TO_MONITOR) + { + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + + if (found) + { + entry->paused = false; + } + } + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); +} + +void +update_monitordb_status(Oid dbid, uint32 status) +{ + MonitorDBEntry entry; + bool found; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + { + entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + } + if (found) + { + Assert(status < DB_STATUS_MAX); + pg_atomic_write_u32(&(entry->status), status); + } + else + ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] database %u not found for updating monitor db", dbid))); + 
LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); +} + +static bool +check_for_timeout(TimestampTz start_time) +{ + long diff_secs = 0; + int diff_usecs = 0; + TimestampDifference(start_time, GetCurrentTimestamp(), &diff_secs, &diff_usecs); + if (diff_secs >= diskquota_worker_timeout) + { + ereport(NOTICE, (errmsg("[diskquota] timeout when waiting for worker"), + errhint("please check if the bgworker is still alive."))); + return true; + } + return false; +} + +static MonitorDBEntry +dump_monitored_dbid_cache(long *nitems) +{ + HASH_SEQ_STATUS seq; + MonitorDBEntry curEntry; + int count = *nitems = hash_get_num_entries(monitored_dbid_cache); + MonitorDBEntry entries = curEntry = (MonitorDBEntry)palloc(sizeof(struct MonitorDBEntryStruct) * count); + + hash_seq_init(&seq, monitored_dbid_cache); + MonitorDBEntry entry; + while ((entry = hash_seq_search(&seq)) != NULL) + { + Assert(count > 0); + memcpy(curEntry, entry, sizeof(struct MonitorDBEntryStruct)); + curEntry++; + count--; + } + Assert(count == 0); + return entries; +} + +/* + * After primary failure and mirror switching, the monitored_dbid_cache + * is lost on segments. We should refresh the monitored_dbid_cache during + * every diskquota refresh procedure. + */ +void +refresh_monitored_dbid_cache(void) +{ + bool found; + Oid dbid = MyDatabaseId; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_EXCLUSIVE); + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_ENTER_NULL, &found); + if (entry == NULL) + { + ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there are too many databases to monitor"))); + } + else if (!found) + { + entry->paused = false; + pg_atomic_init_u32(&(entry->epoch), 0); + pg_atomic_init_u32(&(entry->status), DB_RUNNING); + ereport(LOG, (errmsg("the entry in monitored_dbid_cache is lost due to mirror switching and is added back now, " + "dbid: %d", + dbid))); + } + + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); +} diff --git a/gpcontrib/diskquota/src/quotamodel.c b/gpcontrib/diskquota/src/quotamodel.c new file mode 100644 index 00000000000..30f383ef57a --- /dev/null +++ b/gpcontrib/diskquota/src/quotamodel.c @@ -0,0 +1,2320 @@ +/* ------------------------------------------------------------------------- + * + * quotamodel.c + * + * This code is responsible for init disk quota model and refresh disk quota + * model. Disk quota related Shared memory initialization is also implemented + * in this file. + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. 
or its affiliates + * + * IDENTIFICATION + * diskquota/quotamodel.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "diskquota.h" +#include "gp_activetable.h" +#include "relation_cache.h" + +#include "access/xact.h" +#include "catalog/pg_tablespace.h" +#include "commands/dbcommands.h" +#include "commands/tablespace.h" +#include "executor/spi.h" +#include "funcapi.h" +#include "storage/ipc.h" +#include "port/atomics.h" +#include "utils/builtins.h" +#include "utils/guc.h" +#include "utils/faultinjector.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" +#include "libpq-fe.h" + +#include "cdb/cdbvars.h" +#include "cdb/cdbdisp_query.h" +#include "cdb/cdbdispatchresult.h" +#include "cdb/cdbutil.h" + +#include + +/* cluster level max size of rejectmap */ +#define MAX_DISK_QUOTA_REJECT_ENTRIES (1024 * 1024) +/* cluster level init size of rejectmap */ +#define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 +/* per database level max size of rejectmap */ +#define MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES 8192 +/* Number of attributes in quota configuration records. */ +#define NUM_QUOTA_CONFIG_ATTRS 6 +/* Number of entries for diskquota.table_size update SQL */ +#define SQL_MAX_VALUES_NUMBER 1000000 + +/* TableSizeEntry macro function */ +/* Use the top bit of totalsize as a flush flag. If this bit is set, the size should be flushed into + * diskquota.table_size_table. */ +#define TableSizeEntryFlushFlag (1ul << 63) +#define TableSizeEntrySizeMask (TableSizeEntryFlushFlag - 1) +#define TableSizeEntryId(segid) ((segid + 1) / SEGMENT_SIZE_ARRAY_LENGTH) +#define TableSizeEntryIndex(segid) ((segid + 1) % SEGMENT_SIZE_ARRAY_LENGTH) +#define TableSizeEntryGetFlushFlag(entry, segid) \ + (entry->totalsize[TableSizeEntryIndex(segid)] & TableSizeEntryFlushFlag) +#define TableSizeEntrySetFlushFlag(entry, segid) entry->totalsize[TableSizeEntryIndex(segid)] |= TableSizeEntryFlushFlag +#define TableSizeEntryResetFlushFlag(entry, segid) \ + entry->totalsize[TableSizeEntryIndex(segid)] &= TableSizeEntrySizeMask +#define TableSizeEntryGetSize(entry, segid) (entry->totalsize[TableSizeEntryIndex(segid)] & TableSizeEntrySizeMask) +#define TableSizeEntrySetSize(entry, segid, size) entry->totalsize[TableSizeEntryIndex(segid)] = size +#define TableSizeEntrySegidStart(entry) (entry->key.id * SEGMENT_SIZE_ARRAY_LENGTH - 1) +#define TableSizeEntrySegidEnd(entry) \ + (((entry->key.id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) < SEGCOUNT \ + ? ((entry->key.id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) \ + : SEGCOUNT) + +typedef struct TableSizeEntry TableSizeEntry; +typedef struct NamespaceSizeEntry NamespaceSizeEntry; +typedef struct RoleSizeEntry RoleSizeEntry; +typedef struct QuotaLimitEntry QuotaLimitEntry; +typedef struct RejectMapEntry RejectMapEntry; +typedef struct GlobalRejectMapEntry GlobalRejectMapEntry; +typedef struct LocalRejectMapEntry LocalRejectMapEntry; + +int SEGCOUNT = 0; +extern int diskquota_max_table_segments; +extern pg_atomic_uint32 *diskquota_table_size_entry_num; +extern int diskquota_max_monitored_databases; +extern int diskquota_max_quota_probes; +extern pg_atomic_uint32 *diskquota_quota_info_entry_num; + +/* + * local cache of table disk size and corresponding schema and owner. + * + * When id is 0, this TableSizeEntry stores the table size in the (-1 ~ + * SEGMENT_SIZE_ARRAY_LENGTH - 2)th segment, and so on. 
+ * |---------|--------------------------------------------------------------------------| + * | id | segment index | + * |---------|--------------------------------------------------------------------------| + * | 0 | [-1, SEGMENT_SIZE_ARRAY_LENGTH - 1) | + * | 1 | [SEGMENT_SIZE_ARRAY_LENGTH - 1, 2 * SEGMENT_SIZE_ARRAY_LENGTH - 1) | + * | 2 | [2 * SEGMENT_SIZE_ARRAY_LENGTH - 1, 3 * SEGMENT_SIZE_ARRAY_LENGTH - 1) | + * -------------------------------------------------------------------------------------- + * + * flag's each bit is used to show the table's status, which is described in TableSizeEntryFlag. + * + * totalsize contains tables' size on segments. When id is 0, totalsize[0] is the sum of all segments' table size. + * table size including fsm, visibility map etc. + */ +typedef struct TableSizeEntryKey +{ + Oid reloid; + int id; +} TableSizeEntryKey; + +struct TableSizeEntry +{ + TableSizeEntryKey key; + Oid tablespaceoid; + Oid namespaceoid; + Oid owneroid; + uint32 flag; + int64 totalsize[SEGMENT_SIZE_ARRAY_LENGTH]; +}; + +typedef enum +{ + TABLE_EXIST = (1 << 0), /* whether table is already dropped */ +} TableSizeEntryFlag; + +/* + * quota_key_num array contains the number of key for each type of quota. + * |----------------------------|---------------| + * | Quota Type | Number of Key | + * |----------------------------|---------------| + * | NAMESPACE_QUOTA | 1 | + * | ROLE_QUOTA | 1 | + * | NAMESPACE_TABLESPACE_QUOTA | 2 | + * | ROLE_TABLESPACE_QUOTA | 2 | + * | TABLESPACE_QUOTA | 1 | + * |----------------------------|---------------| + */ +uint16 quota_key_num[NUM_QUOTA_TYPES] = {1, 1, 2, 2, 1}; +Oid quota_key_caches[NUM_QUOTA_TYPES][MAX_NUM_KEYS_QUOTA_MAP] = { + {NAMESPACEOID}, {AUTHOID}, {NAMESPACEOID, TABLESPACEOID}, {AUTHOID, TABLESPACEOID}, {TABLESPACEOID}}; +HTAB *quota_info_map; + +/* global rejectmap for which exceed their quota limit */ +struct RejectMapEntry +{ + Oid targetoid; + Oid databaseoid; + Oid tablespaceoid; + uint32 targettype; + /* + * TODO refactor this data structure + * QD index the rejectmap by (targetoid, databaseoid, tablespaceoid, targettype). + * QE index the rejectmap by (relfilenode). + */ + RelFileNode relfilenode; +}; + +struct GlobalRejectMapEntry +{ + RejectMapEntry keyitem; + bool segexceeded; + /* + * When the quota limit is exceeded on segment servers, + * we need an extra auxiliary field to preserve the quota + * limitation information for error message on segment + * servers, e.g., targettype, targetoid. This field is + * useful on segment servers. 
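+ *
+ * For example, when a role quota is exceeded, the segment-side entry is
+ * keyed only by the relfilenode stored in keyitem, while auxblockinfo still
+ * carries the original target information (targetoid, targettype,
+ * tablespaceoid), so the out-of-quota error raised on the segment can
+ * still name which quota was hit.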
+ */ + RejectMapEntry auxblockinfo; +}; + +/* local rejectmap for which exceed their quota limit */ +struct LocalRejectMapEntry +{ + RejectMapEntry keyitem; + bool isexceeded; + bool segexceeded; +}; + +/* using hash table to support incremental update the table size entry.*/ +static HTAB *table_size_map = NULL; + +/* rejectmap for database objects which exceed their quota limit */ +static HTAB *disk_quota_reject_map = NULL; +static HTAB *local_disk_quota_reject_map = NULL; + +static shmem_startup_hook_type prev_shmem_startup_hook = NULL; + +/* functions to maintain the quota maps */ +static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid); +static void update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys); +static void add_quota_to_rejectmap(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded); +static void refresh_quota_info_map(void); +static void clean_all_quota_limit(void); +static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid); +static QuotaInfoEntry *put_quota_map_entry(QuotaInfoEntryKey *key, bool *found); + +/* functions to refresh disk quota model*/ +static void refresh_disk_quota_usage(bool is_init); +static void calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map); +static void flush_to_table_size(void); +static bool flush_local_reject_map(void); +static void dispatch_rejectmap(HTAB *local_active_table_stat_map); +static bool load_quotas(void); +static void do_load_quotas(void); + +static Size DiskQuotaShmemSize(void); +static void disk_quota_shmem_startup(void); +static void init_lwlocks(void); + +static void export_exceeded_error(GlobalRejectMapEntry *entry, bool skip_name); +void truncateStringInfo(StringInfo str, int nchars); +static void format_name(const char *prefix, uint32 id, StringInfo str); + +static bool get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); +static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); +static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); + +static void delete_from_table_size_map(char *str); + +/* + * put QuotaInfoEntry into quota_info_map and return this entry. + * return NULL: no free SHM for quota_info_map + * found cannot be NULL + */ +static QuotaInfoEntry * +put_quota_map_entry(QuotaInfoEntryKey *key, bool *found) +{ + QuotaInfoEntry *entry; + uint32 counter = pg_atomic_read_u32(diskquota_quota_info_entry_num); + if (counter >= diskquota_max_quota_probes) + { + entry = hash_search(quota_info_map, key, HASH_FIND, found); + /* + * Too many quotas have been added to the quota_info_map, to avoid diskquota using + * too much shared memory, just return NULL. The diskquota won't work correctly + * anymore. + */ + if (!(*found)) return NULL; + } + else + { + entry = hash_search(quota_info_map, key, HASH_ENTER, found); + if (!(*found)) + { + counter = pg_atomic_add_fetch_u32(diskquota_quota_info_entry_num, 1); + if (counter >= diskquota_max_quota_probes) + { + ereport(WARNING, (errmsg("[diskquota] the number of quota probe exceeds the limit, please " + "increase the GUC value for diskquota.max_quota_probes. 
Current " + "diskquota.max_quota_probes value: %d", + diskquota_max_quota_probes))); + } + } + } + return entry; +} + +/* add a new entry quota or update the old entry quota */ +static void +update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) +{ + bool found; + QuotaInfoEntry *entry; + QuotaInfoEntryKey key = {0}; + + memcpy(key.keys, keys, quota_key_num[type] * sizeof(Oid)); + key.type = type; + key.segid = segid; + entry = put_quota_map_entry(&key, &found); + /* If the number of quota exceeds the limit, entry will be NULL */ + if (entry == NULL) return; + if (!found) + { + entry->size = 0; + entry->limit = -1; + } + entry->size += size; +} + +/* add a new entry quota or update the old entry limit */ +static void +update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys) +{ + bool found; + for (int i = -1; i < SEGCOUNT; i++) + { + QuotaInfoEntry *entry; + QuotaInfoEntryKey key = {0}; + + memcpy(key.keys, keys, quota_key_num[type] * sizeof(Oid)); + key.type = type; + key.segid = i; + entry = put_quota_map_entry(&key, &found); + /* If the number of quota exceeds the limit, entry will be NULL */ + if (entry == NULL) continue; + if (!found) + { + entry->size = 0; + } + if (key.segid == -1) + entry->limit = limit; + else + entry->limit = round((limit / SEGCOUNT) * segratio); + } +} + +/* + * Compare the disk quota limit and current usage of a database object. + * Put them into local rejectmap if quota limit is exceeded. + */ +static void +add_quota_to_rejectmap(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded) +{ + LocalRejectMapEntry *localrejectentry; + RejectMapEntry keyitem = {0}; + + keyitem.targetoid = targetOid; + keyitem.databaseoid = MyDatabaseId; + keyitem.tablespaceoid = tablespaceoid; + keyitem.targettype = (uint32)type; + ereport(DEBUG1, (errmsg("[diskquota] Put object %u to rejectmap", targetOid))); + localrejectentry = (LocalRejectMapEntry *)hash_search(local_disk_quota_reject_map, &keyitem, HASH_ENTER, NULL); + localrejectentry->isexceeded = true; + localrejectentry->segexceeded = segexceeded; +} + +/* + * Check the quota map, if the entry doesn't exist anymore, + * remove it from the map. Otherwise, check if it has hit + * the quota limit, if it does, add it to the rejectmap. + */ +static void +refresh_quota_info_map(void) +{ + HeapTuple tuple; + HASH_SEQ_STATUS iter; + QuotaInfoEntry *entry; + + hash_seq_init(&iter, quota_info_map); + while ((entry = hash_seq_search(&iter)) != NULL) + { + bool removed = false; + QuotaType type = entry->key.type; + for (int i = 0; i < quota_key_num[type]; ++i) + { + tuple = SearchSysCache1(quota_key_caches[type][i], ObjectIdGetDatum(entry->key.keys[i])); + if (!HeapTupleIsValid(tuple)) + { + hash_search(quota_info_map, &entry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); + removed = true; + break; + } + ReleaseSysCache(tuple); + } + if (!removed && entry->limit > 0) + { + if (entry->size >= entry->limit) + { + Oid targetOid = entry->key.keys[0]; + /* when quota type is not NAMESPACE_TABLESPACE_QUOTA or ROLE_TABLESPACE_QUOTA, the tablespaceoid + * is set to be InvalidOid, so when we get it from map, also set it to be InvalidOid + */ + Oid tablespaceoid = (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) + ? entry->key.keys[1] + : InvalidOid; + + bool segmentExceeded = entry->key.segid == -1 ? 
false : true; + add_quota_to_rejectmap(type, targetOid, tablespaceoid, segmentExceeded); + } + } + } +} + +/* transfer one table's size from one quota to another quota */ +static void +transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid) +{ + update_size_for_quota(-totalsize, type, old_keys, segid); + update_size_for_quota(totalsize, type, new_keys, segid); +} + +static void +clean_all_quota_limit(void) +{ + HASH_SEQ_STATUS iter; + QuotaInfoEntry *entry; + hash_seq_init(&iter, quota_info_map); + while ((entry = hash_seq_search(&iter)) != NULL) + { + entry->limit = -1; + } +} + +/* ---- Functions for disk quota shared memory ---- */ +/* + * DiskQuotaShmemInit + * Allocate and initialize diskquota-related shared memory + * This function is called in _PG_init(). + */ +void +init_disk_quota_shmem(void) +{ + /* + * Request additional shared resources. (These are no-ops if we're not in + * the postmaster process.) We'll allocate or attach to the shared + * resources in pgss_shmem_startup(). + */ + RequestAddinShmemSpace(DiskQuotaShmemSize()); + /* locks for diskquota refer to init_lwlocks() for details */ + RequestNamedLWLockTranche("DiskquotaLocks", DiskQuotaLocksItemNumber); + + /* Install startup hook to initialize our shared memory. */ + prev_shmem_startup_hook = shmem_startup_hook; + shmem_startup_hook = disk_quota_shmem_startup; +} + +/* + * DiskQuotaShmemInit hooks. + * Initialize shared memory data and locks. + */ +static void +disk_quota_shmem_startup(void) +{ + bool found; + HASHCTL hash_ctl; + + if (prev_shmem_startup_hook) (*prev_shmem_startup_hook)(); + + LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + + init_lwlocks(); + + /* + * Four shared memory data. extension_ddl_message is used to handle + * diskquota extension create/drop command. disk_quota_reject_map is used + * to store out-of-quota rejectmap. active_tables_map is used to store + * active tables whose disk usage is changed. + */ + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(RejectMapEntry); + hash_ctl.entrysize = sizeof(GlobalRejectMapEntry); + disk_quota_reject_map = + DiskquotaShmemInitHash("rejectmap whose quota limitation is reached", INIT_DISK_QUOTA_REJECT_ENTRIES, + MAX_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + + init_shm_worker_active_tables(); + + init_shm_worker_relation_cache(); + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(struct MonitorDBEntryStruct); + + monitored_dbid_cache = + DiskquotaShmemInitHash("table oid cache which shoud tracking", diskquota_max_monitored_databases, + diskquota_max_monitored_databases, &hash_ctl, HASH_ELEM, DISKQUOTA_OID_HASH); + + /* only initialize ddl_message and launcher memory on master/standby. */ + if (IS_QUERY_DISPATCHER()) + { + extension_ddl_message = + ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), &found); + if (!found) memset((void *)extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); + + init_launcher_shmem(); + } + LWLockRelease(AddinShmemInitLock); +} + +/* + * Initialize four shared memory locks. + * active_table_lock is used to access active table map. + * reject_map_lock is used to access out-of-quota rejectmap. + * extension_ddl_message_lock is used to access content of + * extension_ddl_message. + * extension_ddl_lock is used to avoid concurrent diskquota + * extension ddl(create/drop) command. 
+ * monitored_dbid_cache_lock is used to shared `monitored_dbid_cache` on segment process. + */ +static void +init_lwlocks(void) +{ + LWLockPadded *lock_base = GetNamedLWLockTranche("DiskquotaLocks"); + diskquota_locks.active_table_lock = &lock_base[0].lock; + diskquota_locks.reject_map_lock = &lock_base[1].lock; + diskquota_locks.extension_ddl_message_lock = &lock_base[2].lock; + diskquota_locks.extension_ddl_lock = &lock_base[3].lock; + diskquota_locks.monitored_dbid_cache_lock = &lock_base[4].lock; + diskquota_locks.relation_cache_lock = &lock_base[5].lock; + diskquota_locks.dblist_lock = &lock_base[6].lock; + diskquota_locks.workerlist_lock = &lock_base[7].lock; + diskquota_locks.altered_reloid_cache_lock = &lock_base[8].lock; +} + +static Size +diskquota_worker_shmem_size() +{ + Size size; + size = hash_estimate_size(MAX_NUM_TABLE_SIZE_ENTRIES / diskquota_max_monitored_databases + 100, + sizeof(TableSizeEntry)); + size = add_size(size, hash_estimate_size(MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, sizeof(LocalRejectMapEntry))); + return size; +} + +/* + * DiskQuotaShmemSize + * Compute space needed for diskquota-related shared memory + */ +static Size +DiskQuotaShmemSize(void) +{ + Size size = 0; + + size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_REJECT_ENTRIES, sizeof(GlobalRejectMapEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelationCacheEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(Oid))); + size = add_size(size, hash_estimate_size(diskquota_max_monitored_databases, + sizeof(struct MonitorDBEntryStruct))); // monitored_dbid_cache + + if (IS_QUERY_DISPATCHER()) + { + size = add_size(size, sizeof(ExtensionDDLMessage)); + size = add_size(size, diskquota_launcher_shmem_size()); + size = add_size(size, sizeof(pg_atomic_uint32)); + size = add_size(size, diskquota_worker_shmem_size() * diskquota_max_monitored_databases); + size = add_size(size, hash_estimate_size(MAX_QUOTA_MAP_ENTRIES, sizeof(QuotaInfoEntry)) * + diskquota_max_monitored_databases); + } + + return size; +} + +/* ---- Functions for disk quota model ---- */ +/* + * Init disk quota model when the worker process firstly started. 
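+ *
+ * Together with vacuum_disk_quota_model() below, the per-database lifecycle
+ * is roughly the following (illustrative sketch only; the actual calls live
+ * in the worker and launcher code elsewhere in this patch, and worker_id
+ * stands for the slot id handed to this worker): the per-worker maps are
+ * created or attached at worker start-up and emptied again on DROP
+ * EXTENSION, because the shared memory itself cannot be returned.
+ *
+ *     init_disk_quota_model(worker_id);
+ *     ... periodic refresh cycles ...
+ *     vacuum_disk_quota_model(worker_id);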
+ */ +void +init_disk_quota_model(uint32 id) +{ + HASHCTL hash_ctl; + StringInfoData str; + initStringInfo(&str); + + format_name("TableSizeEntrymap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(TableSizeEntryKey); + hash_ctl.entrysize = sizeof(TableSizeEntry); + table_size_map = DiskquotaShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES, + &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + + /* for localrejectmap */ + /* WARNNING: The max length of name of the map is 48 */ + format_name("localrejectmap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(RejectMapEntry); + hash_ctl.entrysize = sizeof(LocalRejectMapEntry); + local_disk_quota_reject_map = + DiskquotaShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, + &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + + /* for quota_info_map */ + format_name("QuotaInfoMap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.entrysize = sizeof(QuotaInfoEntry); + hash_ctl.keysize = sizeof(QuotaInfoEntryKey); + quota_info_map = DiskquotaShmemInitHash(str.data, INIT_QUOTA_MAP_ENTRIES, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, + HASH_ELEM, DISKQUOTA_TAG_HASH); + + pfree(str.data); +} + +/* + * Reset the shared memory of diskquota worker + * + * Suppose a user first drops diskquota extension, then recreates it in + * the same database, as diskquota worker will get the same memory address + * as before. + * + * As the shared memory can not be recycled, so we just clean up the shared + * memory when dropping the extension. + * - memset diskquotaDBStatus to 0 + * - clean all items in the maps + */ +void +vacuum_disk_quota_model(uint32 id) +{ + HASH_SEQ_STATUS iter; + TableSizeEntry *tsentry = NULL; + LocalRejectMapEntry *localrejectentry; + QuotaInfoEntry *qentry; + + HASHCTL hash_ctl; + StringInfoData str; + initStringInfo(&str); + + /* table_size_map */ + format_name("TableSizeEntrymap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(TableSizeEntryKey); + hash_ctl.entrysize = sizeof(TableSizeEntry); + table_size_map = DiskquotaShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES, + &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + hash_seq_init(&iter, table_size_map); + while ((tsentry = hash_seq_search(&iter)) != NULL) + { + hash_search(table_size_map, &tsentry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_table_size_entry_num, 1); + } + + /* localrejectmap */ + format_name("localrejectmap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(RejectMapEntry); + hash_ctl.entrysize = sizeof(LocalRejectMapEntry); + local_disk_quota_reject_map = + DiskquotaShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, + &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + hash_seq_init(&iter, local_disk_quota_reject_map); + while ((localrejectentry = hash_seq_search(&iter)) != NULL) + { + hash_search(local_disk_quota_reject_map, &localrejectentry->keyitem, HASH_REMOVE, NULL); + } + + /* quota_info_map */ + format_name("QuotaInfoMap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.entrysize = sizeof(QuotaInfoEntry); + hash_ctl.keysize = sizeof(QuotaInfoEntryKey); + quota_info_map = DiskquotaShmemInitHash(str.data, INIT_QUOTA_MAP_ENTRIES, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, + HASH_ELEM, DISKQUOTA_TAG_HASH); + hash_seq_init(&iter, quota_info_map); + while ((qentry = 
hash_seq_search(&iter)) != NULL) + { + hash_search(quota_info_map, &qentry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); + } + + pfree(str.data); +} + +/* + * Check whether the diskquota state is ready + */ +bool +check_diskquota_state_is_ready() +{ + bool is_ready = false; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; + + StartTransactionCommand(); + + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota worker process should + * tolerate this kind of errors and continue to check at the next loop. + */ + PG_TRY(); + { + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + is_ready = do_check_diskquota_state_is_ready(); + } + PG_CATCH(); + { + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); + return is_ready; +} + +/* + * Check whether the diskquota state is ready. + * Throw an error or return false if it is not. + * + * For empty database, table diskquota.state would be ready after + * 'CREATE EXTENSION diskquota;'. But for non-empty database, + * user need to run UDF diskquota.init_table_size_table() + * manually to get all the table size information and + * store them into table diskquota.table_size + */ +bool +do_check_diskquota_state_is_ready(void) +{ + int ret; + TupleDesc tupdesc; + ret = SPI_execute("select state from diskquota.state", true, 0); + ereportif(ret != SPI_OK_SELECT, ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + + tupdesc = SPI_tuptable->tupdesc; + if (SPI_processed != 1 || tupdesc->natts != 1 || ((tupdesc)->attrs[0]).atttypid != INT4OID) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] \"diskquota.state\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); + } + + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + int state; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + state = isnull ? DISKQUOTA_UNKNOWN_STATE : DatumGetInt32(dat); + bool is_ready = state == DISKQUOTA_READY_STATE; + + if (!is_ready && !diskquota_is_readiness_logged()) + { + diskquota_set_readiness_logged(); + ereport(WARNING, (errmsg("[diskquota] diskquota is not ready"), + errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota"))); + } + return is_ready; +} + +/* + * Diskquota worker will refresh disk quota model + * periodically. It will reload quota setting and + * recalculate the changed disk usage. 
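+ *
+ * A simplified sketch of the driver loop expected to call this entry point
+ * (the real background-worker loop lives elsewhere in this patch; the latch
+ * handling mirrors wait_for_worker_new_epoch() above, and everything except
+ * refresh_disk_quota_model and diskquota_naptime is illustrative): an
+ * is_init pass first warms the model up from diskquota.table_size, then
+ * each cycle recalculates active tables and re-checks the quotas.
+ *
+ *     refresh_disk_quota_model(true);
+ *     for (;;)
+ *     {
+ *         CHECK_FOR_INTERRUPTS();
+ *         refresh_disk_quota_model(false);
+ *         (void) DiskquotaWaitLatch(&MyProc->procLatch,
+ *                                   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ *                                   diskquota_naptime ? diskquota_naptime : 1);
+ *         ResetLatch(&MyProc->procLatch);
+ *     }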
+ */ +void +refresh_disk_quota_model(bool is_init) +{ + SEGCOUNT = getgpsegmentCount(); + if (SEGCOUNT <= 0) + { + ereport(ERROR, (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); + } + + if (is_init) ereport(LOG, (errmsg("[diskquota] initialize quota model started"))); + /* skip refresh model when load_quotas failed */ + if (load_quotas()) + { + refresh_disk_quota_usage(is_init); + } + if (is_init) ereport(LOG, (errmsg("[diskquota] initialize quota model finished"))); +} + +/* + * Update the disk usage of namespace, role and tablespace. + * Put the exceeded namespace and role into shared reject map. + * Parameter 'is_init' is true when it's the first time that worker + * process is constructing quota model. + */ +static void +refresh_disk_quota_usage(bool is_init) +{ + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; + HTAB *local_active_table_stat_map = NULL; + + StartTransactionCommand(); + + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota worker process should + * tolerate this kind of errors and continue to check at the next loop. + */ + PG_TRY(); + { + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + /* + * initialization stage all the tables are active. later loop, only the + * tables whose disk size changed will be treated as active + * + * local_active_table_stat_map only contains the active tables which belong + * to the current database. + */ + local_active_table_stat_map = gp_fetch_active_tables(is_init); + bool hasActiveTable = (hash_get_num_entries(local_active_table_stat_map) != 0); + /* TODO: if we can skip the following steps when there is no active table */ + /* recalculate the disk usage of table, schema and role */ + calculate_table_disk_usage(is_init, local_active_table_stat_map); + /* refresh quota_info_map */ + refresh_quota_info_map(); + /* flush local table_size_map to user table table_size */ + flush_to_table_size(); + /* copy local reject map back to shared reject map */ + bool reject_map_changed = flush_local_reject_map(); + /* + * Dispatch rejectmap entries to segments to perform hard-limit. + * If the bgworker is in init mode, the rejectmap should be refreshed anyway. + * Otherwise, only when the rejectmap is changed or the active_table_list is + * not empty the rejectmap should be dispatched to segments. 
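+ *
+ * In other words:
+ *   is_init = true                                               -> dispatch
+ *   is_init = false, hardlimit on,  rejectmap changed            -> dispatch
+ *   is_init = false, hardlimit on,  unchanged, active tables     -> dispatch
+ *   is_init = false, hardlimit on,  unchanged, no active tables  -> skip
+ *   is_init = false, hardlimit off                               -> skip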
+ */ + if (is_init || (diskquota_hardlimit && (reject_map_changed || hasActiveTable))) + dispatch_rejectmap(local_active_table_stat_map); + hash_destroy(local_active_table_stat_map); + } + PG_CATCH(); + { + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); + + return; +} + +static List * +merge_uncommitted_table_to_oidlist(List *oidlist) +{ + HASH_SEQ_STATUS iter; + DiskQuotaRelationCacheEntry *entry; + + if (relation_cache == NULL) + { + return oidlist; + } + + remove_committed_relation_from_cache(); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&iter, relation_cache); + while ((entry = hash_seq_search(&iter)) != NULL) + { + /* The session of db1 should not see the table inside db2. */ + if (entry->primary_table_relid == entry->relid && entry->rnode.node.dbNode == MyDatabaseId) + { + oidlist = lappend_oid(oidlist, entry->relid); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return oidlist; +} + +/* + * Incremental way to update the disk quota of every database objects + * Recalculate the table's disk usage when it's a new table or active table. + * Detect the removed table if it's no longer in pg_class. + * If change happens, no matter size change or owner change, + * update namespace_size_map and role_size_map correspondingly. + * Parameter 'is_init' set to true at initialization stage to fetch tables + * size from table table_size + */ + +static void +calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) +{ + bool table_size_map_found; + bool active_tbl_found; + int64 updated_total_size; + TableSizeEntry *tsentry = NULL; + Oid relOid; + HASH_SEQ_STATUS iter; + DiskQuotaActiveTableEntry *active_table_entry; + TableSizeEntryKey key; + TableEntryKey active_table_key; + List *oidlist; + ListCell *l; + int delete_entries_num = 0; + StringInfoData delete_statement; + + initStringInfo(&delete_statement); + + /* + * unset is_exist flag for tsentry in table_size_map this is used to + * detect tables which have been dropped. + */ + hash_seq_init(&iter, table_size_map); + while ((tsentry = hash_seq_search(&iter)) != NULL) + { + reset_table_size_entry_flag(tsentry, TABLE_EXIST); + } + + /* + * scan pg_class to detect table event: drop, reset schema, reset owner. 
+ * calculate the file size for active table and update namespace_size_map + * and role_size_map + */ + oidlist = get_rel_oid_list(is_init); + + oidlist = merge_uncommitted_table_to_oidlist(oidlist); + + foreach (l, oidlist) + { + HeapTuple classTup; + Form_pg_class classForm = NULL; + Oid relnamespace = InvalidOid; + Oid relowner = InvalidOid; + Oid reltablespace = InvalidOid; + relOid = lfirst_oid(l); + + classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); + if (HeapTupleIsValid(classTup)) + { + classForm = (Form_pg_class)GETSTRUCT(classTup); + relnamespace = classForm->relnamespace; + relowner = classForm->relowner; + reltablespace = classForm->reltablespace; + + if (!OidIsValid(reltablespace)) + { + reltablespace = MyDatabaseTableSpace; + } + } + else + { + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + DiskQuotaRelationCacheEntry *relation_entry = hash_search(relation_cache, &relOid, HASH_FIND, NULL); + if (relation_entry == NULL) + { + elog(WARNING, "cache lookup failed for relation %u", relOid); + LWLockRelease(diskquota_locks.relation_cache_lock); + + if (!is_init) continue; + + for (int i = -1; i < SEGCOUNT; i++) + { + appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? " " : ", ", relOid, i); + + delete_entries_num++; + + if (delete_entries_num > SQL_MAX_VALUES_NUMBER) + { + delete_from_table_size_map(delete_statement.data); + resetStringInfo(&delete_statement); + delete_entries_num = 0; + } + } + + continue; + } + relnamespace = relation_entry->namespaceoid; + relowner = relation_entry->owneroid; + reltablespace = relation_entry->rnode.node.spcNode; + LWLockRelease(diskquota_locks.relation_cache_lock); + } + + /* + * The segid is the same as the content id in gp_segment_configuration + * and the content id is continuous, so it's safe to use SEGCOUNT + * to get segid. + */ + for (int cur_segid = -1; cur_segid < SEGCOUNT; cur_segid++) + { + key.reloid = relOid; + key.id = TableSizeEntryId(cur_segid); + + uint32 counter = pg_atomic_read_u32(diskquota_table_size_entry_num); + if (counter > MAX_NUM_TABLE_SIZE_ENTRIES) + { + tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_FIND, &table_size_map_found); + /* Too many tables have been added to the table_size_map, to avoid diskquota using + too much share memory, just quit the loop. The diskquota won't work correctly + anymore. */ + if (!table_size_map_found) + { + break; + } + } + else + { + tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); + + if (!table_size_map_found) + { + counter = pg_atomic_add_fetch_u32(diskquota_table_size_entry_num, 1); + if (counter > MAX_NUM_TABLE_SIZE_ENTRIES) + { + ereport(WARNING, (errmsg("[diskquota] the number of tables exceeds the limit, please increase " + "the GUC value for diskquota.max_table_segments. 
Current " + "diskquota.max_table_segments value: %d", + diskquota_max_table_segments))); + } + tsentry->key.reloid = relOid; + tsentry->key.id = key.id; + Assert(TableSizeEntrySegidStart(tsentry) == cur_segid); + memset(tsentry->totalsize, 0, sizeof(tsentry->totalsize)); + tsentry->owneroid = InvalidOid; + tsentry->namespaceoid = InvalidOid; + tsentry->tablespaceoid = InvalidOid; + tsentry->flag = 0; + + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); + for (int j = seg_st; j < seg_ed; j++) TableSizeEntrySetFlushFlag(tsentry, j); + } + } + + /* mark tsentry is_exist */ + if (tsentry) set_table_size_entry_flag(tsentry, TABLE_EXIST); + active_table_key.reloid = relOid; + active_table_key.segid = cur_segid; + active_table_entry = (DiskQuotaActiveTableEntry *)hash_search( + local_active_table_stat_map, &active_table_key, HASH_FIND, &active_tbl_found); + + /* skip to recalculate the tables which are not in active list */ + if (active_tbl_found) + { + if (cur_segid == -1) + { + /* pretend process as utility mode, and append the table size on master */ + Gp_role = GP_ROLE_UTILITY; + + active_table_entry->tablesize += calculate_table_size(relOid); + + Gp_role = GP_ROLE_DISPATCH; + } + /* firstly calculate the updated total size of a table */ + updated_total_size = active_table_entry->tablesize - TableSizeEntryGetSize(tsentry, cur_segid); + + /* update the table_size entry */ + TableSizeEntrySetSize(tsentry, cur_segid, active_table_entry->tablesize); + TableSizeEntrySetFlushFlag(tsentry, cur_segid); + + /* update the disk usage, there may be entries in the map whose keys are InvlidOid as the tsentry does + * not exist in the table_size_map */ + update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, cur_segid); + update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, cur_segid); + update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, cur_segid); + update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, cur_segid); + } + /* table size info doesn't need to flush at init quota model stage */ + if (is_init) + { + TableSizeEntryResetFlushFlag(tsentry, cur_segid); + } + + /* if schema change, transfer the file size */ + if (tsentry->namespaceoid != relnamespace) + { + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), NAMESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid}, (Oid[]){relnamespace}, cur_segid); + } + /* if owner change, transfer the file size */ + if (tsentry->owneroid != relowner) + { + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), ROLE_QUOTA, + (Oid[]){tsentry->owneroid}, (Oid[]){relowner}, cur_segid); + } + + if (tsentry->tablespaceoid != reltablespace || tsentry->namespaceoid != relnamespace) + { + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, + (Oid[]){relnamespace, reltablespace}, cur_segid); + } + if (tsentry->tablespaceoid != reltablespace || tsentry->owneroid != relowner) + { + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, + (Oid[]){relowner, reltablespace}, cur_segid); + } + + if (cur_segid == (TableSizeEntrySegidEnd(tsentry) - 1)) + { + tsentry->namespaceoid = relnamespace; + tsentry->owneroid = 
relowner; + tsentry->tablespaceoid = reltablespace; + } + } + if (HeapTupleIsValid(classTup)) + { + heap_freetuple(classTup); + } + } + + if (delete_entries_num) delete_from_table_size_map(delete_statement.data); + + pfree(delete_statement.data); + list_free(oidlist); + + /* + * Process removed tables. Reduce schema and role size firstly. Remove + * table from table_size_map in flush_to_table_size() function later. + */ + hash_seq_init(&iter, table_size_map); + while ((tsentry = hash_seq_search(&iter)) != NULL) + { + if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) + { + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); + for (int i = seg_st; i < seg_ed; i++) + { + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), NAMESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid}, i); + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), ROLE_QUOTA, (Oid[]){tsentry->owneroid}, i); + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, i); + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, i); + } + } + } +} + +static void +delete_from_table_size_map(char *str) +{ + StringInfoData delete_statement; + int ret; + + initStringInfo(&delete_statement); + appendStringInfo(&delete_statement, + "WITH deleted_table AS ( VALUES %s ) " + "delete from diskquota.table_size " + "where (tableid, segid) in ( SELECT * FROM deleted_table );", + str); + ret = SPI_execute(delete_statement.data, false, 0); + if (ret != SPI_OK_DELETE) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] delete_from_table_size_map SPI_execute failed: error code %d", ret))); + pfree(delete_statement.data); +} + +static void +insert_into_table_size_map(char *str) +{ + StringInfoData insert_statement; + int ret; + + initStringInfo(&insert_statement); + appendStringInfo(&insert_statement, "insert into diskquota.table_size values %s;", str); + ret = SPI_execute(insert_statement.data, false, 0); + if (ret != SPI_OK_INSERT) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] insert_into_table_size_map SPI_execute failed: error code %d", ret))); + pfree(insert_statement.data); +} + +/* + * Flush the table_size_map to user table diskquota.table_size + * To improve update performance, we first delete all the need_to_flush + * entries in table table_size. And then insert new table size entries into + * table table_size. + */ +static void +flush_to_table_size(void) +{ + HASH_SEQ_STATUS iter; + TableSizeEntry *tsentry = NULL; + StringInfoData delete_statement; + StringInfoData insert_statement; + int delete_entries_num = 0; + int insert_entries_num = 0; + + /* TODO: Add flush_size_interval to avoid flushing size info in every loop */ + + /* Disable ORCA since it does not support non-scalar subqueries. */ + bool old_optimizer = optimizer; + optimizer = false; + + initStringInfo(&insert_statement); + initStringInfo(&delete_statement); + + hash_seq_init(&iter, table_size_map); + while ((tsentry = hash_seq_search(&iter)) != NULL) + { + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); + for (int i = seg_st; i < seg_ed; i++) + { + /* delete dropped table from both table_size_map and table table_size */ + if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) + { + appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? 
" " : ", ", + tsentry->key.reloid, i); + delete_entries_num++; + if (delete_entries_num > SQL_MAX_VALUES_NUMBER) + { + delete_from_table_size_map(delete_statement.data); + resetStringInfo(&delete_statement); + delete_entries_num = 0; + } + } + /* update the table size by delete+insert in table table_size */ + else if (TableSizeEntryGetFlushFlag(tsentry, i)) + { + appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? " " : ", ", + tsentry->key.reloid, i); + appendStringInfo(&insert_statement, "%s(%u,%ld,%d)", (insert_entries_num == 0) ? " " : ", ", + tsentry->key.reloid, TableSizeEntryGetSize(tsentry, i), i); + delete_entries_num++; + insert_entries_num++; + + if (delete_entries_num > SQL_MAX_VALUES_NUMBER) + { + delete_from_table_size_map(delete_statement.data); + resetStringInfo(&delete_statement); + delete_entries_num = 0; + } + if (insert_entries_num > SQL_MAX_VALUES_NUMBER) + { + insert_into_table_size_map(insert_statement.data); + resetStringInfo(&insert_statement); + insert_entries_num = 0; + } + + TableSizeEntryResetFlushFlag(tsentry, i); + } + } + if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) + { + hash_search(table_size_map, &tsentry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_table_size_entry_num, 1); + } + } + + if (delete_entries_num) delete_from_table_size_map(delete_statement.data); + if (insert_entries_num) insert_into_table_size_map(insert_statement.data); + + optimizer = old_optimizer; + + pfree(delete_statement.data); + pfree(insert_statement.data); +} + +/* + * Generate the new shared rejectmap from the local_rejectmap which + * exceed the quota limit. + * local_rejectmap is used to reduce the lock contention. + */ +static bool +flush_local_reject_map(void) +{ + bool changed = false; + HASH_SEQ_STATUS iter; + LocalRejectMapEntry *localrejectentry; + GlobalRejectMapEntry *rejectentry; + bool found; + + LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); + + hash_seq_init(&iter, local_disk_quota_reject_map); + while ((localrejectentry = hash_seq_search(&iter)) != NULL) + { + /* + * If localrejectentry->isexceeded is true, and it alredy exists in disk_quota_reject_map, + * that means the reject entry exists in both last loop and current loop, but its segexceeded + * feild may have changed. + * + * If localrejectentry->isexceeded is true, and it doesn't exist in disk_quota_reject_map, + * then it is a new added reject entry in this loop. + * + * Otherwise, it means the reject entry has gone, we need to delete it. + */ + if (localrejectentry->isexceeded) + { + rejectentry = (GlobalRejectMapEntry *)hash_search(disk_quota_reject_map, (void *)&localrejectentry->keyitem, + HASH_ENTER_NULL, &found); + if (rejectentry == NULL) + { + ereport(WARNING, (errmsg("[diskquota] Shared disk quota reject map size limit reached." 
+ "Some out-of-limit schemas or roles will be lost" + "in rejectmap."))); + continue; + } + /* new db objects which exceed quota limit */ + if (!found) + { + rejectentry->keyitem.targetoid = localrejectentry->keyitem.targetoid; + rejectentry->keyitem.databaseoid = MyDatabaseId; + rejectentry->keyitem.targettype = localrejectentry->keyitem.targettype; + rejectentry->keyitem.tablespaceoid = localrejectentry->keyitem.tablespaceoid; + rejectentry->segexceeded = localrejectentry->segexceeded; + changed = true; + } + if (rejectentry->segexceeded != localrejectentry->segexceeded) + { + rejectentry->segexceeded = localrejectentry->segexceeded; + changed = true; + } + localrejectentry->isexceeded = false; + localrejectentry->segexceeded = false; + } + else + { + changed = true; + /* db objects are removed or under quota limit in the new loop */ + (void)hash_search(disk_quota_reject_map, (void *)&localrejectentry->keyitem, HASH_REMOVE, NULL); + (void)hash_search(local_disk_quota_reject_map, (void *)&localrejectentry->keyitem, HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.reject_map_lock); + return changed; +} + +/* + * Dispatch rejectmap to segment servers. + */ +static void +dispatch_rejectmap(HTAB *local_active_table_stat_map) +{ + HASH_SEQ_STATUS hash_seq; + GlobalRejectMapEntry *rejectmap_entry; + DiskQuotaActiveTableEntry *active_table_entry; + int num_entries, count = 0; + CdbPgResults cdb_pgresults = {NULL, 0}; + StringInfoData rows; + StringInfoData active_oids; + StringInfoData sql; + + initStringInfo(&rows); + initStringInfo(&active_oids); + initStringInfo(&sql); + + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); + num_entries = hash_get_num_entries(disk_quota_reject_map); + hash_seq_init(&hash_seq, disk_quota_reject_map); + while ((rejectmap_entry = hash_seq_search(&hash_seq)) != NULL) + { + appendStringInfo(&rows, "ROW(%d, %d, %d, %d, %s)", rejectmap_entry->keyitem.targetoid, + rejectmap_entry->keyitem.databaseoid, rejectmap_entry->keyitem.tablespaceoid, + rejectmap_entry->keyitem.targettype, rejectmap_entry->segexceeded ? "true" : "false"); + + if (++count != num_entries) appendStringInfo(&rows, ","); + } + LWLockRelease(diskquota_locks.reject_map_lock); + + count = 0; + num_entries = hash_get_num_entries(local_active_table_stat_map); + hash_seq_init(&hash_seq, local_active_table_stat_map); + while ((active_table_entry = hash_seq_search(&hash_seq)) != NULL) + { + appendStringInfo(&active_oids, "%d", active_table_entry->reloid); + + if (++count != num_entries) appendStringInfo(&active_oids, ","); + } + + appendStringInfo(&sql, + "select diskquota.refresh_rejectmap(" + "ARRAY[%s]::diskquota.rejectmap_entry[], " + "ARRAY[%s]::oid[])", + rows.data, active_oids.data); + CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); + + pfree(rows.data); + pfree(active_oids.data); + pfree(sql.data); + cdbdisp_clearCdbPgResults(&cdb_pgresults); +} + +/* + * Make sure a StringInfo's string is no longer than 'nchars' characters. + */ +void +truncateStringInfo(StringInfo str, int nchars) +{ + if (str && str->len > nchars) + { + Assert(str->data != NULL && str->len <= str->maxlen); + str->len = nchars; + str->data[nchars] = '\0'; + } +} + +/* + * Interface to load quotas from diskquota configuration table(quota_config). 
+ */ +static bool +load_quotas(void) +{ + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; + + StartTransactionCommand(); + + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota worker process should + * tolerate this kind of errors and continue to check at the next loop. + */ + PG_TRY(); + { + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unable to connect to execute SPI query, return code: %d", ret_code))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + do_load_quotas(); + } + PG_CATCH(); + { + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); + + return ret; +} + +/* + * Load quotas from diskquota configuration table(quota_config). + */ +static void +do_load_quotas(void) +{ + int ret; + TupleDesc tupdesc; + int i; + + /* + * TODO: we should skip to reload quota config when there is no change in + * quota.config. A flag in shared memory could be used to detect the quota + * config change. + */ + clean_all_quota_limit(); + + /* + * read quotas from diskquota.quota_config and target table + */ + ret = SPI_execute_with_args( + "SELECT c.targetOid, c.quotaType, c.quotalimitMB, COALESCE(c.segratio, 0) AS segratio, " + "COALESCE(t.tablespaceoid, 0) AS tablespaceoid, COALESCE(t.primaryOid, 0) AS primaryoid " + "FROM diskquota.quota_config AS c LEFT OUTER JOIN diskquota.target AS t " + "ON c.targetOid = t.rowId AND c.quotaType IN ($1, $2) AND c.quotaType = t.quotaType", + 2, + (Oid[]){ + INT4OID, + INT4OID, + }, + (Datum[]){ + Int32GetDatum(NAMESPACE_TABLESPACE_QUOTA), + Int32GetDatum(ROLE_TABLESPACE_QUOTA), + }, + NULL, true, 0); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); + + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || ((tupdesc)->attrs[0]).atttypid != OIDOID || + ((tupdesc)->attrs[1]).atttypid != INT4OID || ((tupdesc)->attrs[2]).atttypid != INT8OID) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] configuration table is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); + } + + for (i = 0; i < SPI_processed; i++) + { + HeapTuple tup = SPI_tuptable->vals[i]; + Datum vals[NUM_QUOTA_CONFIG_ATTRS]; + bool isnull[NUM_QUOTA_CONFIG_ATTRS]; + + for (int i = 0; i < NUM_QUOTA_CONFIG_ATTRS; ++i) + { + vals[i] = SPI_getbinval(tup, tupdesc, i + 1, &(isnull[i])); + if (i <= 2 && isnull[i]) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] attibutes in configuration table MUST NOT be NULL"))); + } + } + + Oid targetOid = DatumGetObjectId(vals[0]); + int quotaType = (QuotaType)DatumGetInt32(vals[1]); + int64 quota_limit_mb = DatumGetInt64(vals[2]); + float segratio = DatumGetFloat4(vals[3]); + Oid spcOid = DatumGetObjectId(vals[4]); + Oid primaryOid = DatumGetObjectId(vals[5]); + + if (quotaType == NAMESPACE_TABLESPACE_QUOTA || quotaType == ROLE_TABLESPACE_QUOTA) + { + targetOid = primaryOid; 
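+			/*
+			 * For tablespace-scoped quotas, quota_config.targetOid holds the row
+			 * id of diskquota.target (see the join above); the oid of the role or
+			 * namespace actually being limited is carried in target.primaryOid.
+			 */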
+ } + + if (spcOid == InvalidOid) + { + if (quotaType == NAMESPACE_TABLESPACE_QUOTA || quotaType == ROLE_TABLESPACE_QUOTA) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d. num_keys: %d", + quotaType, quota_key_num[quotaType]))); + } + update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid}); + } + else + { + update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid, spcOid}); + } + } + + return; +} + +/* + * Given table oid, search for namespace and owner. + */ +static bool +get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *tablespaceoid) +{ + HeapTuple tp; + + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + bool found = HeapTupleIsValid(tp); + if (HeapTupleIsValid(tp)) + { + Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tp); + + *ownerOid = reltup->relowner; + *nsOid = reltup->relnamespace; + *tablespaceoid = reltup->reltablespace; + + if (!OidIsValid(*tablespaceoid)) + { + *tablespaceoid = MyDatabaseTableSpace; + } + + ReleaseSysCache(tp); + } + return found; +} + +/* + * Given table oid, search for namespace and name. + * Memory relname points to should be pre-allocated at least NAMEDATALEN bytes. + */ +bool +get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname) +{ + HeapTuple tp; + + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + bool found = HeapTupleIsValid(tp); + if (found) + { + Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tp); + + *nsOid = reltup->relnamespace; + memcpy(relname, reltup->relname.data, NAMEDATALEN); + + ReleaseSysCache(tp); + } + return found; +} + +static bool +check_rejectmap_by_relfilenode(RelFileNode relfilenode) +{ + bool found; + RejectMapEntry keyitem; + GlobalRejectMapEntry *entry; + + SIMPLE_FAULT_INJECTOR("check_rejectmap_by_relfilenode"); + + memset(&keyitem, 0, sizeof(keyitem)); + memcpy(&keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); + + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); + entry = hash_search(disk_quota_reject_map, &keyitem, HASH_FIND, &found); + + if (found && entry) + { + GlobalRejectMapEntry segrejectentry; + memcpy(&segrejectentry.keyitem, &entry->auxblockinfo, sizeof(RejectMapEntry)); + segrejectentry.segexceeded = entry->segexceeded; + LWLockRelease(diskquota_locks.reject_map_lock); + + export_exceeded_error(&segrejectentry, true /*skip_name*/); + return false; + } + LWLockRelease(diskquota_locks.reject_map_lock); + return true; +} + +/* + * This function takes relowner, relnamespace, reltablespace as arguments, + * prepares the searching key of the global rejectmap for us. 
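+ * Depending on the quota type, targetoid is filled with the owner, namespace
+ * or tablespace oid, and tablespaceoid is only set for the
+ * *_TABLESPACE_QUOTA types; databaseoid is always the current database.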
+ */ +static void +prepare_rejectmap_search_key(RejectMapEntry *keyitem, QuotaType type, Oid relowner, Oid relnamespace, Oid reltablespace) +{ + Assert(keyitem != NULL); + memset(keyitem, 0, sizeof(RejectMapEntry)); + if (type == ROLE_QUOTA || type == ROLE_TABLESPACE_QUOTA) + keyitem->targetoid = relowner; + else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) + keyitem->targetoid = relnamespace; + else if (type == TABLESPACE_QUOTA) + keyitem->targetoid = reltablespace; + else + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unknown quota type: %d", type))); + + if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) + keyitem->tablespaceoid = reltablespace; + else + { + /* refer to add_quota_to_rejectmap */ + keyitem->tablespaceoid = InvalidOid; + } + keyitem->databaseoid = MyDatabaseId; + keyitem->targettype = type; +} + +/* + * Given table oid, check whether quota limit + * of table's schema or table's owner are reached. + * Do enforcement if quota exceeds. + */ +static bool +check_rejectmap_by_reloid(Oid reloid) +{ + Oid ownerOid = InvalidOid; + Oid nsOid = InvalidOid; + Oid tablespaceoid = InvalidOid; + bool found; + RejectMapEntry keyitem; + GlobalRejectMapEntry *entry; + + bool found_rel = get_rel_owner_schema_tablespace(reloid, &ownerOid, &nsOid, &tablespaceoid); + if (!found_rel) + { + return true; + } + + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + prepare_rejectmap_search_key(&keyitem, type, ownerOid, nsOid, tablespaceoid); + entry = hash_search(disk_quota_reject_map, &keyitem, HASH_FIND, &found); + if (found) + { + LWLockRelease(diskquota_locks.reject_map_lock); + export_exceeded_error(entry, false /*skip_name*/); + return false; + } + } + LWLockRelease(diskquota_locks.reject_map_lock); + return true; +} + +/* + * Given relation's oid or relfilenode, check whether the + * quota limits of schema or owner are reached. Do enforcement + * if the quota exceeds. 
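+ * When a valid reloid is given, the check is keyed by the relation's
+ * owner/schema/tablespace; otherwise the relfilenode is looked up in the
+ * rejectmap, which only happens when hard limits are enabled.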
+ */ +bool +quota_check_common(Oid reloid, RelFileNode *relfilenode) +{ + bool enable_hardlimit; + + if (!IsTransactionState()) return true; + + if (diskquota_is_paused()) return true; + + if (OidIsValid(reloid)) return check_rejectmap_by_reloid(reloid); + + enable_hardlimit = diskquota_hardlimit; + +#ifdef FAULT_INJECTOR + if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) enable_hardlimit = true; +#endif + if (relfilenode && enable_hardlimit) return check_rejectmap_by_relfilenode(*relfilenode); + + return true; +} + +/* + * invalidate all reject entry with a specific dbid in SHM + */ +void +invalidate_database_rejectmap(Oid dbid) +{ + RejectMapEntry *entry; + HASH_SEQ_STATUS iter; + + LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, disk_quota_reject_map); + while ((entry = hash_seq_search(&iter)) != NULL) + { + if (entry->databaseoid == dbid || entry->relfilenode.dbNode == dbid) + { + hash_search(disk_quota_reject_map, entry, HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.reject_map_lock); +} + +static char * +GetNamespaceName(Oid spcid, bool skip_name) +{ + if (skip_name) + { + NameData spcstr; + pg_ltoa(spcid, spcstr.data); + return pstrdup(spcstr.data); + } + return get_namespace_name(spcid); +} + +static char * +GetTablespaceName(Oid spcid, bool skip_name) +{ + if (skip_name) + { + NameData spcstr; + pg_ltoa(spcid, spcstr.data); + return pstrdup(spcstr.data); + } + return get_tablespace_name(spcid); +} + +static char * +GetUserName(Oid relowner, bool skip_name) +{ + if (skip_name) + { + NameData namestr; + pg_ltoa(relowner, namestr.data); + return pstrdup(namestr.data); + } + return GetUserNameFromId(relowner, false); +} + +static void +export_exceeded_error(GlobalRejectMapEntry *entry, bool skip_name) +{ + RejectMapEntry *rejectentry = &entry->keyitem; + switch (rejectentry->targettype) + { + case NAMESPACE_QUOTA: + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("schema's disk space quota exceeded with name: %s", + GetNamespaceName(rejectentry->targetoid, skip_name)))); + break; + case ROLE_QUOTA: + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("role's disk space quota exceeded with name: %s", + GetUserName(rejectentry->targetoid, skip_name)))); + break; + case NAMESPACE_TABLESPACE_QUOTA: + if (entry->segexceeded) + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace: %s, schema: %s diskquota exceeded per segment quota", + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetNamespaceName(rejectentry->targetoid, skip_name)))); + else + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, schema: %s diskquota exceeded", + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetNamespaceName(rejectentry->targetoid, skip_name)))); + break; + case ROLE_TABLESPACE_QUOTA: + if (entry->segexceeded) + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace: %s, role: %s diskquota exceeded per segment quota", + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetUserName(rejectentry->targetoid, skip_name)))); + else + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, role: %s diskquota exceeded", + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetUserName(rejectentry->targetoid, skip_name)))); + break; + default: + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("diskquota exceeded, unknown quota type"))); + } +} + +/* + * refresh_rejectmap() takes two arguments. 
+ * The first argument is an array of rejectmap entries on QD. + * The second argument is an array of active relations' oid. + * + * The basic idea is that, we iterate over the active relations' oid, check that + * whether the relation's owner/tablespace/namespace is in one of the rejectmap + * entries dispatched from diskquota worker from QD. If the relation should be + * blocked, we then add its relfilenode together with the toast, toast index, + * appendonly, appendonly index relations' relfilenodes to the global rejectmap. + * Note that, this UDF is called on segment servers by diskquota worker on QD and + * the global rejectmap on segment servers is indexed by relfilenode. + */ +PG_FUNCTION_INFO_V1(refresh_rejectmap); +Datum +refresh_rejectmap(PG_FUNCTION_ARGS) +{ + ArrayType *rejectmap_array_type = PG_GETARG_ARRAYTYPE_P(0); + ArrayType *active_oid_array_type = PG_GETARG_ARRAYTYPE_P(1); + Oid rejectmap_elem_type = ARR_ELEMTYPE(rejectmap_array_type); + Oid active_oid_elem_type = ARR_ELEMTYPE(active_oid_array_type); + Datum *datums; + bool *nulls; + int16 elem_width; + bool elem_type_by_val; + char elem_alignment_code; + int reject_array_count; + int active_array_count; + HeapTupleHeader lt; + bool segexceeded; + GlobalRejectMapEntry *rejectmapentry; + HASH_SEQ_STATUS hash_seq; + HTAB *local_rejectmap; + HASHCTL hashctl; + + if (!superuser()) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to update rejectmap"))); + if (IS_QUERY_DISPATCHER()) + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("\"refresh_rejectmap()\" can only be executed on QE."))); + if (ARR_NDIM(rejectmap_array_type) > 1 || ARR_NDIM(active_oid_array_type) > 1) + ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("1-dimensional array needed"))); + + /* + * Iterate over rejectmap entries and add these entries to the local reject map + * on segment servers so that we are able to check whether the given relation (by oid) + * should be rejected in O(1) time complexity in third step. + */ + memset(&hashctl, 0, sizeof(hashctl)); + hashctl.keysize = sizeof(RejectMapEntry); + hashctl.entrysize = sizeof(GlobalRejectMapEntry); + hashctl.hcxt = CurrentMemoryContext; + + /* + * Since uncommitted relations' information and the global rejectmap entries + * are cached in shared memory. The memory regions are guarded by lightweight + * locks. In order not to hold multiple locks at the same time, We add rejectmap + * entries into the local_rejectmap below and then flush the content of the + * local_rejectmap to the global rejectmap at the end of this UDF. 
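+	 *
+	 * For reference, the QD invokes this UDF through dispatch_rejectmap(),
+	 * roughly of the form:
+	 *   select diskquota.refresh_rejectmap(
+	 *       ARRAY[ROW(targetoid, databaseoid, tablespaceoid, targettype, segexceeded), ...]::diskquota.rejectmap_entry[],
+	 *       ARRAY[active_oid, ...]::oid[])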
+ */ + local_rejectmap = + diskquota_hash_create("local_rejectmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); + get_typlenbyvalalign(rejectmap_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); + deconstruct_array(rejectmap_array_type, rejectmap_elem_type, elem_width, elem_type_by_val, elem_alignment_code, + &datums, &nulls, &reject_array_count); + for (int i = 0; i < reject_array_count; ++i) + { + RejectMapEntry keyitem; + bool isnull; + + if (nulls[i]) continue; + + memset(&keyitem, 0, sizeof(RejectMapEntry)); + lt = DatumGetHeapTupleHeader(datums[i]); + keyitem.targetoid = DatumGetObjectId(GetAttributeByNum(lt, 1, &isnull)); + keyitem.databaseoid = DatumGetObjectId(GetAttributeByNum(lt, 2, &isnull)); + keyitem.tablespaceoid = DatumGetObjectId(GetAttributeByNum(lt, 3, &isnull)); + keyitem.targettype = DatumGetInt32(GetAttributeByNum(lt, 4, &isnull)); + /* rejectmap entries from QD should have the real tablespace oid */ + if ((keyitem.targettype == NAMESPACE_TABLESPACE_QUOTA || keyitem.targettype == ROLE_TABLESPACE_QUOTA)) + { + Assert(OidIsValid(keyitem.tablespaceoid)); + } + segexceeded = DatumGetBool(GetAttributeByNum(lt, 5, &isnull)); + + rejectmapentry = hash_search(local_rejectmap, &keyitem, HASH_ENTER_NULL, NULL); + if (rejectmapentry) rejectmapentry->segexceeded = segexceeded; + } + + /* + * Thirdly, iterate over the active oid list. Check that if the relation should be blocked. + * If the relation should be blocked, we insert the toast, toast index, appendonly, appendonly + * index relations to the global reject map. + */ + get_typlenbyvalalign(active_oid_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); + deconstruct_array(active_oid_array_type, active_oid_elem_type, elem_width, elem_type_by_val, elem_alignment_code, + &datums, &nulls, &active_array_count); + for (int i = 0; i < active_array_count; ++i) + { + Oid active_oid = InvalidOid; + HeapTuple tuple; + if (nulls[i]) continue; + + active_oid = DatumGetObjectId(datums[i]); + if (!OidIsValid(active_oid)) continue; + + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); + tuple = SearchSysCacheCopy1(RELOID, active_oid); + if (HeapTupleIsValid(tuple)) + { + Form_pg_class form = (Form_pg_class)GETSTRUCT(tuple); + Oid relnamespace = form->relnamespace; + Oid reltablespace = OidIsValid(form->reltablespace) ? form->reltablespace : MyDatabaseTableSpace; + Oid relowner = form->relowner; + RejectMapEntry keyitem; + bool found; + + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + /* Check that if the current relation should be blocked. */ + prepare_rejectmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace); + rejectmapentry = hash_search(local_rejectmap, &keyitem, HASH_FIND, &found); + if (found && rejectmapentry) + { + /* + * If the current relation is blocked, we should add the relfilenode + * of itself together with the relfilenodes of its toast relation and + * appendonly relations to the global reject map. + */ + List *oid_list = NIL; + ListCell *cell = NULL; + Oid toastrelid = form->reltoastrelid; + Oid aosegrelid = InvalidOid; + Oid aoblkdirrelid = InvalidOid; + Oid aovisimaprelid = InvalidOid; + oid_list = lappend_oid(oid_list, active_oid); + + /* Append toast relation and toast index to the oid_list if any. 
*/ + if (OidIsValid(toastrelid)) + { + oid_list = lappend_oid(oid_list, toastrelid); + oid_list = list_concat(oid_list, diskquota_get_index_list(toastrelid)); + } + + /* Append ao auxiliary relations and their indexes to the oid_list if any. */ + diskquota_get_appendonly_aux_oid_list(active_oid, &aosegrelid, &aoblkdirrelid, &aovisimaprelid); + if (OidIsValid(aosegrelid)) + { + oid_list = lappend_oid(oid_list, aosegrelid); + oid_list = list_concat(oid_list, diskquota_get_index_list(aosegrelid)); + } + if (OidIsValid(aoblkdirrelid)) + { + oid_list = lappend_oid(oid_list, aoblkdirrelid); + oid_list = list_concat(oid_list, diskquota_get_index_list(aoblkdirrelid)); + } + if (OidIsValid(aovisimaprelid)) + { + oid_list = lappend_oid(oid_list, aovisimaprelid); + oid_list = list_concat(oid_list, diskquota_get_index_list(aovisimaprelid)); + } + + /* Iterate over the oid_list and add their relfilenodes to the rejectmap. */ + foreach (cell, oid_list) + { + Oid curr_oid = lfirst_oid(cell); + HeapTuple curr_tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(curr_oid)); + if (HeapTupleIsValid(curr_tuple)) + { + Form_pg_class curr_form = (Form_pg_class)GETSTRUCT(curr_tuple); + Oid curr_reltablespace = OidIsValid(curr_form->reltablespace) ? curr_form->reltablespace + : MyDatabaseTableSpace; + RelFileNode relfilenode = {.dbNode = MyDatabaseId, + .relNode = curr_form->relfilenode, + .spcNode = curr_reltablespace}; + bool found; + GlobalRejectMapEntry *blocked_filenode_entry; + RejectMapEntry blocked_filenode_keyitem; + + memset(&blocked_filenode_keyitem, 0, sizeof(RejectMapEntry)); + memcpy(&blocked_filenode_keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); + + blocked_filenode_entry = + hash_search(local_rejectmap, &blocked_filenode_keyitem, HASH_ENTER_NULL, &found); + if (!found && blocked_filenode_entry) + { + memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(RejectMapEntry)); + blocked_filenode_entry->segexceeded = rejectmapentry->segexceeded; + } + + heap_freetuple(curr_tuple); + } + } + /* + * The current relation may satisfy multiple blocking conditions, + * we only add it once. + */ + break; + } + } + + heap_freetuple(tuple); + } + else + { + /* + * We cannot fetch the relation from syscache. It may be an uncommitted relation. + * Let's try to fetch it from relation_cache. + */ + DiskQuotaRelationCacheEntry *relation_cache_entry; + bool found; + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + relation_cache_entry = hash_search(relation_cache, &active_oid, HASH_FIND, &found); + /* The session of db1 should not see the table inside db2. */ + if (found && relation_cache_entry && relation_cache_entry->rnode.node.dbNode == MyDatabaseId) + { + Oid relnamespace = relation_cache_entry->namespaceoid; + Oid reltablespace = relation_cache_entry->rnode.node.spcNode; + Oid relowner = relation_cache_entry->owneroid; + RejectMapEntry keyitem; + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + /* Check that if the current relation should be blocked. */ + prepare_rejectmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace); + rejectmapentry = hash_search(local_rejectmap, &keyitem, HASH_FIND, &found); + + if (found && rejectmapentry) + { + List *oid_list = NIL; + ListCell *cell = NULL; + + /* Collect the relation oid together with its auxiliary relations' oid. 
*/ + oid_list = lappend_oid(oid_list, active_oid); + for (int auxoidcnt = 0; auxoidcnt < relation_cache_entry->auxrel_num; ++auxoidcnt) + oid_list = lappend_oid(oid_list, relation_cache_entry->auxrel_oid[auxoidcnt]); + + foreach (cell, oid_list) + { + bool found; + GlobalRejectMapEntry *blocked_filenode_entry; + RejectMapEntry blocked_filenode_keyitem; + Oid curr_oid = lfirst_oid(cell); + + relation_cache_entry = hash_search(relation_cache, &curr_oid, HASH_FIND, &found); + if (found && relation_cache_entry) + { + memset(&blocked_filenode_keyitem, 0, sizeof(RejectMapEntry)); + memcpy(&blocked_filenode_keyitem.relfilenode, &relation_cache_entry->rnode.node, + sizeof(RelFileNode)); + + blocked_filenode_entry = hash_search(local_rejectmap, &blocked_filenode_keyitem, + HASH_ENTER_NULL, &found); + if (!found && blocked_filenode_entry) + { + memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(RejectMapEntry)); + blocked_filenode_entry->segexceeded = rejectmapentry->segexceeded; + } + } + } + } + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + } + } + + LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); + + /* Clear rejectmap entries. */ + hash_seq_init(&hash_seq, disk_quota_reject_map); + while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) + { + if (rejectmapentry->keyitem.relfilenode.dbNode != MyDatabaseId && + rejectmapentry->keyitem.databaseoid != MyDatabaseId) + continue; + hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_REMOVE, NULL); + } + + /* Flush the content of local_rejectmap to the global rejectmap. */ + hash_seq_init(&hash_seq, local_rejectmap); + while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) + { + bool found; + GlobalRejectMapEntry *new_entry; + + /* + * Skip soft limit reject entry. We don't perform soft-limit on segment servers, so we don't flush the + * rejectmap entry with a valid targetoid to the global rejectmap on segment servers. + */ + if (OidIsValid(rejectmapentry->keyitem.targetoid)) continue; + + new_entry = hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_ENTER_NULL, &found); + if (!found && new_entry) memcpy(new_entry, rejectmapentry, sizeof(GlobalRejectMapEntry)); + } + LWLockRelease(diskquota_locks.reject_map_lock); + + PG_RETURN_VOID(); +} + +/* + * show_rejectmap() provides developers or users to dump the rejectmap in shared + * memory on a single server. If you want to query rejectmap on segment servers, + * you should dispatch this query to segments. + */ +PG_FUNCTION_INFO_V1(show_rejectmap); +Datum +show_rejectmap(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + GlobalRejectMapEntry *rejectmap_entry; + struct RejectMapCtx + { + HASH_SEQ_STATUS rejectmap_seq; + HTAB *rejectmap; + } * rejectmap_ctx; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext oldcontext; + HASHCTL hashctl; + HASH_SEQ_STATUS hash_seq; + + /* Create a function context for cross-call persistence. 
*/ + funcctx = SRF_FIRSTCALL_INIT(); + + /* Switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + tupdesc = DiskquotaCreateTemplateTupleDesc(9); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "target_type", TEXTOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "target_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "database_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "tablespace_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "seg_exceeded", BOOLOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "dbnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "spcnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)8, "relnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)9, "segid", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + rejectmap_ctx = (struct RejectMapCtx *)palloc(sizeof(struct RejectMapCtx)); + + /* Create a local hash table and fill it with entries from shared memory. */ + memset(&hashctl, 0, sizeof(hashctl)); + hashctl.keysize = sizeof(RejectMapEntry); + hashctl.entrysize = sizeof(GlobalRejectMapEntry); + hashctl.hcxt = CurrentMemoryContext; + rejectmap_ctx->rejectmap = diskquota_hash_create("rejectmap_ctx rejectmap", 1024, &hashctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); + + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); + hash_seq_init(&hash_seq, disk_quota_reject_map); + while ((rejectmap_entry = hash_seq_search(&hash_seq)) != NULL) + { + GlobalRejectMapEntry *local_rejectmap_entry = NULL; + local_rejectmap_entry = + hash_search(rejectmap_ctx->rejectmap, &rejectmap_entry->keyitem, HASH_ENTER_NULL, NULL); + if (local_rejectmap_entry) + { + memcpy(&local_rejectmap_entry->keyitem, &rejectmap_entry->keyitem, sizeof(RejectMapEntry)); + local_rejectmap_entry->segexceeded = rejectmap_entry->segexceeded; + memcpy(&local_rejectmap_entry->auxblockinfo, &rejectmap_entry->auxblockinfo, sizeof(RejectMapEntry)); + } + } + LWLockRelease(diskquota_locks.reject_map_lock); + + /* Setup first calling context. */ + hash_seq_init(&(rejectmap_ctx->rejectmap_seq), rejectmap_ctx->rejectmap); + funcctx->user_fctx = (void *)rejectmap_ctx; + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + rejectmap_ctx = (struct RejectMapCtx *)funcctx->user_fctx; + + while ((rejectmap_entry = hash_seq_search(&(rejectmap_ctx->rejectmap_seq))) != NULL) + { +#define _TARGETTYPE_STR_SIZE 32 + Datum result; + Datum values[9]; + bool nulls[9]; + HeapTuple tuple; + RejectMapEntry keyitem; + char targettype_str[_TARGETTYPE_STR_SIZE]; + RelFileNode blocked_relfilenode; + + memcpy(&blocked_relfilenode, &rejectmap_entry->keyitem.relfilenode, sizeof(RelFileNode)); + /* + * If the rejectmap entry is indexed by relfilenode, we dump the blocking + * condition from auxblockinfo. 
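+		 * Otherwise the entry was added by the soft-limit path on the QD and the
+		 * key itself already identifies the blocked role/schema/tablespace.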
+ */ + if (!OidIsValid(blocked_relfilenode.relNode)) + memcpy(&keyitem, &rejectmap_entry->keyitem, sizeof(keyitem)); + else + memcpy(&keyitem, &rejectmap_entry->auxblockinfo, sizeof(keyitem)); + memset(targettype_str, 0, sizeof(targettype_str)); + + switch ((QuotaType)keyitem.targettype) + { + case ROLE_QUOTA: + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "ROLE_QUOTA"); + break; + case NAMESPACE_QUOTA: + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "NAMESPACE_QUOTA"); + break; + case ROLE_TABLESPACE_QUOTA: + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "ROLE_TABLESPACE_QUOTA"); + break; + case NAMESPACE_TABLESPACE_QUOTA: + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "NAMESPACE_TABLESPACE_QUOTA"); + break; + default: + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "UNKNOWN"); + break; + } + + values[0] = CStringGetTextDatum(targettype_str); + values[1] = ObjectIdGetDatum(keyitem.targetoid); + values[2] = ObjectIdGetDatum(keyitem.databaseoid); + values[3] = ObjectIdGetDatum(keyitem.tablespaceoid); + values[4] = BoolGetDatum(rejectmap_entry->segexceeded); + values[5] = ObjectIdGetDatum(blocked_relfilenode.dbNode); + values[6] = ObjectIdGetDatum(blocked_relfilenode.spcNode); + values[7] = ObjectIdGetDatum(blocked_relfilenode.relNode); + values[8] = Int32GetDatum(GpIdentity.segindex); + + memset(nulls, false, sizeof(nulls)); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + + SRF_RETURN_DONE(funcctx); +} + +void +update_monitor_db_mpp(Oid dbid, FetchTableStatType action, const char *schema) +{ + StringInfoData sql_command; + initStringInfo(&sql_command); + appendStringInfo(&sql_command, + "SELECT %s.diskquota_fetch_table_stat(%d, '{%d}'::oid[]) FROM gp_dist_random('gp_id')", schema, + action, dbid); + /* Add current database to the monitored db cache on all segments */ + int ret = SPI_execute(sql_command.data, true, 0); + pfree(sql_command.data); + + ereportif(ret != SPI_OK_SELECT, ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + + /* Add current database to the monitored db cache on coordinator */ + update_monitor_db(dbid, action); +} + +static void +format_name(const char *prefix, uint32 id, StringInfo str) +{ + resetStringInfo(str); + appendStringInfo(str, "%s_%u", prefix, id); + Assert(str->len <= SHMEM_INDEX_KEYSIZE); +} + +static bool +get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag) +{ + return (entry->flag & flag) ? true : false; +} + +static void +reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag) +{ + entry->flag &= (UINT32_MAX ^ flag); +} + +static void +set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag) +{ + entry->flag |= flag; +} diff --git a/gpcontrib/diskquota/src/relation_cache.c b/gpcontrib/diskquota/src/relation_cache.c new file mode 100644 index 00000000000..8b1e7f11d61 --- /dev/null +++ b/gpcontrib/diskquota/src/relation_cache.c @@ -0,0 +1,630 @@ +/* ------------------------------------------------------------------------- + * + * relation_cache.c + * + * Copyright (c) 2020-Present VMware, Inc. 
or its affiliates + * + * IDENTIFICATION + * diskquota/relation_cache.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/relation.h" +#include "catalog/indexing.h" +#include "catalog/pg_class.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_tablespace.h" +#include "catalog/objectaccess.h" +#include "utils/rel.h" +#include "utils/relcache.h" +#include "utils/relfilenodemap.h" +#include "utils/syscache.h" +#include "utils/array.h" +#include "utils/inval.h" +#include "funcapi.h" +#include "diskquota.h" +#include "relation_cache.h" + +HTAB *relation_cache = NULL; +HTAB *relid_cache = NULL; + +static void update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, + DiskQuotaRelidCacheEntry *relid_entry); + +PG_FUNCTION_INFO_V1(show_relation_cache); + +void +init_shm_worker_relation_cache(void) +{ + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); + relation_cache = DiskquotaShmemInitHash("relation_cache", diskquota_max_active_tables, diskquota_max_active_tables, + &ctl, HASH_ELEM, DISKQUOTA_OID_HASH); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaRelidCacheEntry); + relid_cache = DiskquotaShmemInitHash("relid_cache", diskquota_max_active_tables, diskquota_max_active_tables, &ctl, + HASH_ELEM, DISKQUOTA_OID_HASH); +} + +Oid +get_relid_by_relfilenode(RelFileNode relfilenode) +{ + Oid relid; + + relid = RelidByRelfilenode(relfilenode.spcNode, relfilenode.relNode); + if (OidIsValid(relid)) + { + remove_cache_entry(InvalidOid, relfilenode.relNode); + return relid; + } + + relid = get_uncommitted_table_relid(relfilenode.relNode); + return relid; +} + +void +remove_cache_entry(Oid relid, Oid relfilenode) +{ + DiskQuotaRelationCacheEntry *relation_entry; + DiskQuotaRelidCacheEntry *relid_entry; + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); + if (OidIsValid(relid)) + { + relation_entry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (relation_entry) + { + hash_search(relid_cache, &relation_entry->rnode.node.relNode, HASH_REMOVE, NULL); + hash_search(relation_cache, &relid, HASH_REMOVE, NULL); + } + } + + if (OidIsValid(relfilenode)) + { + relid_entry = hash_search(relid_cache, &relfilenode, HASH_FIND, NULL); + if (relid_entry) + { + hash_search(relation_cache, &relid_entry->relid, HASH_REMOVE, NULL); + hash_search(relid_cache, &relfilenode, HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); +} + +Oid +get_uncommitted_table_relid(Oid relfilenode) +{ + Oid relid = InvalidOid; + DiskQuotaRelidCacheEntry *entry; + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + entry = hash_search(relid_cache, &relfilenode, HASH_FIND, NULL); + if (entry) + { + relid = entry->relid; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return relid; +} + +static void +add_auxrelid_to_relation_entry(DiskQuotaRelationCacheEntry *entry, Oid relid) +{ + int i; + + for (i = 0; i < entry->auxrel_num; i++) + { + if (entry->auxrel_oid[i] == relid) + { + return; + } + } + entry->auxrel_oid[entry->auxrel_num++] = relid; +} + +static void +update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, DiskQuotaRelidCacheEntry *relid_entry) +{ + Relation rel; + rel = diskquota_relation_open(relid); + + if (rel == NULL) + { + return; + } + + if (relation_entry) + { + relation_entry->relid = 
relid; + relation_entry->rnode.node = rel->rd_node; + relation_entry->rnode.backend = rel->rd_backend; + relation_entry->owneroid = rel->rd_rel->relowner; + relation_entry->namespaceoid = rel->rd_rel->relnamespace; + relation_entry->relstorage = DiskquotaGetRelstorage(rel->rd_rel); + relation_entry->relam = rel->rd_rel->relam; + } + + if (relid_entry) + { + relid_entry->relfilenode = rel->rd_node.relNode; + relid_entry->relid = relid; + } + + relation_entry->primary_table_relid = relid; + + RelationClose(rel); +} + +void +update_relation_cache(Oid relid) +{ + DiskQuotaRelationCacheEntry relation_entry_data = {0}; + DiskQuotaRelationCacheEntry *relation_entry; + DiskQuotaRelidCacheEntry relid_entry_data = {0}; + DiskQuotaRelidCacheEntry *relid_entry; + Oid prelid; + + update_relation_entry(relid, &relation_entry_data, &relid_entry_data); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); + relation_entry = hash_search(relation_cache, &relation_entry_data.relid, HASH_ENTER, NULL); + memcpy(relation_entry, &relation_entry_data, sizeof(DiskQuotaRelationCacheEntry)); + + relid_entry = hash_search(relid_cache, &relid_entry_data.relfilenode, HASH_ENTER, NULL); + memcpy(relid_entry, &relid_entry_data, sizeof(DiskQuotaRelidCacheEntry)); + LWLockRelease(diskquota_locks.relation_cache_lock); + + prelid = get_primary_table_oid(relid, false); + if (OidIsValid(prelid) && prelid != relid) + { + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); + relation_entry->primary_table_relid = prelid; + relation_entry = hash_search(relation_cache, &prelid, HASH_FIND, NULL); + if (relation_entry) + { + add_auxrelid_to_relation_entry(relation_entry, relid); + } + LWLockRelease(diskquota_locks.relation_cache_lock); + } +} + +static Oid +parse_primary_table_oid(Oid relid, bool on_bgworker) +{ + Relation rel; + Oid namespace; + Oid parsed_oid; + char relname[NAMEDATALEN]; + + /* + * diskquota bgworker should be error tolerant to keep it running in background, + * so we can't throw an error. + * On the other hand, diskquota launcher can throw an error if needed. 
+ */ + if (on_bgworker) + { + if (!get_rel_name_namespace(relid, &namespace, relname)) + { + return InvalidOid; + } + } + else + { + rel = diskquota_relation_open(relid); + + if (rel == NULL) + { + return InvalidOid; + } + namespace = rel->rd_rel->relnamespace; + memcpy(relname, rel->rd_rel->relname.data, NAMEDATALEN); + + RelationClose(rel); + } + + parsed_oid = diskquota_parse_primary_table_oid(namespace, relname); + if (OidIsValid(parsed_oid)) + { + return parsed_oid; + } + return relid; +} + +Oid +get_primary_table_oid(Oid relid, bool on_bgworker) +{ + DiskQuotaRelationCacheEntry *relation_entry; + Oid cached_prelid = relid; + Oid parsed_prelid; + + parsed_prelid = parse_primary_table_oid(relid, on_bgworker); + if (OidIsValid(parsed_prelid)) + { + return parsed_prelid; + } + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + relation_entry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (relation_entry) + { + cached_prelid = relation_entry->primary_table_relid; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return cached_prelid; +} + +void +remove_committed_relation_from_cache(void) +{ + HASH_SEQ_STATUS iter = {0}; + DiskQuotaRelationCacheEntry *entry = NULL; + DiskQuotaRelationCacheEntry *local_entry = NULL; + HTAB *local_relation_cache; + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); + ctl.hcxt = CurrentMemoryContext; + local_relation_cache = + diskquota_hash_create("local relation cache", 1024, &ctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&iter, relation_cache); + while ((entry = hash_seq_search(&iter)) != NULL) + { + /* The session of db1 should not see the table inside db2. */ + if (entry->rnode.node.dbNode != MyDatabaseId) continue; + local_entry = hash_search(local_relation_cache, &entry->relid, HASH_ENTER, NULL); + memcpy(local_entry, entry, sizeof(DiskQuotaRelationCacheEntry)); + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + hash_seq_init(&iter, local_relation_cache); + while ((local_entry = hash_seq_search(&iter)) != NULL) + { + /* + * The committed table's oid can be fetched by RelidByRelfilenode(). + * If the table's relfilenode is modified and its relation_cache_entry + * remains in relation_cache, the outdated relation_cache_entry should + * be removed. + */ + if (OidIsValid(RelidByRelfilenode(local_entry->rnode.node.spcNode, local_entry->rnode.node.relNode))) + { + remove_cache_entry(InvalidOid, local_entry->rnode.node.relNode); + } + } + hash_destroy(local_relation_cache); +} + +Datum +show_relation_cache(PG_FUNCTION_ARGS) +{ + DiskQuotaRelationCacheEntry *entry; + FuncCallContext *funcctx; + struct RelationCacheCtx + { + HASH_SEQ_STATUS iter; + HTAB *relation_cache; + }; + struct RelationCacheCtx *relation_cache_ctx; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext oldcontext; + HASHCTL hashctl; + HASH_SEQ_STATUS hash_seq; + + /* Create a function context for cross-call persistence. 
*/ + funcctx = SRF_FIRSTCALL_INIT(); + + /* Switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + tupdesc = DiskquotaCreateTemplateTupleDesc(12); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "RELID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "PRIMARY_TABLE_OID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "AUXREL_NUM", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "OWNEROID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "NAMESPACEOID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "BACKENDID", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "SPCNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)8, "DBNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)9, "RELNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)10, "RELSTORAGE", CHAROID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)11, "AUXREL_OID", OIDARRAYOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)12, "RELAM", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + relation_cache_ctx = (struct RelationCacheCtx *)palloc(sizeof(struct RelationCacheCtx)); + + /* Create a local hash table and fill it with entries from shared memory. */ + memset(&hashctl, 0, sizeof(hashctl)); + hashctl.keysize = sizeof(Oid); + hashctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); + hashctl.hcxt = CurrentMemoryContext; + + relation_cache_ctx->relation_cache = diskquota_hash_create("relation_cache_ctx->relation_cache", 1024, &hashctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&hash_seq, relation_cache); + while ((entry = (DiskQuotaRelationCacheEntry *)hash_seq_search(&hash_seq)) != NULL) + { + /* The session of db1 should not see the table inside db2. */ + if (entry->rnode.node.dbNode != MyDatabaseId) continue; + DiskQuotaRelationCacheEntry *local_entry = + hash_search(relation_cache_ctx->relation_cache, &entry->relid, HASH_ENTER_NULL, NULL); + if (local_entry) + { + memcpy(local_entry, entry, sizeof(DiskQuotaRelationCacheEntry)); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + /* Setup first calling context. 
*/ + hash_seq_init(&(relation_cache_ctx->iter), relation_cache_ctx->relation_cache); + funcctx->user_fctx = (void *)relation_cache_ctx; + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + relation_cache_ctx = (struct RelationCacheCtx *)funcctx->user_fctx; + + while ((entry = (DiskQuotaRelationCacheEntry *)hash_seq_search(&(relation_cache_ctx->iter))) != NULL) + { + Datum result; + Datum values[12]; + Datum auxrel_oid[11]; + bool nulls[12]; + HeapTuple tuple; + ArrayType *array; + int i; + + for (i = 0; i < entry->auxrel_num; i++) + { + auxrel_oid[i] = ObjectIdGetDatum(entry->auxrel_oid[i]); + } + array = construct_array(auxrel_oid, entry->auxrel_num, OIDOID, sizeof(Oid), true, 'i'); + + values[0] = ObjectIdGetDatum(entry->relid); + values[1] = ObjectIdGetDatum(entry->primary_table_relid); + values[2] = Int32GetDatum(entry->auxrel_num); + values[3] = ObjectIdGetDatum(entry->owneroid); + values[4] = ObjectIdGetDatum(entry->namespaceoid); + values[5] = Int32GetDatum(entry->rnode.backend); + values[6] = ObjectIdGetDatum(entry->rnode.node.spcNode); + values[7] = ObjectIdGetDatum(entry->rnode.node.dbNode); + values[8] = ObjectIdGetDatum(entry->rnode.node.relNode); + values[9] = CharGetDatum(entry->relstorage); + values[10] = PointerGetDatum(array); + values[11] = ObjectIdGetDatum(entry->relam); + + memset(nulls, false, sizeof(nulls)); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + + SRF_RETURN_DONE(funcctx); +} + +static void +add_auxrelation_to_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *pentry) +{ + List *index_oids; + ListCell *cell; + + add_auxrelid_to_relation_entry(pentry, relid); + + index_oids = diskquota_get_index_list(relid); + foreach (cell, index_oids) + { + Oid idxrelid = lfirst_oid(cell); + add_auxrelid_to_relation_entry(pentry, idxrelid); + } + list_free(index_oids); +} + +/* + * Returns true iff blkdirrelid is missing. + * pg_aoblkdir_xxxx is created by `create index on ao_table`, which can not be + * fetched by diskquota_get_appendonly_aux_oid_list() before index's creation + * finish. By returning true to inform the caller that blkdirrelid is missing, + * then the caller will fetch blkdirrelid by traversing relation_cache. + */ +static bool +get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry *relation_entry) +{ + HeapTuple classTup; + Form_pg_class classForm; + Oid segrelid = InvalidOid; + Oid blkdirrelid = InvalidOid; + Oid visimaprelid = InvalidOid; + bool is_ao = false; + + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); + classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(classTup) || relation_entry == NULL) + { + return false; + } + + classForm = (Form_pg_class)GETSTRUCT(classTup); + + relation_entry->relid = relid; + relation_entry->primary_table_relid = relid; + relation_entry->owneroid = classForm->relowner; + relation_entry->namespaceoid = classForm->relnamespace; + relation_entry->relstorage = DiskquotaGetRelstorage(classForm); + relation_entry->relam = classForm->relam; + relation_entry->rnode.node.spcNode = + OidIsValid(classForm->reltablespace) ? 
classForm->reltablespace : MyDatabaseTableSpace; + relation_entry->rnode.node.dbNode = MyDatabaseId; + relation_entry->rnode.node.relNode = classForm->relfilenode; + relation_entry->rnode.backend = + classForm->relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId; + + /* toast table */ + if (OidIsValid(classForm->reltoastrelid)) + { + add_auxrelation_to_relation_entry(classForm->reltoastrelid, relation_entry); + } + + heap_freetuple(classTup); + + if (TableIsAoRows(relation_entry->relstorage, relation_entry->relam) || + TableIsAoCols(relation_entry->relstorage, relation_entry->relam)) + { + is_ao = true; + } + + /* ao table */ + if (is_ao) + { + diskquota_get_appendonly_aux_oid_list(relid, &segrelid, &blkdirrelid, &visimaprelid); + if (OidIsValid(segrelid)) + { + add_auxrelation_to_relation_entry(segrelid, relation_entry); + } + if (OidIsValid(blkdirrelid)) + { + add_auxrelation_to_relation_entry(blkdirrelid, relation_entry); + } + if (OidIsValid(visimaprelid)) + { + add_auxrelation_to_relation_entry(visimaprelid, relation_entry); + } + + if (!OidIsValid(blkdirrelid)) + { + return true; + } + } + return false; +} + +static void +get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *entry) +{ + DiskQuotaRelationCacheEntry *tentry; + bool is_missing_relid; + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + tentry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (tentry) + { + memcpy(entry, tentry, sizeof(DiskQuotaRelationCacheEntry)); + LWLockRelease(diskquota_locks.relation_cache_lock); + return; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + is_missing_relid = get_relation_entry_from_pg_class(relid, entry); + + if (is_missing_relid) + { + DiskQuotaRelationCacheEntry *relation_cache_entry; + HASH_SEQ_STATUS iter; + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&iter, relation_cache); + while ((relation_cache_entry = hash_seq_search(&iter)) != NULL) + { + if (relation_cache_entry->primary_table_relid == relid) + { + add_auxrelid_to_relation_entry(entry, relation_cache_entry->relid); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + } +} + +static void +get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage, Oid *relam) +{ + DiskQuotaRelationCacheEntry *relation_cache_entry; + HeapTuple classTup; + Form_pg_class classForm; + + memset(rnode, 0, sizeof(RelFileNodeBackend)); + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); + classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); + if (HeapTupleIsValid(classTup)) + { + classForm = (Form_pg_class)GETSTRUCT(classTup); + rnode->node.spcNode = OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace; + rnode->node.dbNode = MyDatabaseId; + rnode->node.relNode = classForm->relfilenode; + rnode->backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? 
TempRelBackendId : InvalidBackendId; + *relstorage = DiskquotaGetRelstorage(classForm); + *relam = classForm->relam; + heap_freetuple(classTup); + remove_cache_entry(relid, InvalidOid); + return; + } + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + relation_cache_entry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (relation_cache_entry) + { + *rnode = relation_cache_entry->rnode; + *relstorage = relation_cache_entry->relstorage; + *relam = relation_cache_entry->relam; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return; +} + +static Size +do_calculate_table_size(DiskQuotaRelationCacheEntry *entry) +{ + Size tablesize = 0; + RelFileNodeBackend rnode; + Oid subrelid; + char relstorage = 0; + Oid relam = InvalidOid; + int i; + + get_relfilenode_by_relid(entry->relid, &rnode, &relstorage, &relam); + tablesize += calculate_relation_size_all_forks(&rnode, relstorage, relam); + + for (i = 0; i < entry->auxrel_num; i++) + { + subrelid = entry->auxrel_oid[i]; + get_relfilenode_by_relid(subrelid, &rnode, &relstorage, &relam); + tablesize += calculate_relation_size_all_forks(&rnode, relstorage, relam); + } + return tablesize; +} + +Size +calculate_table_size(Oid relid) +{ + DiskQuotaRelationCacheEntry entry = {0}; + + get_relation_entry(relid, &entry); + + return do_calculate_table_size(&entry); +} diff --git a/gpcontrib/diskquota/src/relation_cache.h b/gpcontrib/diskquota/src/relation_cache.h new file mode 100644 index 00000000000..f9e14b9d1a8 --- /dev/null +++ b/gpcontrib/diskquota/src/relation_cache.h @@ -0,0 +1,49 @@ +/* ------------------------------------------------------------------------- + * + * relation_cache.h + * + * Copyright (c) 2020-Present VMware, Inc. or its affiliates + * + * IDENTIFICATION + * diskquota/relation_cache.h + * + * ------------------------------------------------------------------------- + */ +#ifndef RELATION_CACHE_H +#define RELATION_CACHE_H + +#include "c.h" +#include "utils/hsearch.h" +#include "storage/relfilenode.h" + +typedef struct DiskQuotaRelationCacheEntry +{ + Oid relid; + Oid primary_table_relid; + Oid auxrel_oid[10]; + Oid auxrel_num; + Oid owneroid; + Oid namespaceoid; + char relstorage; + Oid relam; + RelFileNodeBackend rnode; +} DiskQuotaRelationCacheEntry; + +typedef struct DiskQuotaRelidCacheEntry +{ + Oid relfilenode; + Oid relid; +} DiskQuotaRelidCacheEntry; + +extern HTAB *relation_cache; + +extern void init_shm_worker_relation_cache(void); +extern Oid get_relid_by_relfilenode(RelFileNode relfilenode); +extern void remove_cache_entry(Oid relid, Oid relfilenode); +extern Oid get_uncommitted_table_relid(Oid relfilenode); +extern void update_relation_cache(Oid relid); +extern Oid get_primary_table_oid(Oid relid, bool on_bgworker); +extern void remove_committed_relation_from_cache(void); +extern Size calculate_table_size(Oid relid); + +#endif diff --git a/gpcontrib/diskquota/tests/CMakeLists.txt b/gpcontrib/diskquota/tests/CMakeLists.txt new file mode 100644 index 00000000000..72c65f73cbd --- /dev/null +++ b/gpcontrib/diskquota/tests/CMakeLists.txt @@ -0,0 +1,109 @@ +include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) + +list(APPEND isolation2_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected) +list(APPEND regress_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected) +# PLPYTHON_LANG_STR will be replaced by Regress.cmake +set(PLPYTHON_LANG_STR "plpython3u") +set(POSTMASTER_START_CMD "pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w -o \"-c gp_role=dispatch\" start") + 
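
Note on the API introduced just above: relation_cache.h declares `calculate_table_size(Oid relid)`, which resolves a relation's relfilenode (falling back to the shared `relation_cache` for relations whose pg_class entry is not yet visible) and adds the sizes of its toast and append-only auxiliary relations. The following is a minimal, illustrative caller sketch only, not part of this patch; the helper name and loop are assumptions, and it relies solely on the declarations in relation_cache.h shown above.

```c
/* Illustrative only -- not part of this patch.
 * Sums the full on-disk size (main relation plus toast/AO auxiliary
 * relations) for a set of relation OIDs, using calculate_table_size()
 * as declared in relation_cache.h above. */
#include "postgres.h"

#include "relation_cache.h"

static Size
sum_table_sizes(const Oid *relids, int nrelids)
{
	Size	total = 0;
	int		i;

	for (i = 0; i < nrelids; i++)
		total += calculate_table_size(relids[i]);

	return total;
}
```
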
+set(exclude_fault_injector OFF) +# GP7 release build doesn't support fault injector. +if (CMAKE_BUILD_TYPE STREQUAL "Release") + message(WARNING "Fault injector test cases will be disabled.") + set(exclude_fault_injector ON) +endif() + +# Check if pg_isolation2_regress is available (either pre-built or can be built from source) +# In binary-only installations, PG_SRC_DIR may not exist or isolation2 may not be buildable +set(ENABLE_ISOLATION2_TESTS OFF) + +# First, check if pg_isolation2_regress is already installed +find_program(PG_ISOLATION2_REGRESS pg_isolation2_regress HINTS ${PG_BIN_DIR}) +if(PG_ISOLATION2_REGRESS) + message(STATUS "Found pg_isolation2_regress: ${PG_ISOLATION2_REGRESS}") + set(ENABLE_ISOLATION2_TESTS ON) +elseif(PG_SRC_DIR_AVAILABLE AND EXISTS "${PG_SRC_DIR}/src/test/isolation2/Makefile") + # Can build from source + message(STATUS "pg_isolation2_regress will be built from source: ${PG_SRC_DIR}/src/test/isolation2") + set(ENABLE_ISOLATION2_TESTS ON) +else() + message(WARNING "pg_isolation2_regress not found and cannot be built from source. Isolation2 tests will be disabled.") +endif() + +RegressTarget_Add(regress + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql + EXPECTED_DIR ${regress_expected_DIR} + RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results + DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data + SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule + EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} + REGRESS_OPTS + --load-extension=gp_inject_fault + --load-extension=diskquota_test + --dbname=contrib_regression) + +if(ENABLE_ISOLATION2_TESTS) + RegressTarget_Add(isolation2 + REGRESS_TYPE + isolation2 + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/sql + EXPECTED_DIR ${isolation2_expected_DIR} + RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results + DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data + SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule + EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} + REGRESS_OPTS + --load-extension=gp_inject_fault + --dbname=isolation2test) +endif() + +add_custom_target(install_test_extension + COMMAND + cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test.control ${CMAKE_INSTALL_PREFIX}/share/postgresql/extension + COMMAND + cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test--1.0.sql ${CMAKE_INSTALL_PREFIX}/share/postgresql/extension + ) + +add_custom_target(installcheck) +add_dependencies(regress install_test_extension) + +if(ENABLE_ISOLATION2_TESTS) + add_dependencies(isolation2 install_test_extension) + add_dependencies(installcheck isolation2 regress) +else() + add_dependencies(installcheck regress) +endif() + +# Example to run test_truncate infinite times +# RegressTarget_Add(regress_config +# INIT_FILE +# ${CMAKE_CURRENT_SOURCE_DIR}/init_file +# SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql +# EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected +# RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results +# DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data +# REGRESS +# config test_create_extension +# REGRESS_OPTS +# --load-extension=gp_inject_fault +# --dbname=contrib_regression) +# RegressTarget_Add(regress_truncate_loop +# INIT_FILE +# ${CMAKE_CURRENT_SOURCE_DIR}/init_file +# SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql +# EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected +# RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results +# DATA_DIR 
${CMAKE_CURRENT_SOURCE_DIR}/data +# REGRESS +# test_truncate +# RUN_TIMES -1 +# REGRESS_OPTS +# --load-extension=gp_inject_fault +# --dbname=contrib_regression +# --use-existing) +# add_dependencies(regress_truncate_loop regress_config) +# add_dependencies(installcheck regress_truncate_loop) diff --git a/gpcontrib/diskquota/tests/data/current_binary_name b/gpcontrib/diskquota/tests/data/current_binary_name new file mode 100755 index 00000000000..2e2b0e7a1d8 --- /dev/null +++ b/gpcontrib/diskquota/tests/data/current_binary_name @@ -0,0 +1,9 @@ +#!/bin/bash + +cd "$(dirname "$0")" || exit 1 + +if grep -q -E '^1.0' ../../VERSION; then + echo -n "diskquota.so" +else + echo -n "diskquota-$(grep -o -E '^[0-9]*.[0-9]*' ../../VERSION).so" +fi diff --git a/gpcontrib/diskquota/tests/init_file b/gpcontrib/diskquota/tests/init_file new file mode 100644 index 00000000000..613ebf85dc0 --- /dev/null +++ b/gpcontrib/diskquota/tests/init_file @@ -0,0 +1,48 @@ +-- This file contains global patterns of messages that should be ignored or +-- masked out, when comparing test results with the expected output. +-- Individual tests can contain additional patterns specific to the test. + +-- start_matchignore +# This pattern is extracted from gpdb/src/test/regress/init_file +m/^(?:HINT|NOTICE):\s+.+\'DISTRIBUTED BY\' clause.*/ +m/WARNING: \[diskquota\] worker not found for database.*/ +m/WARNING: \[diskquota\] database .* not found for getting epoch .*/ +m/^NOTICE: CREATE TABLE will create partition */ +m/^WARNING: skipping .* cannot calculate this foreign table size.*/ +m/^NOTICE: resource queue required -- using default resource queue "pg_default"/ +m/NOTICE: One or more columns in the following table\(s\) do not have statistics: / +m/HINT: For non-partitioned tables, run analyze .+\. For partitioned tables, run analyze rootpartition .+\. See log for columns missing statistics\./ +-- end_matchignore + +-- start_matchsubs +m/diskquota.c:\d+\)/ +s/diskquota.c:\d+\)/diskquota.c:xxx/ +m/diskquota_utility.c:\d+\)/ +s/diskquota_utility.c:\d+\)/diskquota_utility.c:xxx/ +m/^CONTEXT:*/ +s/^CONTEXT:/DETAIL:/ +m/plpython\du/ +s/plpython\du/plpythonu/ + +# Remove segment identifiers from error message. +# E.g., (slice1 XXX.XXX.XXX.XXX:XXXX pid=XXXX) +m/(slice\d+ [0-9.]+:\d+ pid=\d+)/ +s/(slice\d+ [0-9.]+:\d+ pid=\d+)// + +# Remove oid of schema/role/tablespace from error message. 
+m/ERROR: role's disk space quota exceeded with name: \d+.*/ +s/ERROR: role's disk space quota exceeded with name: \d+.*/[hardlimit] role's disk space quota exceeded/ + +m/ERROR: schema's disk space quota exceeded with name: \d+.*/ +s/ERROR: schema's disk space quota exceeded with name: \d+.*/[hardlimit] schema's disk space quota exceeded/ + +m/ERROR: tablespace: \d+, role: \d+ diskquota exceeded.*/ +s/ERROR: tablespace: \d+, role: \d+ diskquota exceeded.*/[hardlimit] tablespace-role's disk space quota exceeded/ + +m/ERROR: tablespace: \d+, schema: \d+ diskquota exceeded.*/ +s/ERROR: tablespace: \d+, schema: \d+ diskquota exceeded.*/[hardlimit] tablespace-schema's disk space quota exceeded/ + +m/^ERROR: Can not set disk quota for system owner:.*/ +s/^ERROR: Can not set disk quota for system owner:.*/ERROR: Can not set disk quota from system owner:/ + +-- end_matchsubs diff --git a/gpcontrib/diskquota/tests/isolation2/.gitignore b/gpcontrib/diskquota/tests/isolation2/.gitignore new file mode 100644 index 00000000000..dee11c2df0e --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/.gitignore @@ -0,0 +1,2 @@ +sql_isolation_testcase.* +results/* diff --git a/gpcontrib/diskquota/tests/isolation2/expected/config.out b/gpcontrib/diskquota/tests/isolation2/expected/config.out new file mode 100644 index 00000000000..294d8a78c19 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/config.out @@ -0,0 +1,74 @@ +--start_ignore +CREATE DATABASE diskquota; +CREATE +--end_ignore + +!\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); +-- start_ignore +20251211:00:07:53:067251 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c shared_preload_libraries -v diskquota-2.3.so' + +-- end_ignore +(exited with code 0) +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +20251211:00:07:53:067303 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' + +-- end_ignore +(exited with code 0) +!\retcode gpconfig -c max_worker_processes -v 20 --skipvalidation; +-- start_ignore +20251211:00:07:53:067350 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c max_worker_processes -v 20 --skipvalidation' + +-- end_ignore +(exited with code 0) + +!\retcode gpstop -raf; +-- start_ignore +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -raf +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... 
+20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Coordinator segment instance directory=/home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Attempting forceful termination of any leftover coordinator process +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Stopping coordinator standby host cdw mode=fast +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown standby process on cdw +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:07:57:067397 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:07:57:067397 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20251211:00:07:57:067397 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:- Segments stopped successfully = 6 +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:- Segments with errors during stop = 0 +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown 6 of 6 segment instances +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-Database successfully shutdown with no errors reported +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-Restarting System... + +-- end_ignore +(exited with code 0) + +-- Show the values of all GUC variables +--start_ignore +-- naptime cannot be 0 for release build +1: SHOW diskquota.naptime; + diskquota.naptime +------------------- + 0 +(1 row) +--end_ignore +1: SHOW diskquota.max_active_tables; + diskquota.max_active_tables +----------------------------- + 307200 +(1 row) +1: SHOW diskquota.worker_timeout; + diskquota.worker_timeout +-------------------------- + 60 +(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/reset_config.out b/gpcontrib/diskquota/tests/isolation2/expected/reset_config.out new file mode 100644 index 00000000000..045c86d1e10 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/reset_config.out @@ -0,0 +1,23 @@ +!\retcode gpconfig -c diskquota.naptime -v 2; +-- start_ignore +20251211:00:10:07:077993 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 2' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... 
+20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) + +1: SHOW diskquota.naptime; + diskquota.naptime +------------------- + 2 +(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/setup.out b/gpcontrib/diskquota/tests/isolation2/expected/setup.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_create_extension.out b/gpcontrib/diskquota/tests/isolation2/expected/test_create_extension.out new file mode 100644 index 00000000000..211ebd639f6 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_create_extension.out @@ -0,0 +1,15 @@ +CREATE EXTENSION diskquota; +CREATE + +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_drop_extension.out b/gpcontrib/diskquota/tests/isolation2/expected/test_drop_extension.out new file mode 100644 index 00000000000..4a9e4ecb16f --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_drop_extension.out @@ -0,0 +1,12 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +DROP EXTENSION diskquota; +DROP diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_dropped_table.out b/gpcontrib/diskquota/tests/isolation2/expected/test_dropped_table.out new file mode 100644 index 00000000000..5e889f0d8bd --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_dropped_table.out @@ -0,0 +1,116 @@ +-- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +-- start_ignore +20251211:00:09:08:074039 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 5 --skipvalidation' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... 
+20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA dropped_schema; +CREATE +1: SET search_path TO dropped_schema; +SET +1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); +CREATE +1: INSERT INTO dropped_table SELECT generate_series(1, 10000); +INSERT 10000 +-- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: DROP TABLE dropped_table; +DROP +1q: ... + +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -afr +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Coordinator segment instance directory=/home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Attempting forceful termination of any leftover coordinator process +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Stopping coordinator standby host cdw mode=fast +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown standby process on cdw +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:09:31:074151 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:09:31:074151 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20251211:00:09:31:074151 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:- Segments stopped successfully = 6 +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:- Segments with errors during stop = 0 +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown 6 of 6 segment instances +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-Database successfully shutdown with no errors reported +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-Restarting System... + +-- end_ignore +(exited with code 0) + +-- Indicates that there is no dropped table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA dropped_schema CASCADE; +DROP +1q: ... + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +20251211:00:09:45:077146 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_ereport_from_seg.out b/gpcontrib/diskquota/tests/isolation2/expected/test_ereport_from_seg.out new file mode 100644 index 00000000000..776bfac6276 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_ereport_from_seg.out @@ -0,0 +1,62 @@ +CREATE SCHEMA efs1; +CREATE +SELECT diskquota.set_schema_quota('efs1', '1MB'); + set_schema_quota +------------------ + +(1 row) +CREATE TABLE efs1.t(i int); +CREATE + +INSERT INTO efs1.t SELECT generate_series(1, 10000); +INSERT 10000 +-- wait for refresh of diskquota and check the quota size +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + efs1 | 1 | 688128 +(1 row) + +-- Enable check quota by relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +INSERT INTO efs1.t SELECT generate_series(1, 10000); +INSERT 10000 + +-- wait for refresh of diskquota and check whether the quota size changes +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + efs1 | 1 | 1081344 +(1 row) + +DROP TABLE efs1.t; +DROP +DROP SCHEMA efs1; +DROP + +-- Reset fault injection points set by us at the top of this test. +SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_fast_quota_view.out b/gpcontrib/diskquota/tests/isolation2/expected/test_fast_quota_view.out new file mode 100644 index 00000000000..22bde74857d --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_fast_quota_view.out @@ -0,0 +1,182 @@ +CREATE SCHEMA s1; +CREATE +CREATE SCHEMA s2; +CREATE + +CREATE ROLE r LOGIN SUPERUSER; +CREATE + +!\retcode mkdir -p /tmp/spc1; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode mkdir -p /tmp/spc2; +-- start_ignore + +-- end_ignore +(exited with code 0) + +DROP TABLESPACE IF EXISTS spc1; +DROP +CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; +CREATE +DROP TABLESPACE IF EXISTS spc2; +DROP +CREATE TABLESPACE spc2 LOCATION '/tmp/spc2'; +CREATE + +SELECT diskquota.set_schema_quota('s1', '100 MB'); + set_schema_quota +------------------ + +(1 row) +SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +SELECT diskquota.set_role_quota('r', '100 MB'); + set_role_quota +---------------- + +(1 row) +SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +-- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view +1: BEGIN; +BEGIN +1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i); +CREATE +1: INSERT INTO s1.t SELECT generate_series(1, 100000); +INSERT 100000 + +1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i); +CREATE +1: INSERT INTO s2.t SELECT generate_series(1, 100000); +INSERT 100000 + +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check schema quota view before transaction commits +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + s1 | 100 | 3932160 +(1 row) +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + s2 | spc1 | 100 | 3932160 +(1 row) + +1: COMMIT; 
+COMMIT +2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + s1 | 100 | 3932160 +(1 row) +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + s2 | spc1 | 100 | 3932160 +(1 row) + +-- login r to test role quota view +1: SET ROLE r; +SET + +-- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view +1: BEGIN; +BEGIN +1: CREATE TABLE t1(i int) DISTRIBUTED BY (i); +CREATE +1: INSERT INTO t1 SELECT generate_series(1, 100000); +INSERT 100000 + +1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i); +CREATE +1: INSERT INTO t2 SELECT generate_series(1, 100000); +INSERT 100000 + +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check role quota view before transaction commits +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + r | 100 | 7864320 +(1 row) +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + r | spc2 | 100 | 3932160 +(1 row) + +1: COMMIT; +COMMIT +2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + r | 100 | 7864320 +(1 row) +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + r | spc2 | 100 | 3932160 +(1 row) + +DROP TABLE IF EXISTS s1.t; +DROP +DROP TABLE IF EXISTS s2.t; +DROP +DROP TABLE IF EXISTS t1; +DROP +DROP TABLE IF EXISTS t2; +DROP + +DROP SCHEMA IF EXISTS s1; +DROP +DROP SCHEMA IF EXISTS s2; +DROP +DROP ROLE IF EXISTS r; +DROP + +DROP TABLESPACE IF EXISTS spc1; +DROP +DROP TABLESPACE IF EXISTS spc2; +DROP + +!\retcode rm -rf /tmp/spc1; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode rm -rf /tmp/spc2; +-- start_ignore + +-- end_ignore +(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_per_segment_config.out b/gpcontrib/diskquota/tests/isolation2/expected/test_per_segment_config.out new file mode 100644 index 00000000000..79b4a8ffcdc --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_per_segment_config.out @@ -0,0 +1,269 @@ +-- Test one session read tablespace segratio, +-- and at the same time, another session +-- update or insert the segratio + +-- start_ignore +!\retcode mkdir -p /tmp/spc101; +-- start_ignore + +-- end_ignore +(exited with code 0) +-- end_ignore +CREATE SCHEMA 
s101; +CREATE +DROP TABLESPACE IF EXISTS spc101; +DROP +CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; +CREATE + +-- +-- There is no tablesapce per segment quota configed yet +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +1: BEGIN; +BEGIN +1: SELECT diskquota.set_per_segment_quota('spc101', 1); + set_per_segment_quota +----------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +COMMIT +2<: <... completed> + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 1 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +1: BEGIN; +BEGIN +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +COMMIT +2<: <... completed> + set_per_segment_quota +----------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 1 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- +-- There is already a tablesapce per segment quota configed +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_per_segment_quota('spc101', 1); + set_per_segment_quota +----------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +COMMIT +2<: <... 
completed> + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 1 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +COMMIT +2<: <... completed> + set_per_segment_quota +----------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 1 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first delete per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_per_segment_quota('spc101', -1); + set_per_segment_quota +----------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +COMMIT +2<: <... completed> + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- +(0 rows) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first set_schema_tablespace_quota, then delete tablespace per segment ratio +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_per_segment_quota('spc101', -1); +1: COMMIT; +COMMIT +2<: <... 
completed> + set_per_segment_quota +----------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- +(0 rows) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE +DROP SCHEMA s101; +DROP +DROP TABLESPACE spc101; +DROP diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_postmaster_restart.out b/gpcontrib/diskquota/tests/isolation2/expected/test_postmaster_restart.out new file mode 100644 index 00000000000..53bf3c5526d --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_postmaster_restart.out @@ -0,0 +1,161 @@ +!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA postmaster_restart_s; +CREATE +1: SET search_path TO postmaster_restart_s; +SET + +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); +ERROR: schema's disk space quota exceeded with name: 17623 (seg1 172.17.0.2:7003 pid=77318) +1q: ... + +-- launcher should exist +-- [p]ostgres is to filter out the pgrep itself +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +77001 +77006 +77014 +77017 +77025 +77030 +77043 +77057 +77058 +77060 + +-- end_ignore +(exited with code 0) +-- bgworker should exist +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +77092 + +-- end_ignore +(exited with code 0) + +-- stop postmaster +!\retcode pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w stop; +-- start_ignore +waiting for server to shut down.... 
done +server stopped + +-- end_ignore +(exited with code 0) + +-- launcher should be terminated +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +77001 +77006 +77014 +77017 +77025 +77030 + +-- end_ignore +(exited with code 0) +-- bgworker should be terminated +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore + +-- end_ignore +(exited with code 1) + +-- start postmaster +-- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 +-- See https://github.com/greenplum-db/gpdb/pull/9396 +!\retcode pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w -o "-c gp_role=dispatch" start; +-- start_ignore +waiting for server to start....2025-12-11 00:09:46.602285 PST,,,p77338,th1267404928,,,,0,,,seg-1,,,,,"LOG","00000","registered custom resource manager ""Pax resource manager"" with ID 199",,,,,,,,"RegisterCustomRmgr","rmgr.c",139, +2025-12-11 00:09:46.661071 PST,,,p77338,th1267404928,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""log"".",,,,,,"SysLogger_Start","syslogger.c",735, + done +server started + +-- end_ignore +(exited with code 0) +-- Hopefully the bgworker can be started in 5 seconds +!\retcode sleep 5; +-- start_ignore + +-- end_ignore +(exited with code 0) + +-- launcher should be restarted +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +77001 +77006 +77014 +77017 +77025 +77030 +77344 +77358 +77359 +77361 + +-- end_ignore +(exited with code 0) +-- bgworker should be restarted +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +77383 + +-- end_ignore +(exited with code 0) + +1: SET search_path TO postmaster_restart_s; +SET +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect fail +1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); +ERROR: schema's disk space quota exceeded with name: 17623 (seg2 172.17.0.2:7004 pid=77412) +-- enlarge the quota limits +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect succeed +1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); +CREATE 1000000 + +1: DROP SCHEMA postmaster_restart_s CASCADE; +DROP +1q: ... +!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_rejectmap.out b/gpcontrib/diskquota/tests/isolation2/expected/test_rejectmap.out new file mode 100644 index 00000000000..bf54d2975d9 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_rejectmap.out @@ -0,0 +1,738 @@ +-- +-- This file contains tests for dispatching rejectmap and canceling +-- queries in smgrextend hook by relation's relfilenode. +-- + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. 
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE + +-- 1. Test canceling the extending of an ordinary table. +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t1 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... 
completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 2. Test canceling the extending of a toast relation. +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t2 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 3. Test canceling the extending of an appendonly relation. +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t3 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... 
completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 4. Test canceling the extending of an index relation. +CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); +CREATE +CREATE INDEX blocked_t4_index ON blocked_t4(i); +CREATE +INSERT INTO blocked_t4 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t5 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t6 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- Do some clean-ups. +DROP TABLE blocked_t1; +DROP +DROP TABLE blocked_t2; +DROP +DROP TABLE blocked_t3; +DROP +DROP TABLE blocked_t4; +DROP +DROP TABLE blocked_t5; +DROP +DROP TABLE blocked_t6; +DROP + +-- +-- Below are helper functions for testing adding uncommitted relations to rejectmap. +-- +-- start_ignore +CREATE OR REPLACE LANGUAGE plpython3u; +CREATE +-- end_ignore +CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, relowner oid, relnamespace oid, reltablespace oid, relfilenode oid, segid int); +CREATE + +-- This function dumps given relation_cache entries to the given file. +CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: row = v['row'] # The composite type results are different between GP6 & GP7 if isinstance(row, dict): r = "{0},{1},{2},{3},{4},{5},{6}".format( row['reloid'], row['relname'], row['relowner'], row['relnamespace'], row['reltablespace'], row['relfilenode'], row['segid']) else: r = row[1:-1] f.write(r + '\n') $$ LANGUAGE plpython3u; +CREATE + +-- This function reads relation_cache entries from the given file. +CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ with open(filename) as f: for l in f: r = l.split(',') yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) $$ LANGUAGE plpython3u; +CREATE + +-- This function replaces the oid appears in the auxiliary relation's name +-- with the corresponding relname of that oid. 
+CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' /*in func*/ AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; +CREATE + +-- This function helps dispatch rejectmap for the given relation to seg0. +CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE + +-- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
+1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... 
completed> +ERROR: role's disk space quota exceeded with name: 10 (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
+1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... 
completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 13. Test that we are able to block a toast relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
+1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+---------------------------+-----------------+------------ + 0 | 99 | 0 | 10 | pg_toast_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 99 | 0 | 10 | pg_toast_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(3 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+-------------------------------+-----------------+------------ + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(4 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+-------------------------------+-----------------+------------ + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(4 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- Reset fault injection points set by us at the top of this test. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_relation_cache.out b/gpcontrib/diskquota/tests/isolation2/expected/test_relation_cache.out new file mode 100644 index 00000000000..df61fdb810f --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_relation_cache.out @@ -0,0 +1,70 @@ +CREATE DATABASE tempdb1; +CREATE +CREATE DATABASE tempdb2; +CREATE + +-- perpare extension +1:@db_name tempdb1: CREATE EXTENSION diskquota; +CREATE +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2:@db_name tempdb2: CREATE EXTENSION diskquota; +CREATE +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- create a table in tempdb1 +1:@db_name tempdb1: BEGIN; +BEGIN +1:@db_name tempdb1: CREATE TABLE t(i int); +CREATE +1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000); +INSERT 10000 + +-- query relation_cache in tempdb2 +2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache(); + count +------- + 0 +(1 row) + +1:@db_name tempdb1: ABORT; +ABORT + +1:@db_name tempdb1: SELECT diskquota.pause(); + pause +------- + +(1 row) +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1:@db_name tempdb1: DROP EXTENSION diskquota; +DROP +2:@db_name tempdb2: SELECT diskquota.pause(); + pause +------- + +(1 row) +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2:@db_name tempdb2: DROP EXTENSION diskquota; +DROP +1q: ... +2q: ... 
+ +DROP DATABASE tempdb1; +DROP +DROP DATABASE tempdb2; +DROP diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_relation_size.out b/gpcontrib/diskquota/tests/isolation2/expected/test_relation_size.out new file mode 100644 index 00000000000..65efe5006ff --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_relation_size.out @@ -0,0 +1,104 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- +-- 1. Test that when a relation is dropped before diskquota.relation_size() +-- applying stat(2) on the physical file, diskquota.relation_size() consumes +-- the error and returns 0. +-- + +CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i); +CREATE +-- Insert a small amount of data to 't_dropped'. +INSERT INTO t_dropped SELECT generate_series(1, 100); +INSERT 100 +-- Shows that the size of relfilenode is not zero. +SELECT diskquota.relation_size('t_dropped'); + relation_size +--------------- + 98304 +(1 row) + +-- Inject 'suspension' to servers. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) + +-- Session 1 will hang before applying stat(2) to the physical file. +1&: SELECT diskquota.relation_size('t_dropped'); +-- Wait until the fault is triggered to avoid the following race condition: +-- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')" +-- and the query will fail with 'ERROR: relation "t_dropped" does not exist' +SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) +-- Drop the table. +DROP TABLE t_dropped; +DROP +-- Remove the injected 'suspension'. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +-- Session 1 will continue and returns 0. +1<: <... completed> + relation_size +--------------- + 0 +(1 row) + +-- 2. Test whether relation size is correct under concurrent writes for AO tables. +-- Since no row is deleted, diskquota.relation_size() should be equal to +-- pg_relation_size(). 
+ +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE +1: BEGIN; +BEGIN +1: INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT 10000 +2: BEGIN; +BEGIN +2: INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT 10000 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +SELECT diskquota.relation_size('t_ao'); + relation_size +--------------- + 200400 +(1 row) +SELECT pg_relation_size('t_ao'); + pg_relation_size +------------------ + 200400 +(1 row) +DROP TABLE t_ao; +DROP + +SELECT diskquota.resume(); + resume +-------- + +(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_temporary_table.out b/gpcontrib/diskquota/tests/isolation2/expected/test_temporary_table.out new file mode 100644 index 00000000000..8d84ba1c5d5 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_temporary_table.out @@ -0,0 +1,114 @@ +-- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +-- start_ignore +20251211:00:08:29:070839 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 5 --skipvalidation' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA temporary_schema; +CREATE +1: SET search_path TO temporary_schema; +SET +1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); +CREATE +1: INSERT INTO temporary_table SELECT generate_series(1, 10000); +INSERT 10000 +-- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1q: ... + +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -afr +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... 
+20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Coordinator segment instance directory=/home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Attempting forceful termination of any leftover coordinator process +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Stopping coordinator standby host cdw mode=fast +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown standby process on cdw +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:08:53:070951 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:08:53:070951 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20251211:00:08:53:070951 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:- Segments stopped successfully = 6 +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:- Segments with errors during stop = 0 +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown 6 of 6 segment instances +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-Database successfully shutdown with no errors reported +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-Restarting System... + +-- end_ignore +(exited with code 0) + +-- Indicates that there is no temporary table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA temporary_schema CASCADE; +DROP +1q: ... + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +20251211:00:09:07:073948 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... 
+20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_truncate.out b/gpcontrib/diskquota/tests/isolation2/expected/test_truncate.out new file mode 100644 index 00000000000..4964f6ec177 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_truncate.out @@ -0,0 +1,86 @@ +-- Test various race conditions for TRUNCATE. + +-- Case 1: Pulling active table before swapping relfilenode +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +INSERT 1000 +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 98304 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+-------+------- + dummy_t1 | 98304 | -1 + dummy_t1 | 32768 | 0 + dummy_t1 | 32768 | 1 + dummy_t1 | 32768 | 2 +(4 rows) + +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1&: TRUNCATE dummy_t1; +SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +1<: <... completed> +TRUNCATE + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. 
+SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 0 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+------+------- + dummy_t1 | 0 | -1 + dummy_t1 | 0 | 0 + dummy_t1 | 0 | 1 + dummy_t1 | 0 | 2 +(4 rows) +DROP TABLE dummy_t1; +DROP diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_vacuum.out b/gpcontrib/diskquota/tests/isolation2/expected/test_vacuum.out new file mode 100644 index 00000000000..eb43793236e --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_vacuum.out @@ -0,0 +1,99 @@ +-- This file tests various race conditions when performing 'VACUUM FULL'. + +-- 1. When the gpdb is performing 'VACUUM FULL' on some relation, it can be summarized +-- as the following 3 steps: +-- s1) create a new temporary relation (smgrcreate hook will be triggered, newly +-- created relfilenode will be put into shmem). +-- s2) insert data into the newly created relation from the old relation (smgrextend +-- hook will be triggered, newly created relfilenode will be put into shmem). +-- s3) change the old relation's relfilenode to the newly created one. +-- Consider the following situation: +-- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishing, +-- the newly created relfilenode is translated to the newly created temporary relation's oid, +-- the old relation's size cannot be updated. We resolve it by making altered relations' oids +-- constantly active so that the diskquota bgworker keeps updating the altered relation size +-- during 'VACUUM FULL'. +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +INSERT 1000 +DELETE FROM dummy_t1; +DELETE 1000 +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 98304 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+-------+------- + dummy_t1 | 98304 | -1 + dummy_t1 | 32768 | 0 + dummy_t1 | 32768 | 1 + dummy_t1 | 32768 | 2 +(4 rows) +SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1&: VACUUM FULL dummy_t1; +SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +1<: <... 
completed> +VACUUM + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 0 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+------+------- + dummy_t1 | 0 | -1 + dummy_t1 | 0 | 0 + dummy_t1 | 0 | 1 + dummy_t1 | 0 | 2 +(4 rows) +DROP TABLE dummy_t1; +DROP diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_worker_timeout.out b/gpcontrib/diskquota/tests/isolation2/expected/test_worker_timeout.out new file mode 100644 index 00000000000..9f62c0d9ffc --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_worker_timeout.out @@ -0,0 +1,64 @@ +!\retcode gpconfig -c diskquota.worker_timeout -v 1; +-- start_ignore +20251211:00:09:53:077489 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.worker_timeout -v 1' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +1&: SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT pg_sleep(2 * current_setting('diskquota.worker_timeout')::int); + pg_sleep +---------- + +(1 row) + +SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query = 'SELECT diskquota.wait_for_worker_new_epoch();'; + pg_cancel_backend +------------------- + t +(1 row) + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'resume', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +1<: <... completed> +ERROR: canceling statement due to user request + +!\retcode gpconfig -r diskquota.worker_timeout; +-- start_ignore +20251211:00:09:56:077570 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-r diskquota.worker_timeout' + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... 
+20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore +(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/isolation2_schedule b/gpcontrib/diskquota/tests/isolation2/isolation2_schedule new file mode 100644 index 00000000000..5ed558d693a --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/isolation2_schedule @@ -0,0 +1,16 @@ +test: config +test: test_create_extension +test: test_fast_quota_view +test: test_relation_size +test: test_rejectmap +test: test_vacuum +test: test_truncate +test: test_temporary_table +test: test_dropped_table +test: test_postmaster_restart +test: test_worker_timeout +test: test_per_segment_config +test: test_relation_cache +test: test_ereport_from_seg +test: test_drop_extension +test: reset_config diff --git a/gpcontrib/diskquota/tests/isolation2/sql/config.sql b/gpcontrib/diskquota/tests/isolation2/sql/config.sql new file mode 100644 index 00000000000..855ad7e531d --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/config.sql @@ -0,0 +1,17 @@ +--start_ignore +CREATE DATABASE diskquota; +--end_ignore + +!\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +!\retcode gpconfig -c max_worker_processes -v 20 --skipvalidation; + +!\retcode gpstop -raf; + +-- Show the values of all GUC variables +--start_ignore +-- naptime cannot be 0 for release build +1: SHOW diskquota.naptime; +--end_ignore +1: SHOW diskquota.max_active_tables; +1: SHOW diskquota.worker_timeout; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/reset_config.sql b/gpcontrib/diskquota/tests/isolation2/sql/reset_config.sql new file mode 100644 index 00000000000..129fe7b95b4 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/reset_config.sql @@ -0,0 +1,4 @@ +!\retcode gpconfig -c diskquota.naptime -v 2; +!\retcode gpstop -u; + +1: SHOW diskquota.naptime; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/setup.sql b/gpcontrib/diskquota/tests/isolation2/sql/setup.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_create_extension.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_create_extension.sql new file mode 100644 index 00000000000..1cc9c9cb940 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_create_extension.sql @@ -0,0 +1,6 @@ +CREATE EXTENSION diskquota; + +SELECT diskquota.init_table_size_table(); + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_drop_extension.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_drop_extension.sql new file mode 100644 index 00000000000..09f5b11fa7a --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_drop_extension.sql @@ -0,0 +1,3 @@ +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_dropped_table.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_dropped_table.sql new file mode 100644 index 00000000000..56652a79043 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_dropped_table.sql @@ -0,0 +1,29 @@ +-- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at 
startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +!\retcode gpstop -u; + +1: CREATE SCHEMA dropped_schema; +1: SET search_path TO dropped_schema; +1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); +1: INSERT INTO dropped_table SELECT generate_series(1, 10000); +-- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: DROP TABLE dropped_table; +1q: + +-- Restart cluster fastly +!\retcode gpstop -afr; + +-- Indicates that there is no dropped table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; +1: DROP SCHEMA dropped_schema CASCADE; +1q: + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +!\retcode gpstop -u; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_ereport_from_seg.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_ereport_from_seg.sql new file mode 100644 index 00000000000..79cd25b2956 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_ereport_from_seg.sql @@ -0,0 +1,26 @@ +CREATE SCHEMA efs1; +SELECT diskquota.set_schema_quota('efs1', '1MB'); +CREATE TABLE efs1.t(i int); + +INSERT INTO efs1.t SELECT generate_series(1, 10000); +-- wait for refresh of diskquota and check the quota size +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'skip', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO efs1.t SELECT generate_series(1, 10000); + +-- wait for refresh of diskquota and check whether the quota size changes +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + +DROP TABLE efs1.t; +DROP SCHEMA efs1; + +-- Reset fault injection points set by us at the top of this test. 
+SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_fast_quota_view.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_fast_quota_view.sql new file mode 100644 index 00000000000..24ff1f5fd74 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_fast_quota_view.sql @@ -0,0 +1,73 @@ +CREATE SCHEMA s1; +CREATE SCHEMA s2; + +CREATE ROLE r LOGIN SUPERUSER; + +!\retcode mkdir -p /tmp/spc1; +!\retcode mkdir -p /tmp/spc2; + +DROP TABLESPACE IF EXISTS spc1; +CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; +DROP TABLESPACE IF EXISTS spc2; +CREATE TABLESPACE spc2 LOCATION '/tmp/spc2'; + +SELECT diskquota.set_schema_quota('s1', '100 MB'); +SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB'); +SELECT diskquota.set_role_quota('r', '100 MB'); +SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB'); + +-- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view +1: BEGIN; +1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i); +1: INSERT INTO s1.t SELECT generate_series(1, 100000); + +1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i); +1: INSERT INTO s2.t SELECT generate_series(1, 100000); + +1: SELECT diskquota.wait_for_worker_new_epoch(); + +-- check schema quota view before transaction commits +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + +1: COMMIT; +2: SELECT diskquota.wait_for_worker_new_epoch(); +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + +-- login r to test role quota view +1: SET ROLE r; + +-- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view +1: BEGIN; +1: CREATE TABLE t1(i int) DISTRIBUTED BY (i); +1: INSERT INTO t1 SELECT generate_series(1, 100000); + +1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i); +1: INSERT INTO t2 SELECT generate_series(1, 100000); + +1: SELECT diskquota.wait_for_worker_new_epoch(); + +-- check role quota view before transaction commits +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + +1: COMMIT; +2: SELECT diskquota.wait_for_worker_new_epoch(); +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + +DROP TABLE IF EXISTS s1.t; +DROP TABLE IF EXISTS s2.t; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +DROP SCHEMA IF EXISTS s1; +DROP SCHEMA IF EXISTS s2; +DROP ROLE IF EXISTS r; + +DROP TABLESPACE IF EXISTS spc1; +DROP TABLESPACE IF EXISTS spc2; + +!\retcode rm -rf /tmp/spc1; +!\retcode rm -rf /tmp/spc2; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_per_segment_config.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_per_segment_config.sql new file mode 100644 index 00000000000..7592ffc00a2 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_per_segment_config.sql @@ -0,0 
+1,120 @@ +-- Test one session read tablespace segratio, +-- and at the same time, another session +-- update or insert the segratio + +-- start_ignore +!\retcode mkdir -p /tmp/spc101; +-- end_ignore +CREATE SCHEMA s101; +DROP TABLESPACE IF EXISTS spc101; +CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; + +-- +-- There is no tablesapce per segment quota configed yet +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +1: BEGIN; +1: SELECT diskquota.set_per_segment_quota('spc101', 1); +2: BEGIN; +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +1: BEGIN; +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +2: BEGIN; +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- +-- There is already a tablesapce per segment quota configed +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_per_segment_quota('spc101', 1); +2: BEGIN; +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +2: BEGIN; +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read commited, first delete per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_per_segment_quota('spc101', -1); +2: BEGIN; +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = 
diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read committed, first set_schema_tablespace_quota, then delete tablespace per segment ratio +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +2: BEGIN; +2&: SELECT diskquota.set_per_segment_quota('spc101', -1); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; +DROP SCHEMA s101; +DROP TABLESPACE spc101; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_postmaster_restart.in.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_postmaster_restart.in.sql new file mode 100644 index 00000000000..4c5f65a20c6 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_postmaster_restart.in.sql @@ -0,0 +1,52 @@ +!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; +!\retcode gpstop -u > /dev/null; + +1: CREATE SCHEMA postmaster_restart_s; +1: SET search_path TO postmaster_restart_s; + +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect fail +1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); +1q: + +-- launcher should exist +-- [p]ostgres is to filter out the pgrep itself +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- bgworker should exist +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; + +-- stop postmaster +!\retcode pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w stop; + +-- launcher should be terminated +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- bgworker should be terminated +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; + +-- start postmaster +-- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 +-- See https://github.com/greenplum-db/gpdb/pull/9396 +!\retcode @POSTMASTER_START_CMD@; +-- Hopefully the bgworker can be started in 5 seconds +!\retcode sleep 5; + +-- launcher should be restarted +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- bgworker should be restarted +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; + +1: SET search_path TO postmaster_restart_s; +1: SELECT diskquota.wait_for_worker_new_epoch(); +-- expect fail +1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); +-- enlarge the quota limits +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +-- expect succeed +1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); + +1: DROP SCHEMA postmaster_restart_s CASCADE; +1q: +!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_rejectmap.in.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_rejectmap.in.sql new file mode 100644 index 00000000000..3ad115f12c4 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_rejectmap.in.sql @@ -0,0 +1,575 @@ +-- +-- This file contains tests for dispatching rejectmap and canceling +-- queries in smgrextend hook by
relation's relfilenode. +-- + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. +CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ + RETURNS oid AS /*in func*/ +$$ /*in func*/ +BEGIN /*in func*/ + /*in func*/ + CASE /*in func*/ + WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ + ELSE RETURN ( /*in func*/ + CASE tablespaceoid /*in func*/ + WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ + ELSE /*in func*/ + tablespaceoid /*in func*/ + END /*in func*/ + ); /*in func*/ + END CASE; /*in func*/ +END; /*in func*/ +$$ LANGUAGE plpgsql; /*in func*/ + +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) + RETURNS void AS $$ /*in func*/ + DECLARE /*in func*/ + bt int; /*in func*/ + targetoid oid; /*in func*/ + BEGIN /*in func*/ + CASE block_type /*in func*/ + WHEN 'NAMESPACE' THEN /*in func*/ + bt = 0; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'ROLE' THEN /*in func*/ + bt = 1; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ + bt = 2; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'ROLE_TABLESPACE' THEN /*in func*/ + bt = 3; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + END CASE; /*in func*/ + PERFORM diskquota.refresh_rejectmap( /*in func*/ + ARRAY[ /*in func*/ + ROW (targetoid, /*in func*/ + (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ + (SELECT get_real_tablespace_oid( /*in func*/ + block_type, /*in func*/ + (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ + )), /*in func*/ + bt, /*in func*/ + segexceeded) /*in func*/ + ]::diskquota.rejectmap_entry[], /*in func*/ + ARRAY[rel]::oid[]) /*in func*/ + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ + END; $$ /*in func*/ +LANGUAGE 'plpgsql'; + +-- 1. Test canceling the extending of an ordinary table. +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); +INSERT INTO blocked_t1 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Dispatch rejectmap to seg0. 
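Each numbered case in this file repeats the same choreography around the smgrextend hook: suspend the per-relfilenode check on seg0, start a blocked insert, dispatch a rejectmap entry, resume the hook, and observe the cancellation. A condensed, illustrative sketch of that pattern (blocked_tN is a placeholder, not an actual table in this file; block_relation_on_seg0() is the helper defined above):

    -- Suspend the extend-time rejectmap check on seg0.
    SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid)
      FROM gp_segment_configuration WHERE role='p' AND content=0;
    -- Session 1 blocks inside the smgrextend hook while extending blocked_tN.
    1&: INSERT INTO blocked_tN SELECT generate_series(1, 10000);
    SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid)
      FROM gp_segment_configuration WHERE role='p' AND content=0;
    -- Dispatch a rejectmap entry for blocked_tN to seg0, then let the hook resume.
    SELECT block_relation_on_seg0('blocked_tN'::regclass, 'NAMESPACE'::text, false);
    SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid)
      FROM gp_segment_configuration WHERE role='p' AND content=0;
    -- Session 1 returns with a "quota exceeded" error raised from seg0.
    1<:

Case 1 continues below with exactly this dispatch step.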
+SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 2. Test canceling the extending of a toast relation. +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +INSERT INTO blocked_t2 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 3. Test canceling the extending of an appendonly relation. +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO blocked_t3 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 4. Test canceling the extending of an index relation. +CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); +CREATE INDEX blocked_t4_index ON blocked_t4(i); +INSERT INTO blocked_t4 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t4. 
It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); +INSERT INTO blocked_t5 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); +INSERT INTO blocked_t6 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- Do some clean-ups. +DROP TABLE blocked_t1; +DROP TABLE blocked_t2; +DROP TABLE blocked_t3; +DROP TABLE blocked_t4; +DROP TABLE blocked_t5; +DROP TABLE blocked_t6; + +-- +-- Below are helper functions for testing adding uncommitted relations to rejectmap. +-- +-- start_ignore +CREATE OR REPLACE LANGUAGE @PLPYTHON_LANG_STR@; +-- end_ignore +CREATE TYPE cached_relation_entry AS ( + reloid oid, + relname text, + relowner oid, + relnamespace oid, + reltablespace oid, + relfilenode oid, + segid int); + +-- This function dumps given relation_cache entries to the given file. 
+CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) + RETURNS void +AS $$ + rv = plpy.execute(""" + SELECT (oid, relname, relowner, + relnamespace, reltablespace, + relfilenode, gp_segment_id)::cached_relation_entry + FROM gp_dist_random('pg_class') + """) + with open(filename, 'wt') as f: + for v in rv: + row = v['row'] + # The composite type results are different between GP6 & GP7 + if isinstance(row, dict): + r = "{0},{1},{2},{3},{4},{5},{6}".format( + row['reloid'], row['relname'], row['relowner'], + row['relnamespace'], row['reltablespace'], + row['relfilenode'], row['segid']) + else: + r = row[1:-1] + f.write(r + '\n') +$$ LANGUAGE @PLPYTHON_LANG_STR@; + +-- This function reads relation_cache entries from the given file. +CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) + RETURNS SETOF cached_relation_entry +AS $$ + with open(filename) as f: + for l in f: + r = l.split(',') + yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) +$$ LANGUAGE @PLPYTHON_LANG_STR@; + +-- This function replaces the oid appears in the auxiliary relation's name +-- with the corresponding relname of that oid. +CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) + RETURNS text AS $$ /*in func*/ + BEGIN /*in func*/ + RETURN COALESCE( /*in func*/ + REGEXP_REPLACE(given_name, /*in func*/ + '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ + '\1' || /*in func*/ + (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' /*in func*/ + AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); /*in func*/ + END; /*in func*/ +$$ LANGUAGE plpgsql; + +-- This function helps dispatch rejectmap for the given relation to seg0. 
+CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) + RETURNS void AS $$ /*in func*/ + DECLARE /*in func*/ + bt int; /*in func*/ + targetoid oid; /*in func*/ + BEGIN /*in func*/ + CASE block_type /*in func*/ + WHEN 'NAMESPACE' THEN /*in func*/ + bt = 0; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + WHEN 'ROLE' THEN /*in func*/ + bt = 1; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ + bt = 2; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + WHEN 'ROLE_TABLESPACE' THEN /*in func*/ + bt = 3; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + END CASE; /*in func*/ + PERFORM diskquota.refresh_rejectmap( /*in func*/ + ARRAY[ /*in func*/ + ROW (targetoid, /*in func*/ + (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ + (SELECT get_real_tablespace_oid( /*in func*/ + block_type, /*in func*/ + (SELECT reltablespace /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname = rel::text /*in func*/ + AND segid = 0) /*in func*/ + )), /*in func*/ + bt, /*in func*/ + segexceeded) /*in func*/ + ]::diskquota.rejectmap_entry[], /*in func*/ + ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ + END; $$ /*in func*/ +LANGUAGE 'plpgsql'; + +-- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. 
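Cases 7 through 15 all block a relation that was created in a still-open transaction. Because an uncommitted table's pg_class rows are not visible to other sessions, the creating session first dumps gp_dist_random('pg_class') to a file with dump_relation_cache_to_file(), and the helpers above read the per-segment relfilenodes back from that file. As an illustrative, hypothetical check, the dumped entries for the uncommitted table can be inspected from any session:

    -- Read the uncommitted table's per-segment relfilenodes back from the dump file.
    SELECT reloid, relname, relfilenode, segid
      FROM read_relation_cache_from_file('/tmp/test_rejectmap.csv')
     WHERE relname = 'blocked_t7';

Case 8, which starts below, repeats case 7 with the ROLE target type.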
+1: BEGIN; +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
+1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 13. Test that we are able to block a toast relation on seg0 by its namespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, + replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 + ORDER BY relname DESC; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, + replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 + ORDER BY relname DESC; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, + replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 + ORDER BY relname DESC; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- Reset fault injection points set by us at the top of this test. 
+SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_relation_cache.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_relation_cache.sql new file mode 100644 index 00000000000..941e4c7614c --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_relation_cache.sql @@ -0,0 +1,30 @@ +CREATE DATABASE tempdb1; +CREATE DATABASE tempdb2; + +-- perpare extension +1:@db_name tempdb1: CREATE EXTENSION diskquota; +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); +2:@db_name tempdb2: CREATE EXTENSION diskquota; +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); + +-- create a table in tempdb1 +1:@db_name tempdb1: BEGIN; +1:@db_name tempdb1: CREATE TABLE t(i int); +1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000); + +-- query relation_cache in tempdb2 +2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache(); + +1:@db_name tempdb1: ABORT; + +1:@db_name tempdb1: SELECT diskquota.pause(); +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); +1:@db_name tempdb1: DROP EXTENSION diskquota; +2:@db_name tempdb2: SELECT diskquota.pause(); +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); +2:@db_name tempdb2: DROP EXTENSION diskquota; +1q: +2q: + +DROP DATABASE tempdb1; +DROP DATABASE tempdb2; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_relation_size.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_relation_size.sql new file mode 100644 index 00000000000..54ea209d5c0 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_relation_size.sql @@ -0,0 +1,50 @@ +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- +-- 1. Test that when a relation is dropped before diskquota.relation_size() +-- applying stat(2) on the physical file, diskquota.relation_size() consumes +-- the error and returns 0. +-- + +CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i); +-- Insert a small amount of data to 't_dropped'. +INSERT INTO t_dropped SELECT generate_series(1, 100); +-- Shows that the size of relfilenode is not zero. +SELECT diskquota.relation_size('t_dropped'); + +-- Inject 'suspension' to servers. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content>=0; + +-- Session 1 will hang before applying stat(2) to the physical file. +1&: SELECT diskquota.relation_size('t_dropped'); +-- Wait until the fault is triggered to avoid the following race condition: +-- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')" +-- and the query will fail with 'ERROR: relation "t_dropped" does not exist' +SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content>=0; +-- Drop the table. +DROP TABLE t_dropped; +-- Remove the injected 'suspension'. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content>=0; +-- Session 1 will continue and returns 0. +1<: + +-- 2. Test whether relation size is correct under concurrent writes for AO tables. +-- Since no row is deleted, diskquota.relation_size() should be equal to +-- pg_relation_size(). 
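For the concurrent-insert check below, the stated expectation could also be written as a single comparison; a minimal, hypothetical form (the test keeps the two calls separate so the actual byte counts appear in the expected output):

    -- Expect true: no rows were deleted, so both size functions should agree.
    SELECT diskquota.relation_size('t_ao') = pg_relation_size('t_ao') AS sizes_match;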
+ +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +1: BEGIN; +1: INSERT INTO t_ao SELECT generate_series(1, 10000); +2: BEGIN; +2: INSERT INTO t_ao SELECT generate_series(1, 10000); +1: COMMIT; +2: COMMIT; +SELECT diskquota.relation_size('t_ao'); +SELECT pg_relation_size('t_ao'); +DROP TABLE t_ao; + +SELECT diskquota.resume(); diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_temporary_table.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_temporary_table.sql new file mode 100644 index 00000000000..381731791b0 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_temporary_table.sql @@ -0,0 +1,28 @@ +-- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +!\retcode gpstop -u; + +1: CREATE SCHEMA temporary_schema; +1: SET search_path TO temporary_schema; +1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); +1: INSERT INTO temporary_table SELECT generate_series(1, 10000); +-- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); +1q: + +-- Restart cluster fastly +!\retcode gpstop -afr; + +-- Indicates that there is no temporary table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; +1: DROP SCHEMA temporary_schema CASCADE; +1q: + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +!\retcode gpstop -u; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_truncate.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_truncate.sql new file mode 100644 index 00000000000..538b6318209 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_truncate.sql @@ -0,0 +1,31 @@ +-- Test various race conditions for TRUNCATE. + +-- Case 1: Pulling active table before swapping relfilenode +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; + +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +SELECT diskquota.wait_for_worker_new_epoch(); +1&: TRUNCATE dummy_t1; +SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +1<: + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. 
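Once the worker has refreshed, the consistency claim for dummy_t1 can also be expressed as one comparison against the cluster-wide (segid = -1) entry; a hypothetical compact form of the checks below:

    -- Expect true after the bgworker has picked up the post-TRUNCATE relfilenode.
    SELECT size = pg_table_size('dummy_t1') AS size_matches
      FROM diskquota.table_size
     WHERE tableid = 'dummy_t1'::regclass AND segid = -1;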
+SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; +DROP TABLE dummy_t1; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_vacuum.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_vacuum.sql new file mode 100644 index 00000000000..4125ac5f055 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_vacuum.sql @@ -0,0 +1,43 @@ +-- This file tests various race conditions when performing 'VACUUM FULL'. + +-- 1. When the gpdb is performing 'VACUUM FULL' on some relation, it can be summarized +-- as the following 3 steps: +-- s1) create a new temporary relation (smgrcreate hook will be triggered, newly +-- created relfilenode will be put into shmem). +-- s2) insert data into the newly created relation from the old relation (smgrextend +-- hook will be triggered, newly created relfilenode will be put into shmem). +-- s3) change the old relation's relfilenode to the newly created one. +-- Consider the following situation: +-- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishing, +-- the newly created relfilenode is translated to the newly created temporary relation's oid, +-- the old relation's size cannot be updated. We resolve it by making altered relations' oids +-- constantly active so that the diskquota bgworker keeps updating the altered relation size +-- during 'VACUUM FULL'. +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +DELETE FROM dummy_t1; +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; +SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +SELECT diskquota.wait_for_worker_new_epoch(); +1&: VACUUM FULL dummy_t1; +SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +1<: + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. 
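The three-step description at the top of this test can be observed directly in the catalog: VACUUM FULL rewrites the table into a fresh relfilenode and then repoints pg_class at it. A small, hypothetical illustration outside the fault-injection flow of this test:

    SELECT relfilenode FROM pg_class WHERE relname = 'dummy_t1';  -- before the rewrite
    VACUUM FULL dummy_t1;
    SELECT relfilenode FROM pg_class WHERE relname = 'dummy_t1';  -- a different value after the rewrite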
+SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; +DROP TABLE dummy_t1; diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_worker_timeout.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_worker_timeout.sql new file mode 100644 index 00000000000..630cd7f88f2 --- /dev/null +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_worker_timeout.sql @@ -0,0 +1,20 @@ +!\retcode gpconfig -c diskquota.worker_timeout -v 1; +!\retcode gpstop -u; + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=-1; + +1&: SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT pg_sleep(2 * current_setting('diskquota.worker_timeout')::int); + +SELECT pg_cancel_backend(pid) FROM pg_stat_activity +WHERE query = 'SELECT diskquota.wait_for_worker_new_epoch();'; + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'resume', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=-1; + +1<: + +!\retcode gpconfig -r diskquota.worker_timeout; +!\retcode gpstop -u; diff --git a/gpcontrib/diskquota/tests/regress/.gitignore b/gpcontrib/diskquota/tests/regress/.gitignore new file mode 100644 index 00000000000..484ab7e5c61 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/.gitignore @@ -0,0 +1 @@ +results/* diff --git a/gpcontrib/diskquota/tests/regress/diskquota_schedule b/gpcontrib/diskquota/tests/regress/diskquota_schedule new file mode 100644 index 00000000000..825600636bd --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/diskquota_schedule @@ -0,0 +1,47 @@ +test: config +test: test_create_extension +test: test_max_monitored_databases +test: test_readiness_logged +test: test_init_table_size_table +test: test_relation_size +test: test_relation_cache +test: test_uncommitted_table_size +test: test_pause_and_resume +test: test_pause_and_resume_multiple_db +test: test_drop_after_pause +test: test_show_status +test: test_quota_view_no_table +# disable this test due to GPDB behavior change +# test: test_table_size +test: test_fast_disk_check +test: test_worker_not_ready +#test: test_insert_after_drop +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index test_recreate +test: test_ctas_no_preload_lib +test: test_ctas_before_set_quota +test: test_truncate +test: test_delete_quota +test: test_partition +test: test_vacuum +test: test_primary_failure +test: test_extension +test: test_activetable_limit +test: test_many_active_tables +test: test_fetch_table_stat +test: test_appendonly +test: test_rejectmap +test: test_clean_rejectmap_after_drop +test: test_rejectmap_mul_db +test: test_ctas_pause +test: test_ctas_role +test: test_ctas_schema +test: test_ctas_tablespace_role +test: test_ctas_tablespace_schema +test: test_default_tablespace +test: test_tablespace_diff_schema +# test: test_worker_schedule +# test: test_worker_schedule_exception +test: test_dbname_encoding +test: test_drop_any_extension +test: test_drop_extension +test: reset_config diff --git a/gpcontrib/diskquota/tests/regress/expected/config.out b/gpcontrib/diskquota/tests/regress/expected/config.out new file mode 100644 index 00000000000..afeaa6b1d03 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/config.out @@ 
-0,0 +1,28 @@ +\c +-- Show the values of all GUC variables +-- start_ignore +-- naptime cannot be 0 on Release build, so it will be 2 +SHOW diskquota.naptime; + diskquota.naptime +------------------- + 0 +(1 row) +-- end_ignore +SHOW diskquota.max_active_tables; + diskquota.max_active_tables +----------------------------- + 307200 +(1 row) + +SHOW diskquota.worker_timeout; + diskquota.worker_timeout +-------------------------- + 60 +(1 row) + +SHOW diskquota.hard_limit; + diskquota.hard_limit +---------------------- + off +(1 row) + diff --git a/gpcontrib/diskquota/tests/regress/expected/dummy.out b/gpcontrib/diskquota/tests/regress/expected/dummy.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/gpcontrib/diskquota/tests/regress/expected/reset_config.out b/gpcontrib/diskquota/tests/regress/expected/reset_config.out new file mode 100644 index 00000000000..c65092e54b4 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/reset_config.out @@ -0,0 +1,6 @@ +SHOW diskquota.naptime; + diskquota.naptime +------------------- + 2 +(1 row) + diff --git a/gpcontrib/diskquota/tests/regress/expected/test_activetable_limit.out b/gpcontrib/diskquota/tests/regress/expected/test_activetable_limit.out new file mode 100644 index 00000000000..c556f32bb38 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_activetable_limit.out @@ -0,0 +1,56 @@ +-- table in 'diskquota not enabled database' should not be activetable +\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +\! gpstop -arf > /dev/null +\c +CREATE DATABASE test_tablenum_limit_01; +CREATE DATABASE test_tablenum_limit_02; +\c test_tablenum_limit_01 +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); +\c test_tablenum_limit_02 +CREATE EXTENSION diskquota; +CREATE SCHEMA s; +SELECT diskquota.set_schema_quota('s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -- activetable = 1 +INSERT INTO s.t1 SELECT generate_series(1, 100000); -- ok. diskquota soft limit does not check when first write +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -- activetable = 2 +INSERT INTO s.t2 SELECT generate_series(1, 10); -- expect failed +ERROR: schema's disk space quota exceeded with name: s +CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -- activetable = 3 should not crash. +INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect failed +ERROR: schema's disk space quota exceeded with name: s +-- Q: why diskquota still works when activetable = 3? +-- A: the activetable limit by shmem size, calculate by hash_estimate_size() +-- the result will bigger than sizeof(DiskQuotaActiveTableEntry) * max_active_tables +-- the real capacity of this data structure based on the hash conflict probability. +-- so we can not predict when the data structure will be fill in fully. +-- +-- this test case is useless, remove this if anyone dislike it. 
+-- but the hash capacity is smaller than 6, so the test case works for issue 51 +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_tablenum_limit_01; +DROP DATABASE test_tablenum_limit_02; +\! gpconfig -r diskquota.max_active_tables > /dev/null +\! gpstop -arf > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/expected/test_appendonly.out b/gpcontrib/diskquota/tests/regress/expected/test_appendonly.out new file mode 100644 index 00000000000..cfa19a46114 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_appendonly.out @@ -0,0 +1,78 @@ +-- Create new schema for running tests. +CREATE SCHEMA s_appendonly; +SET search_path TO s_appendonly; +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +-- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations. +CREATE INDEX index_t ON t_ao(i); +CREATE INDEX index_t2 ON t_aoco(i); +-- 1. Show that the relation's size in diskquota.table_size +-- is identical to the result of pg_table_size(). +INSERT INTO t_ao SELECT generate_series(1, 100); +INSERT INTO t_aoco SELECT generate_series(1, 100); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Query the size of t_ao. +SELECT tableid::regclass, size + FROM diskquota.table_size + WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_ao') and segid=-1; + tableid | size +---------+-------- + t_ao | 558168 +(1 row) + +SELECT pg_table_size('t_ao'); + pg_table_size +--------------- + 558168 +(1 row) + +-- Query the size of t_aoco. +SELECT tableid::regclass, size + FROM diskquota.table_size + WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_aoco') and segid=-1; + tableid | size +---------+-------- + t_aoco | 557584 +(1 row) + +SELECT pg_table_size('t_aoco'); + pg_table_size +--------------- + 557584 +(1 row) + +-- 2. Test that we are able to perform quota limit on appendonly tables. +SELECT diskquota.set_schema_quota('s_appendonly', '2 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect success. +INSERT INTO t_ao SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail. +INSERT INTO t_ao SELECT generate_series(1, 10); +ERROR: schema's disk space quota exceeded with name: s_appendonly +INSERT INTO t_aoco SELECT generate_series(1, 10); +ERROR: schema's disk space quota exceeded with name: s_appendonly +DROP TABLE t_ao; +DROP TABLE t_aoco; +SET search_path TO DEFAULT; +DROP SCHEMA s_appendonly; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_clean_rejectmap_after_drop.out b/gpcontrib/diskquota/tests/regress/expected/test_clean_rejectmap_after_drop.out new file mode 100644 index 00000000000..4da3507cd1d --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_clean_rejectmap_after_drop.out @@ -0,0 +1,41 @@ +CREATE DATABASE test_clean_rejectmap_after_drop; +\c test_clean_rejectmap_after_drop +CREATE EXTENSION diskquota; +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null +CREATE ROLE r; +SELECT diskquota.set_role_quota('r', '1MB'); + set_role_quota +---------------- + +(1 row) + +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO r; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +ERROR: role's disk space quota exceeded with name: 34523 (seg0 127.0.0.1:6002 pid=23690) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +INSERT INTO b SELECT generate_series(1, 100); -- ok +\c contrib_regression +DROP DATABASE test_clean_rejectmap_after_drop; +DROP ROLE r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/expected/test_column.out b/gpcontrib/diskquota/tests/regress/expected/test_column.out new file mode 100644 index 00000000000..61f79ca5458 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_column.out @@ -0,0 +1,44 @@ +-- Test alter table add column +CREATE SCHEMA scolumn; +SELECT diskquota.set_schema_quota('scolumn', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO scolumn; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- expect fail +INSERT INTO a2 SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: scolumn +ALTER TABLE a2 ADD COLUMN j VARCHAR(50); +UPDATE a2 SET j = 'add value for column j'; +ERROR: schema's disk space quota exceeded with name: scolumn +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert failed after add column +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: scolumn +DROP TABLE a2; +RESET search_path; +DROP SCHEMA scolumn; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_copy.out b/gpcontrib/diskquota/tests/regress/expected/test_copy.out new file mode 100644 index 00000000000..86799232a99 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_copy.out @@ -0,0 +1,28 @@ +-- Test copy +CREATE SCHEMA s3; +SELECT diskquota.set_schema_quota('s3', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s3; +\! seq 100 > /tmp/csmall.txt +CREATE TABLE c (i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
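As the regress outputs in this area illustrate, the write that first pushes a schema over its quota still succeeds under the default soft limit; rejections only begin once the background worker has refreshed its size model, which is why these tests call wait_for_worker_new_epoch() before asserting a failure. A hypothetical minimal sequence showing that timing, using the s3 schema and table c from the test below:

    SELECT diskquota.set_schema_quota('s3', '1 MB');
    INSERT INTO c SELECT generate_series(1,100000);  -- succeeds: the quota has not been re-evaluated yet
    SELECT diskquota.wait_for_worker_new_epoch();    -- bgworker recomputes sizes and refreshes its reject state
    INSERT INTO c SELECT generate_series(1,10);      -- now fails: schema's disk space quota exceeded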
+COPY c FROM '/tmp/csmall.txt'; +-- expect failed +INSERT INTO c SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect copy fail +COPY c FROM '/tmp/csmall.txt'; +ERROR: schema's disk space quota exceeded with name: s3 +DROP TABLE c; +RESET search_path; +DROP SCHEMA s3; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_create_extension.out b/gpcontrib/diskquota/tests/regress/expected/test_create_extension.out new file mode 100644 index 00000000000..a90178ce350 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_create_extension.out @@ -0,0 +1,14 @@ +CREATE EXTENSION diskquota; +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_before_set_quota.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_before_set_quota.out new file mode 100644 index 00000000000..ac69b2b5226 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_before_set_quota.out @@ -0,0 +1,61 @@ +CREATE ROLE test SUPERUSER; +SET ROLE test; +CREATE TABLE t_before_set_quota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_before_set_quota'::regclass ORDER BY segid; + tableid | size | segid +--------------------+---------+------- + t_before_set_quota | 3637248 | -1 + t_before_set_quota | 1212416 | 0 + t_before_set_quota | 1212416 | 1 + t_before_set_quota | 1212416 | 2 +(4 rows) + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +SELECT diskquota.set_role_quota(current_role, '1MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + rolname +--------- + test +(1 row) + +SELECT diskquota.set_role_quota(current_role, '-1'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP TABLE t_before_set_quota; +RESET ROLE; +DROP ROLE test; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_no_preload_lib.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_no_preload_lib.out new file mode 100644 index 00000000000..b85a18ac92b --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_no_preload_lib.out @@ -0,0 +1,85 @@ +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -far > /dev/null +\c +CREATE ROLE test SUPERUSER; +SET ROLE test; +-- Create table with diskquota disabled +CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null +\! 
gpstop -far > /dev/null +\c +SET ROLE test; +-- Init table_size to include the table +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Restart to load diskquota.table_size to the memory. +\! gpstop -far > /dev/null +\c +SET ROLE test; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; + tableid | size | segid +---------------------+---------+------- + t_without_diskquota | 3637248 | -1 + t_without_diskquota | 1212416 | 0 + t_without_diskquota | 1212416 | 1 + t_without_diskquota | 1212416 | 2 +(4 rows) + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +SELECT diskquota.set_role_quota(current_role, '1MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + rolname +--------- + test +(1 row) + +SELECT diskquota.set_role_quota(current_role, '-1'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + rolname +--------- +(0 rows) + +DROP TABLE t_without_diskquota; +RESET ROLE; +DROP ROLE test; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_pause.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_pause.out new file mode 100644 index 00000000000..e4d6319be48 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_pause.out @@ -0,0 +1,41 @@ +CREATE SCHEMA hardlimit_s; +SET search_path TO hardlimit_s; +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ERROR: schema's disk space quota exceeded with name: 110528 (seg1 127.0.0.1:6003 pid=73892) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- disable hardlimit and do some clean-ups. +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +DROP SCHEMA hardlimit_s CASCADE; +NOTICE: drop cascades to table t1 diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_role.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_role.out new file mode 100644 index 00000000000..3ce86c0e952 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_role.out @@ -0,0 +1,86 @@ +-- Test that diskquota is able to cancel a running CTAS query by the role quota. +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- end_ignore +CREATE ROLE hardlimit_r; +SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); + set_role_quota +---------------- + +(1 row) + +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; +SET ROLE hardlimit_r; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- temp table +CREATE TEMP TABLE t2 (i) AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. 
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups. +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET ROLE; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; +DROP ROLE hardlimit_r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_schema.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_schema.out new file mode 100644 index 00000000000..173fcb723c8 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_schema.out @@ -0,0 +1,70 @@ +-- Test that diskquota is able to cancel a running CTAS query by the schema quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO hardlimit_s; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+[hardlimit] schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups. +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET search_path; +DROP SCHEMA hardlimit_s; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_role.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_role.out new file mode 100644 index 00000000000..ba2b4fdb2ca --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_role.out @@ -0,0 +1,85 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- start_ignore +\! mkdir -p /tmp/ctas_rolespc +-- end_ignore +-- prepare role and tablespace. +DROP TABLESPACE IF EXISTS ctas_rolespc; +NOTICE: tablespace "ctas_rolespc" does not exist, skipping +CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; +CREATE ROLE hardlimit_r; +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; +GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; +SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SET default_tablespace = ctas_rolespc; +SET ROLE hardlimit_r; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+[hardlimit] tablespace-role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-role's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups. +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET ROLE; +RESET default_tablespace; +DROP TABLESPACE ctas_rolespc; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; +DROP ROLE hardlimit_r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_schema.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_schema.out new file mode 100644 index 00000000000..50ac7071935 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_schema.out @@ -0,0 +1,82 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- start_ignore +\! mkdir -p /tmp/ctas_schemaspc +-- end_ignore +-- prepare tablespace and schema +DROP TABLESPACE IF EXISTS ctas_schemaspc; +NOTICE: tablespace "ctas_schemaspc" does not exist, skipping +CREATE TABLESPACE ctas_schemaspc LOCATION '/tmp/ctas_schemaspc'; +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_tablespace_quota('hardlimit_s', 'ctas_schemaspc', '1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO hardlimit_s; +SET default_tablespace = ctas_schemaspc; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET search_path; +RESET default_tablespace; +DROP SCHEMA hardlimit_s; +DROP TABLESPACE ctas_schemaspc; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/expected/test_dbname_encoding.out b/gpcontrib/diskquota/tests/regress/expected/test_dbname_encoding.out new file mode 100644 index 00000000000..67e2f62d4ed --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_dbname_encoding.out @@ -0,0 +1,40 @@ +-- create a database with non-ascii characters +CREATE DATABASE 数据库1; +\c 数据库1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check whether current database name is logged. 
+SELECT + count(logpid) > 0 +FROM + gp_toolkit.__gp_log_master_ext +WHERE + position( + '[diskquota] start disk quota worker process to monitor database' in logmessage + ) > 0 + AND position(current_database() in logmessage) > 0; + ?column? +---------- + t +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE 数据库1; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_default_tablespace.out b/gpcontrib/diskquota/tests/regress/expected/test_default_tablespace.out new file mode 100644 index 00000000000..8b9338fa6e7 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_default_tablespace.out @@ -0,0 +1,186 @@ +-- test role_tablespace_quota works with tables/databases in default tablespace +-- test role_tablespace_quota works with tables/databases in non-default tablespace with hard limits on +-- start_ignore +\! mkdir -p /tmp/custom_tablespace +-- end_ignore +DROP ROLE if EXISTS role1; +NOTICE: role "role1" does not exist, skipping +DROP ROLE if EXISTS role2; +NOTICE: role "role2" does not exist, skipping +CREATE ROLE role1 SUPERUSER; +CREATE ROLE role2 SUPERUSER; +SET ROLE role1; +DROP TABLE if EXISTS t; +NOTICE: table "t" does not exist, skipping +CREATE TABLE t (i int) DISTRIBUTED BY (i); +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to success +INSERT INTO t SELECT generate_series(1, 100); +INSERT INTO t SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to fail +INSERT INTO t SELECT generate_series(1, 1000000); +ERROR: tablespace: pg_default, role: role1 diskquota exceeded +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + rolname | spcname | target_type +---------+------------+----------------------- + role1 | pg_default | ROLE_TABLESPACE_QUOTA +(1 row) + +DROP TABLE IF EXISTS t; +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '-1'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SET ROLE role2; +CREATE TABLE t (i int) DISTRIBUTED BY (i); +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role2', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to fail because of hard limits +INSERT INTO t SELECT generate_series(1, 50000000); +ERROR: tablespace: 1663, role: 3050113 diskquota exceeded (seg1 127.0.0.1:6003 pid=21307) +DROP TABLE IF EXISTS t; +SET ROLE role1; +-- database in customized tablespace +CREATE TABLESPACE custom_tablespace LOCATION '/tmp/custom_tablespace'; +CREATE DATABASE db_with_tablespace TABLESPACE custom_tablespace; +\c db_with_tablespace; +SET ROLE role1; +CREATE EXTENSION diskquota; +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to success +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to fail +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +ERROR: tablespace: custom_tablespace, role: role1 diskquota exceeded +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + rolname | spcname | target_type +---------+-------------------+----------------------- + role1 | custom_tablespace | ROLE_TABLESPACE_QUOTA +(1 row) + +DROP TABLE IF EXISTS t_in_custom_tablespace; +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '-1'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SET ROLE role2; +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role2', 'custom_tablespace', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP TABLE IF EXISTS t_in_custom_tablespace; +NOTICE: table "t_in_custom_tablespace" does not exist, skipping +-- expect insert to fail because of hard limits +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); +ERROR: tablespace: 3050120, role: 3050113 diskquota exceeded (seg0 127.0.0.1:6002 pid=22270) +-- clean up +DROP TABLE IF EXISTS t_in_custom_tablespace; +NOTICE: table "t_in_custom_tablespace" does not exist, skipping +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION IF EXISTS diskquota; +\c contrib_regression; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP DATABASE IF EXISTS db_with_tablespace; +DROP TABLESPACE IF EXISTS custom_tablespace; +RESET ROLE; +DROP ROLE IF EXISTS role1; +DROP ROLE IF EXISTS role2; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_delete_quota.out b/gpcontrib/diskquota/tests/regress/expected/test_delete_quota.out new file mode 100644 index 00000000000..c0ee3de0649 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_delete_quota.out @@ -0,0 +1,39 @@ +-- Test delete disk quota +CREATE SCHEMA deleteschema; +SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO deleteschema; +CREATE TABLE c (i INT) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- expect failed +INSERT INTO c SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +INSERT INTO c SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: deleteschema +SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO c SELECT generate_series(1,100); +DROP TABLE c; +RESET search_path; +DROP SCHEMA deleteschema; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_drop_after_pause.out b/gpcontrib/diskquota/tests/regress/expected/test_drop_after_pause.out new file mode 100644 index 00000000000..961d56fce20 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_drop_after_pause.out @@ -0,0 +1,66 @@ +CREATE DATABASE test_drop_after_pause; +\c test_drop_after_pause +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a SELECT generate_series(1,10000000); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: 16933 (seg2 127.0.0.1:6004 pid=24622) +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_drop_after_pause; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_drop_any_extension.out b/gpcontrib/diskquota/tests/regress/expected/test_drop_any_extension.out new file mode 100644 index 00000000000..c2fadb86c97 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_drop_any_extension.out @@ -0,0 +1,34 @@ +CREATE DATABASE test_drop_db; +\c test_drop_db +CREATE EXTENSION diskquota; +CREATE EXTENSION gp_inject_fault; +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.set_schema_quota(current_schema, '1MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE t(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +DROP EXTENSION gp_inject_fault; +-- expect success +INSERT INTO t SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +INSERT INTO t SELECT generate_series(1, 100000); +ERROR: schema's disk space quota exceeded with name: public +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_drop_db; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_drop_extension.out b/gpcontrib/diskquota/tests/regress/expected/test_drop_extension.out new file mode 100644 index 00000000000..b946654c7f3 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_drop_extension.out @@ -0,0 +1,13 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_drop_table.out b/gpcontrib/diskquota/tests/regress/expected/test_drop_table.out new file mode 100644 index 00000000000..8827d2dff4a --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_drop_table.out @@ -0,0 +1,38 @@ +-- Test Drop table +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO sdrtbl; +CREATE TABLE a(i INT) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+CREATE TABLE a2(i INT) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: sdrtbl +DROP TABLE a; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO a2 SELECT generate_series(1,100); +DROP TABLE a2; +RESET search_path; +DROP SCHEMA sdrtbl; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_extension.out b/gpcontrib/diskquota/tests/regress/expected/test_extension.out new file mode 100644 index 00000000000..25b4c7a4cd3 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_extension.out @@ -0,0 +1,533 @@ +-- NOTE: when test this script, you must make sure that there is no diskquota +-- worker process. +CREATE DATABASE dbx0 ; +CREATE DATABASE dbx1 ; +CREATE DATABASE dbx2 ; +CREATE DATABASE dbx3 ; +CREATE DATABASE dbx4 ; +CREATE DATABASE dbx5 ; +CREATE DATABASE dbx6 ; +CREATE DATABASE dbx7 ; +CREATE DATABASE dbx8 ; +CREATE DATABASE dbx9 ; +CREATE DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 20 --skipvalidation +20220802:15:47:27:028366 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 20 --skipvalidation' +\! gpstop -arf +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... 
+20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:43:28:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:43:28:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20220802:16:43:28:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220802:16:43:30:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c +show max_worker_processes; + max_worker_processes +---------------------- + 20 +(1 row) + +show diskquota.max_workers; + diskquota.max_workers +----------------------- + 20 +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c dbx0 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx1 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +INSERT INTO SX.a values(generate_series(0, 100000)); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded 
with name: sx +DROP TABLE SX.a; +\c dbx2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx3 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx4 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx5 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx6 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx7 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT 
diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx8 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx9 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c dbx10 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c dbx0 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx3 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx4 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx5 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx6 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx7 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx8 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx9 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx10 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE dbx0 ; 
+DROP DATABASE dbx1 ; +DROP DATABASE dbx2 ; +DROP DATABASE dbx3 ; +DROP DATABASE dbx4 ; +DROP DATABASE dbx5 ; +DROP DATABASE dbx6 ; +DROP DATABASE dbx7 ; +DROP DATABASE dbx8 ; +DROP DATABASE dbx9 ; +DROP DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +20220802:15:49:09:029439 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1 --skipvalidation' +\! gpstop -arf; +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:32:35:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220802:16:32:35:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:32:36:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:32:40:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:32:40:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20220802:16:32:40:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220802:16:32:44:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c +show diskquota.max_workers; + diskquota.max_workers +----------------------- + 1 +(1 row) + diff --git a/gpcontrib/diskquota/tests/regress/expected/test_fast_disk_check.out b/gpcontrib/diskquota/tests/regress/expected/test_fast_disk_check.out new file mode 100644 index 00000000000..b38b931b07b --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_fast_disk_check.out @@ -0,0 +1,20 @@ +-- Test SCHEMA +CREATE SCHEMA s1; +SET search_path to s1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,200000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; + ?column? +---------- + t +(1 row) + +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_fetch_table_stat.out b/gpcontrib/diskquota/tests/regress/expected/test_fetch_table_stat.out new file mode 100644 index 00000000000..3fbde382f86 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_fetch_table_stat.out @@ -0,0 +1,37 @@ +-- +-- 1. Test that when an error occurs in diskquota_fetch_table_stat +-- the error message is preserved for us to debug. +-- +CREATE TABLE t_error_handling (i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- Inject an error to a segment server, since this UDF is only called on segments. +SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Dispatch diskquota_fetch_table_stat to segments. 
+-- There should be a warning message from segment server saying: +-- fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error' +-- We're not interested in the oid here, we aggregate the result by COUNT(*). +SELECT COUNT(*) + FROM (SELECT diskquota.diskquota_fetch_table_stat(1, array[(SELECT oid FROM pg_class WHERE relname='t_error_handling')]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0) AS count; + count +------- + 1 +(1 row) + +-- Reset the fault injector to prevent future failure. +SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Do some clean-ups. +DROP TABLE t_error_handling; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_index.out b/gpcontrib/diskquota/tests/regress/expected/test_index.out new file mode 100644 index 00000000000..1c317f3ba62 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_index.out @@ -0,0 +1,133 @@ +-- Test schema +-- start_ignore +\! mkdir -p /tmp/indexspc +-- end_ignore +CREATE SCHEMA indexschema1; +DROP TABLESPACE IF EXISTS indexspc; +NOTICE: tablespace "indexspc" does not exist, skipping +CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; +SET search_path TO indexschema1; +CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); +INSERT INTO test_index_a SELECT generate_series(1,20000); +SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view +WHERE schema_name='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 1081344 +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + tableid | size | segid +--------------+---------+------- + test_index_a | 1081344 | -1 + test_index_a | 360448 | 0 + test_index_a | 360448 | 1 + test_index_a | 360448 | 2 +(4 rows) + +-- create index for the table, index in default tablespace +CREATE INDEX a_index ON test_index_a(i); +INSERT INTO test_index_a SELECT generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 1441792 +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + tableid | size | segid +--------------+---------+------- + test_index_a | 1441792 | -1 + test_index_a | 491520 | 0 + test_index_a | 491520 | 1 + test_index_a | 458752 | 2 +(4 rows) + 
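The index test here tracks per-tablespace usage for a schema through diskquota's quota view and table_size catalog; the same API can be exercised in isolation. A minimal sketch, assuming diskquota is installed and the /tmp/demo_spc directory exists; the demo_spc tablespace and demo_ix schema are illustrative names, not objects from these tests:

CREATE TABLESPACE demo_spc LOCATION '/tmp/demo_spc';
CREATE SCHEMA demo_ix;
SELECT diskquota.set_schema_tablespace_quota('demo_ix', 'demo_spc', '2 MB');
CREATE TABLE demo_ix.t (i int) TABLESPACE demo_spc DISTRIBUTED BY (i);
INSERT INTO demo_ix.t SELECT generate_series(1, 20000);
SELECT diskquota.wait_for_worker_new_epoch();
-- Current usage, as diskquota sees it, is exposed through these relations:
SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes
FROM diskquota.show_fast_schema_tablespace_quota_view
WHERE schema_name = 'demo_ix' AND tablespace_name = 'demo_spc';
SELECT tableid::regclass, size, segid
FROM diskquota.table_size
WHERE tableid = 'demo_ix.t'::regclass
ORDER BY segid;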
+SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'a_index'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + a_index | 1015808 | -1 + a_index | 327680 | 0 + a_index | 327680 | 1 + a_index | 327680 | 2 +(4 rows) + +-- add index to tablespace indexspc +ALTER index a_index SET TABLESPACE indexspc; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 2457600 +(1 row) + +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; + size | segid +---------+------- + 1441792 | -1 + 1015808 | -1 +(2 rows) + +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded +-- index tablespace quota exceeded +ALTER table test_index_a SET TABLESPACE pg_default; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +INSERT INTO test_index_a SELECT generate_series(1,200000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded +RESET search_path; +DROP INDEX indexschema1.a_index; +DROP TABLE indexschema1.test_index_a; +DROP SCHEMA indexschema1; +DROP TABLESPACE indexspc; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_init_table_size_table.out b/gpcontrib/diskquota/tests/regress/expected/test_init_table_size_table.out new file mode 100644 index 00000000000..fe0347070ec --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_init_table_size_table.out @@ -0,0 +1,71 @@ +-- heap table +CREATE TABLE t(i int) DISTRIBUTED BY (i); +INSERT INTO t SELECT generate_series(1, 100000); +-- heap table index +CREATE INDEX idx on t(i); +-- toast table +CREATE TABLE toast(t text) DISTRIBUTED BY (t); +INSERT INTO toast SELECT repeat('a', 10000) FROM generate_series(1, 1000); +-- toast table index +CREATE INDEX toast_idx on toast(t); +-- AO table +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT generate_series(1, 100000); +-- AO table index +CREATE INDEX ao_idx on ao(i); +-- AOCS table +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +-- AOCS table index +CREATE INDEX aocs_idx on aocs(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Tables here are fetched by diskquota_fetch_table_stat() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +-----------+----------+------- + t | 3932160 | -1 + idx | 2490368 | -1 + 
toast | 393216 | -1 + toast_idx | 163840 | -1 + ao | 1558696 | -1 + ao_idx | 2490368 | -1 + aocs | 10649752 | -1 + aocs_idx | 524288 | -1 +(8 rows) + +-- init diskquota.table_size +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- diskquota.table_size should not change after init_table_size_table() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +-----------+----------+------- + t | 3932160 | -1 + idx | 2490368 | -1 + toast | 393216 | -1 + toast_idx | 163840 | -1 + ao | 1558696 | -1 + ao_idx | 2490368 | -1 + aocs | 10649752 | -1 + aocs_idx | 524288 | -1 +(8 rows) + +DROP TABLE t; +DROP TABLE toast; +DROP TABLE ao; +DROP TABLE aocs; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_insert_after_drop.out b/gpcontrib/diskquota/tests/regress/expected/test_insert_after_drop.out new file mode 100644 index 00000000000..06410d063f0 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_insert_after_drop.out @@ -0,0 +1,31 @@ +CREATE DATABASE db_insert_after_drop; +\c db_insert_after_drop +CREATE EXTENSION diskquota; +-- Test Drop Extension +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO sdrtbl; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: sdrtbl +DROP EXTENSION diskquota; +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +\c postgres +DROP DATABASE db_insert_after_drop; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_many_active_tables.out b/gpcontrib/diskquota/tests/regress/expected/test_many_active_tables.out new file mode 100644 index 00000000000..8bf8158708e --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_many_active_tables.out @@ -0,0 +1,1030 @@ +CREATE TABLE t1 (pk int, val int) +DISTRIBUTED BY (pk) +PARTITION BY RANGE (pk) (START (1) END (1000) EVERY (1)); +NOTICE: CREATE TABLE will create partition "t1_1_prt_1" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_2" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_3" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_4" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_5" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_6" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_7" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_8" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_9" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_10" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_11" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_12" for table "t1" +NOTICE: CREATE TABLE 
will create partition "t1_1_prt_13" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_14" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_15" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_16" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_17" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_18" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_19" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_20" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_21" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_22" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_23" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_24" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_25" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_26" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_27" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_28" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_29" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_30" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_31" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_32" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_33" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_34" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_35" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_36" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_37" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_38" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_39" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_40" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_41" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_42" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_43" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_44" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_45" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_46" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_47" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_48" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_49" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_50" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_51" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_52" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_53" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_54" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_55" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_56" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_57" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_58" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_59" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_60" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_61" for table "t1" 
+NOTICE: CREATE TABLE will create partition "t1_1_prt_62" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_63" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_64" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_65" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_66" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_67" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_68" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_69" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_70" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_71" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_72" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_73" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_74" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_75" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_76" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_77" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_78" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_79" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_80" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_81" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_82" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_83" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_84" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_85" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_86" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_87" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_88" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_89" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_90" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_91" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_92" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_93" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_94" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_95" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_96" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_97" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_98" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_99" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_100" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_101" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_102" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_103" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_104" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_105" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_106" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_107" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_108" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_109" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_110" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_111" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_112" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_113" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_114" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_115" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_116" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_117" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_118" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_119" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_120" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_121" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_122" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_123" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_124" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_125" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_126" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_127" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_128" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_129" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_130" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_131" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_132" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_133" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_134" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_135" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_136" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_137" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_138" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_139" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_140" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_141" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_142" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_143" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_144" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_145" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_146" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_147" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_148" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_149" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_150" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_151" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_152" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_153" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_154" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_155" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_156" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_157" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_158" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_159" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_160" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_161" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_162" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_163" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_164" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_165" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_166" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_167" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_168" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_169" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_170" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_171" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_172" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_173" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_174" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_175" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_176" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_177" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_178" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_179" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_180" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_181" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_182" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_183" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_184" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_185" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_186" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_187" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_188" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_189" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_190" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_191" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_192" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_193" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_194" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_195" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_196" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_197" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_198" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_199" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_200" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_201" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_202" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_203" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_204" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_205" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_206" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_207" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_208" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_209" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_210" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_211" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_212" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_213" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_214" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_215" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_216" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_217" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_218" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_219" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_220" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_221" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_222" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_223" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_224" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_225" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_226" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_227" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_228" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_229" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_230" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_231" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_232" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_233" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_234" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_235" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_236" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_237" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_238" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_239" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_240" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_241" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_242" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_243" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_244" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_245" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_246" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_247" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_248" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_249" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_250" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_251" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_252" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_253" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_254" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_255" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_256" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_257" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_258" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_259" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_260" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_261" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_262" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_263" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_264" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_265" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_266" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_267" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_268" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_269" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_270" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_271" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_272" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_273" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_274" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_275" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_276" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_277" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_278" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_279" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_280" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_281" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_282" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_283" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_284" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_285" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_286" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_287" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_288" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_289" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_290" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_291" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_292" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_293" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_294" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_295" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_296" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_297" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_298" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_299" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_300" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_301" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_302" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_303" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_304" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_305" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_306" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_307" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_308" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_309" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_310" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_311" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_312" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_313" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_314" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_315" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_316" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_317" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_318" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_319" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_320" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_321" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_322" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_323" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_324" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_325" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_326" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_327" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_328" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_329" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_330" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_331" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_332" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_333" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_334" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_335" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_336" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_337" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_338" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_339" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_340" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_341" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_342" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_343" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_344" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_345" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_346" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_347" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_348" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_349" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_350" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_351" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_352" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_353" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_354" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_355" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_356" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_357" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_358" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_359" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_360" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_361" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_362" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_363" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_364" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_365" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_366" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_367" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_368" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_369" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_370" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_371" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_372" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_373" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_374" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_375" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_376" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_377" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_378" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_379" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_380" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_381" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_382" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_383" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_384" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_385" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_386" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_387" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_388" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_389" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_390" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_391" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_392" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_393" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_394" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_395" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_396" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_397" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_398" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_399" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_400" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_401" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_402" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_403" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_404" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_405" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_406" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_407" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_408" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_409" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_410" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_411" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_412" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_413" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_414" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_415" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_416" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_417" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_418" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_419" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_420" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_421" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_422" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_423" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_424" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_425" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_426" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_427" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_428" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_429" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_430" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_431" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_432" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_433" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_434" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_435" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_436" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_437" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_438" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_439" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_440" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_441" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_442" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_443" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_444" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_445" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_446" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_447" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_448" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_449" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_450" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_451" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_452" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_453" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_454" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_455" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_456" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_457" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_458" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_459" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_460" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_461" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_462" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_463" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_464" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_465" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_466" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_467" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_468" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_469" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_470" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_471" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_472" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_473" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_474" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_475" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_476" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_477" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_478" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_479" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_480" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_481" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_482" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_483" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_484" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_485" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_486" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_487" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_488" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_489" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_490" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_491" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_492" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_493" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_494" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_495" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_496" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_497" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_498" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_499" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_500" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_501" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_502" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_503" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_504" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_505" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_506" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_507" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_508" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_509" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_510" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_511" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_512" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_513" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_514" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_515" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_516" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_517" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_518" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_519" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_520" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_521" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_522" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_523" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_524" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_525" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_526" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_527" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_528" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_529" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_530" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_531" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_532" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_533" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_534" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_535" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_536" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_537" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_538" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_539" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_540" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_541" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_542" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_543" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_544" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_545" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_546" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_547" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_548" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_549" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_550" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_551" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_552" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_553" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_554" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_555" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_556" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_557" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_558" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_559" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_560" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_561" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_562" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_563" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_564" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_565" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_566" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_567" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_568" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_569" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_570" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_571" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_572" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_573" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_574" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_575" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_576" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_577" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_578" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_579" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_580" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_581" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_582" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_583" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_584" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_585" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_586" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_587" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_588" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_589" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_590" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_591" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_592" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_593" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_594" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_595" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_596" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_597" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_598" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_599" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_600" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_601" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_602" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_603" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_604" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_605" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_606" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_607" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_608" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_609" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_610" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_611" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_612" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_613" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_614" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_615" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_616" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_617" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_618" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_619" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_620" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_621" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_622" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_623" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_624" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_625" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_626" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_627" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_628" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_629" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_630" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_631" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_632" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_633" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_634" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_635" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_636" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_637" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_638" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_639" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_640" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_641" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_642" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_643" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_644" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_645" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_646" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_647" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_648" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_649" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_650" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_651" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_652" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_653" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_654" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_655" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_656" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_657" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_658" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_659" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_660" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_661" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_662" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_663" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_664" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_665" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_666" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_667" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_668" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_669" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_670" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_671" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_672" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_673" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_674" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_675" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_676" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_677" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_678" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_679" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_680" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_681" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_682" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_683" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_684" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_685" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_686" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_687" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_688" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_689" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_690" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_691" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_692" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_693" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_694" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_695" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_696" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_697" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_698" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_699" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_700" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_701" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_702" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_703" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_704" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_705" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_706" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_707" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_708" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_709" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_710" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_711" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_712" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_713" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_714" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_715" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_716" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_717" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_718" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_719" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_720" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_721" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_722" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_723" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_724" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_725" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_726" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_727" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_728" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_729" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_730" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_731" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_732" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_733" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_734" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_735" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_736" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_737" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_738" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_739" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_740" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_741" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_742" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_743" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_744" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_745" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_746" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_747" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_748" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_749" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_750" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_751" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_752" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_753" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_754" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_755" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_756" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_757" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_758" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_759" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_760" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_761" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_762" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_763" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_764" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_765" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_766" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_767" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_768" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_769" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_770" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_771" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_772" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_773" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_774" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_775" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_776" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_777" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_778" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_779" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_780" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_781" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_782" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_783" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_784" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_785" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_786" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_787" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_788" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_789" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_790" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_791" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_792" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_793" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_794" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_795" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_796" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_797" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_798" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_799" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_800" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_801" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_802" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_803" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_804" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_805" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_806" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_807" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_808" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_809" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_810" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_811" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_812" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_813" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_814" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_815" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_816" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_817" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_818" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_819" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_820" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_821" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_822" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_823" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_824" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_825" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_826" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_827" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_828" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_829" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_830" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_831" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_832" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_833" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_834" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_835" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_836" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_837" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_838" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_839" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_840" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_841" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_842" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_843" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_844" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_845" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_846" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_847" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_848" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_849" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_850" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_851" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_852" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_853" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_854" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_855" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_856" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_857" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_858" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_859" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_860" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_861" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_862" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_863" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_864" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_865" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_866" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_867" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_868" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_869" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_870" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_871" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_872" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_873" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_874" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_875" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_876" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_877" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_878" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_879" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_880" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_881" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_882" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_883" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_884" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_885" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_886" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_887" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_888" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_889" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_890" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_891" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_892" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_893" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_894" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_895" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_896" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_897" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_898" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_899" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_900" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_901" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_902" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_903" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_904" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_905" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_906" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_907" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_908" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_909" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_910" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_911" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_912" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_913" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_914" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_915" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_916" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_917" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_918" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_919" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_920" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_921" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_922" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_923" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_924" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_925" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_926" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_927" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_928" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_929" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_930" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_931" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_932" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_933" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_934" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_935" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_936" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_937" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_938" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_939" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_940" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_941" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_942" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_943" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_944" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_945" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_946" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_947" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_948" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_949" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_950" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_951" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_952" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_953" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_954" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_955" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_956" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_957" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_958" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_959" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_960" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_961" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_962" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_963" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_964" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_965" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_966" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_967" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_968" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_969" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_970" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_971" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_972" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_973" for table "t1" +NOTICE: CREATE TABLE will create 
partition "t1_1_prt_974" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_975" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_976" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_977" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_978" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_979" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_980" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_981" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_982" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_983" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_984" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_985" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_986" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_987" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_988" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_989" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_990" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_991" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_992" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_993" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_994" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_995" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_996" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_997" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_998" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_999" for table "t1" +INSERT INTO t1 +SELECT pk, val +FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; + ?column? +---------- + t +(1 row) + +DROP TABLE t1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0; + ?column? +---------- + t +(1 row) + diff --git a/gpcontrib/diskquota/tests/regress/expected/test_max_monitored_databases.out b/gpcontrib/diskquota/tests/regress/expected/test_max_monitored_databases.out new file mode 100644 index 00000000000..84568bc114a --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_max_monitored_databases.out @@ -0,0 +1,104 @@ +--start_ignore +\! gpconfig -c diskquota.max_monitored_databases -v 3 +20230905:12:39:55:332748 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_monitored_databases -v 3' +\! 
gpstop -ari +--end_ignore +\c +DROP DATABASE IF EXISTS test_db1; +NOTICE: database "test_db1" does not exist, skipping +DROP DATABASE IF EXISTS test_db2; +NOTICE: database "test_db2" does not exist, skipping +DROP DATABASE IF EXISTS test_db3; +NOTICE: database "test_db3" does not exist, skipping +CREATE DATABASE test_db1; +CREATE DATABASE test_db2; +CREATE DATABASE test_db3; +\c test_db1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c test_db2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +\c test_db3 +CREATE EXTENSION diskquota; +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:406) +-- clean extension +\c test_db1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c test_db2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +-- clean database +\c contrib_regression +DROP DATABASE test_db1; +DROP DATABASE test_db2; +DROP DATABASE test_db3; +-- start_ignore +\! gpconfig -r diskquota.max_monitored_databases +20230905:12:40:29:350921 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_monitored_databases' +\! gpstop -ari +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -ari +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Master catalog information +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from master... +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.24.4+dev.45.gad3671f087 build dev' +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Commencing Master instance shutdown with mode='immediate' +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Master segment instance directory=/home/zhrt/workspace/gpdb6/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover master process +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb6/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230905:12:40:37:352551 gpstop:zhrt:zhrt-[INFO]:-Stopping master standby host zhrt mode=immediate +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230905:12:40:43:352551 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230905:12:40:43:352551 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20230905:12:40:43:352551 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Cleaning up leftover gpmmon process +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-No leftover gpmmon process found +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Cleaning up leftover gpsmon processes +20230905:12:40:47:352551 gpstop:zhrt:zhrt-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20230905:12:40:47:352551 gpstop:zhrt:zhrt-[INFO]:-Cleaning up leftover shared memory +20230905:12:40:48:352551 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +-- end_ignore diff --git a/gpcontrib/diskquota/tests/regress/expected/test_mistake.out b/gpcontrib/diskquota/tests/regress/expected/test_mistake.out new file mode 100644 index 00000000000..bd11eb5f1a5 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_mistake.out @@ -0,0 +1,33 @@ +-- to make sure that the schema 'notfoundns' is really not found +select nspname from pg_namespace where nspname = 'notfoundns'; + nspname +--------- +(0 rows) + +select diskquota.set_schema_quota('notfoundns', '1 MB'); +ERROR: schema "notfoundns" does not exist +DROP SCHEMA IF EXISTS nmistake; +NOTICE: schema "nmistake" does not exist, skipping +CREATE SCHEMA nmistake; +select diskquota.set_schema_quota('nmistake', '0 MB'); +ERROR: disk quota can not be set to 0 MB +DROP ROLE IF EXISTS rmistake; +NOTICE: role "rmistake" does not exist, skipping +CREATE ROLE rmistake; +select diskquota.set_role_quota('rmistake', '0 MB'); +ERROR: disk quota can not be set to 0 MB +-- start_ignore +\! 
mkdir -p /tmp/spcmistake +-- end_ignore +DROP TABLESPACE IF EXISTS spcmistake; +NOTICE: tablespace "spcmistake" does not exist, skipping +CREATE TABLESPACE spcmistake LOCATION '/tmp/spcmistake'; +SELECT diskquota.set_schema_tablespace_quota('nmistake', 'spcmistake','0 MB'); +ERROR: disk quota can not be set to 0 MB +SELECT diskquota.set_role_tablespace_quota('rmistake', 'spcmistake','0 MB'); +ERROR: disk quota can not be set to 0 MB +SELECT diskquota.set_per_segment_quota('spcmistake', 0); +ERROR: per segment quota ratio can not be set to 0 +DROP SCHEMA nmistake; +DROP ROLE rmistake; +DROP TABLESPACE spcmistake; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_partition.out b/gpcontrib/diskquota/tests/regress/expected/test_partition.out new file mode 100644 index 00000000000..a531e2db302 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_partition.out @@ -0,0 +1,65 @@ +-- Test partition table +CREATE SCHEMA s8; +SELECT diskquota.SET_schema_quota('s8', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s8; +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +)PARTITION BY RANGE (logdate) +( + PARTITION Feb06 START (date '2006-02-01') INCLUSIVE, + PARTITION Mar06 START (date '2006-03-01') INCLUSIVE + END (date '2016-04-01') EXCLUSIVE +); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'city_id' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +NOTICE: CREATE TABLE will create partition "measurement_1_prt_feb06" for table "measurement" +NOTICE: CREATE TABLE will create partition "measurement_1_prt_mar06" for table "measurement" +INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +-- expect insert fail +INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +ERROR: schema's disk space quota exceeded with name: s8 +-- expect insert fail +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; +ERROR: schema's disk space quota exceeded with name: s8 +DELETE FROM measurement WHERE logdate='2006-03-02'; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +VACUUM FULL measurement; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; +DROP TABLE measurement; +RESET search_path; +DROP SCHEMA s8; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume.out b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume.out new file mode 100644 index 00000000000..9b3d264ac8f --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume.out @@ -0,0 +1,72 @@ +-- Test pause and resume. 
+CREATE SCHEMA s1; +SET search_path TO s1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +-- pause extension +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + a | 3932160 | -1 +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); +-- resume extension +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +-- table size should be updated after resume +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + a | 7569408 | -1 +(1 row) + +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume_multiple_db.out b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume_multiple_db.out new file mode 100644 index 00000000000..34419ee58dc --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume_multiple_db.out @@ -0,0 +1,207 @@ +-- need 'contrib_regression' as test database +\c +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE DATABASE test_pause_and_resume; +CREATE DATABASE test_new_create_database; +\c test_pause_and_resume +CREATE SCHEMA s1; +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c contrib_regression +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed +\c test_pause_and_resume +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed +\c contrib_regression +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: s1 +\c test_pause_and_resume +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: s1 +\c contrib_regression +SELECT diskquota.pause(); -- pause extension, only affects the current database + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + s1.a | 3932160 | -1 +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed +\c test_pause_and_resume +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + s1.a | 3932160 | -1 +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: s1 +SELECT diskquota.pause(); -- pause extension, only affects the current database + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + s1.a | 3932160 | -1 +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed +\c test_new_create_database; +CREATE SCHEMA s1; +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active although other databases are paused + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: s1 +SELECT diskquota.pause(); -- pause extension, only affects the current database + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed +-- resume should only affect the current database +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: s1 +\c contrib_regression +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: s1 +\c test_pause_and_resume +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c test_new_create_database +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP SCHEMA s1 CASCADE; +NOTICE: drop cascades to table s1.a +DROP DATABASE test_pause_and_resume; +DROP DATABASE test_new_create_database; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_primary_failure.out b/gpcontrib/diskquota/tests/regress/expected/test_primary_failure.out new file mode 100644 index 00000000000..48160e24616 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_primary_failure.out @@ -0,0 +1,300 @@ +CREATE SCHEMA ftsr; +SELECT diskquota.set_schema_quota('ftsr', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO ftsr; +create or replace language plpythonu; +-- +-- pg_ctl: +-- datadir: data directory of process to target with `pg_ctl` +-- command: commands valid for `pg_ctl` +-- command_mode: modes valid for `pg_ctl -m` +-- +create or replace function pg_ctl(datadir text, command text, command_mode text default 'immediate') +returns text as $$ + import subprocess + if command not in ('stop', 'restart'): + return 'Invalid command input' + + cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir + cmd = cmd + '-W -m %s %s' % (command_mode, command) + if 'plpython2u' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') + +$$ language plpython2u; +create or replace function pg_recoverseg(datadir text, command text) +returns text as $$ + import subprocess + cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) + 
if 'plpython2u' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') +$$ language plpython2u; +CREATE TABLE a(i int, j int) DISTRIBUTED BY (i); +-- the entries will be inserted into seg0 +INSERT INTO a SELECT 2, generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass ORDER BY segid; + tableid | size | segid +---------+---------+------- + a | 3735552 | -1 + a | 3735552 | 0 + a | 0 | 1 + a | 0 | 2 +(4 rows) + +-- expect insert fail +INSERT INTO a SELECT 2, generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: ftsr +-- now one of primary is down +select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); + pg_ctl +---------------------- + server shutting down+ + +(1 row) + +-- switch mirror to primary +select gp_request_fts_probe_scan(); + gp_request_fts_probe_scan +--------------------------- + t +(1 row) + +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; + content | preferred_role | role | status | mode +---------+----------------+------+--------+------ + 0 | p | m | d | n + 0 | m | p | u | n +(2 rows) + +-- expect insert fail +INSERT INTO a SELECT 2, generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: ftsr +-- increase quota +SELECT diskquota.set_schema_quota('ftsr', '200 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT 2, generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check whether monitored_dbid_cache is refreshed in mirror +-- diskquota.table_size should be updated +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass ORDER BY segid; + tableid | size | segid +---------+---------+------- + a | 4096000 | -1 + a | 4096000 | 0 + a | 0 | 1 + a | 0 | 2 +(4 rows) + +-- pull up failed primary +-- start_ignore +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -a -d /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6182+dev173g55557f44f3 build dev' + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9426 (Greenplum Database 6182+dev173g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 1300, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:28:48:371791 
gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Greenplum instance recovery parameters + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Recovery type = Standard + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Recovery 1 of 1 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Synchronization mode = Incremental + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance host = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance address = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance port = 6002 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance host = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance address = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance port = 6005 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Target = in-place + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Starting to create new pg_hbaconf on primary segments + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-Successfully modified pg_hbaconf on primary segments to allow replication connections + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-1 segment(s) to recover + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-Ensuring 1 failed segment(s) are stopped + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-Ensuring that shared memory is cleaned up for stopped segments + + 20211216:16:28:50:371791 gprecoverseg:laptop:v-[INFO]:-Updating configuration with new mirrors + + 20211216:16:28:50:371791 gprecoverseg:laptop:v-[INFO]:-Updating mirrors + + 20211216:16:28:50:371791 gprecoverseg:laptop:v-[INFO]:-Running pg_rewind on failed segments + + laptop (dbid 2): 0/689186 kB (0%) copied + + laptop (dbid 2): syncing target data directory + + laptop (dbid 2): syncing target data directory + + laptop (dbid 2): Done! 
+ + 20211216:16:28:55:371791 gprecoverseg:laptop:v-[INFO]:-Starting mirrors + + 20211216:16:28:55:371791 gprecoverseg:laptop:v-[INFO]:-era is 85b8357bd546c506_211216162717 + + 20211216:16:28:55:371791 gprecoverseg:laptop:v-[INFO]:-Commencing parallel segment instance startup, please wait + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Process results + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Triggering FTS probe + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Segments successfully recovered + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Recovered mirror segments need to sync WAL with primary segments + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Use 'gpstate -e' to check progress of WAL sync remaining bytes + + +(1 row) + +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -ar -d /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6182+dev173g55557f44f3 build dev' + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9426 (Greenplum Database 6182+dev173g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 1300, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Greenplum instance recovery parameters + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Recovery type = Rebalance + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Unbalanced segment 1 of 2 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance host = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance address = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance port = 6005 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Balanced role = Mirror + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Current role = Primary + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Unbalanced segment 2 of 2 + + 20211216:16:28:56:373757 
gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance host = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance address = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance port = 6002 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Balanced role = Primary + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Current role = Mirror + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Determining primary and mirror segment pairs to rebalance + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Stopping unbalanced primary segments + + 20211216:16:28:57:373757 gprecoverseg:laptop:v-[INFO]:-Triggering segment reconfiguration + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Starting segment synchronization + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-=============================START ANOTHER RECOVER========================================= + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6182+dev173g55557f44f3 build dev' + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9426 (Greenplum Database 6182+dev173g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 1300, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Greenplum instance recovery parameters + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Recovery type = Standard + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Recovery 1 of 1 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Synchronization mode = Incremental + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance host = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance address = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance port = 6005 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance host = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance address = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 
20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance port = 6002 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Target = in-place + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Starting to create new pg_hbaconf on primary segments + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-Successfully modified pg_hbaconf on primary segments to allow replication connections + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-1 segment(s) to recover + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-Ensuring 1 failed segment(s) are stopped + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-Ensuring that shared memory is cleaned up for stopped segments + + 20211216:16:29:06:373757 gprecoverseg:laptop:v-[INFO]:-Updating configuration with new mirrors + + 20211216:16:29:06:373757 gprecoverseg:laptop:v-[INFO]:-Updating mirrors + + 20211216:16:29:06:373757 gprecoverseg:laptop:v-[INFO]:-Running pg_rewind on failed segments + + laptop (dbid 5): no rewind required + + 20211216:16:29:07:373757 gprecoverseg:laptop:v-[INFO]:-Starting mirrors + + 20211216:16:29:07:373757 gprecoverseg:laptop:v-[INFO]:-era is 85b8357bd546c506_211216162717 + + 20211216:16:29:07:373757 gprecoverseg:laptop:v-[INFO]:-Commencing parallel segment instance startup, please wait + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Process results + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Triggering FTS probe + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Segments successfully recovered + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Recovered mirror segments need to sync WAL with primary segments + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Use 'gpstate -e' to check progress of WAL sync remaining bytes + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-==============================END ANOTHER RECOVER========================================== + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-****************************************************************** + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-The rebalance operation has completed successfully + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-****************************************************************** + + +(1 row) + +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -a -d /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6182+dev173g55557f44f3 build dev' + + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9426 (Greenplum Database 6182+dev173g55557f44f3 build dev) on 
x86_64-unknown-linux-gnu, compiled by clang version 1300, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-No segments to recover + + +(1 row) + +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:29:08:375616 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -ar -d /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6182+dev173g55557f44f3 build dev' + + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9426 (Greenplum Database 6182+dev173g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 1300, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-No segments are running in their non-preferred role and need to be rebalanced + + +(1 row) + +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; + content | preferred_role | role | status | mode +---------+----------------+------+--------+------ + 0 | p | p | u | s + 0 | m | m | u | s +(2 rows) + +-- end_ignore +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; + quota_in_mb | nspsize_in_bytes +-------------+------------------ + 200 | 4096000 +(1 row) + +INSERT INTO a SELECT 2, generate_series(1,100); +DROP TABLE a; +DROP SCHEMA ftsr CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pg_ctl(text,text,text) +drop cascades to function pg_recoverseg(text,text) diff --git a/gpcontrib/diskquota/tests/regress/expected/test_quota_view_no_table.out b/gpcontrib/diskquota/tests/regress/expected/test_quota_view_no_table.out new file mode 100644 index 00000000000..27a0b315f5b --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_quota_view_no_table.out @@ -0,0 +1,64 @@ +CREATE ROLE no_table SUPERUSER; +CREATE SCHEMA no_table; +SELECT diskquota.set_schema_quota('no_table', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT schema_name, quota_in_mb, nspsize_in_bytes +FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + no_table | 1 | 0 +(1 row) + +SELECT diskquota.set_role_quota('no_table', '1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT role_name, quota_in_mb, rolsize_in_bytes +FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + no_table | 1 | 0 +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('no_table', 'pg_default', '1 MB'); + set_schema_tablespace_quota 
+----------------------------- + +(1 row) + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + no_table | pg_default | 1 | 0 +(1 row) + +SELECT diskquota.set_role_tablespace_quota('no_table', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT role_name, tablespace_name , quota_in_mb, rolsize_tablespace_in_bytes +FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + no_table | pg_default | 1 | 0 +(1 row) + +DROP ROLE no_table; +DROP SCHEMA no_table; +-- Wait until the quota configs are removed from the memory +-- automatically after DROP. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + diff --git a/gpcontrib/diskquota/tests/regress/expected/test_readiness_logged.out b/gpcontrib/diskquota/tests/regress/expected/test_readiness_logged.out new file mode 100644 index 00000000000..ed303e706a3 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_readiness_logged.out @@ -0,0 +1,60 @@ +CREATE DATABASE test_readiness_logged; +\c test_readiness_logged +-- Get bgworker's log by database name. +-- 1. select bgworker pid by database name. +-- 2. select logmessage by bgworker pid. +CREATE VIEW logmessage_count_view AS WITH logp AS( + SELECT + MAX(logpid) as max_logpid + FROM + gp_toolkit.__gp_log_master_ext + WHERE + position( + '[diskquota] start disk quota worker process to monitor database' in logmessage + ) > 0 + AND position(current_database() in logmessage) > 0 +) +SELECT + count(*) +FROM + gp_toolkit.__gp_log_master_ext, + logp +WHERE + logmessage = '[diskquota] diskquota is not ready' + and logpid = max_logpid; +CREATE TABLE t (i int) DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +CREATE EXTENSION diskquota_test; +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t +(1 row) + +-- logmessage count should be 1 +SELECT * FROM logmessage_count_view; + count +------- + 1 +(1 row) + +\! 
gpstop -raf > /dev/null +\c +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t +(1 row) + +-- logmessage count should be 1 +SELECT * FROM logmessage_count_view; + count +------- + 1 +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_readiness_logged; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_recreate.out b/gpcontrib/diskquota/tests/regress/expected/test_recreate.out new file mode 100644 index 00000000000..c69cd82e77e --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_recreate.out @@ -0,0 +1,27 @@ +\c +CREATE DATABASE test_recreate; +\c diskquota +INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate'; +\c test_recreate +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- shoud be ok + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_recreate; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_rejectmap.out b/gpcontrib/diskquota/tests/regress/expected/test_rejectmap.out new file mode 100644 index 00000000000..f7dbccbe783 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_rejectmap.out @@ -0,0 +1,292 @@ +-- +-- This file contains tests for dispatching and quering rejectmap. +-- +CREATE SCHEMA s_rejectmap; +SET search_path TO s_rejectmap; +-- This function replaces the oid appears in the auxiliary relation's name +-- with the corresponding relname of that oid. +CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text) + RETURNS text AS $$ + BEGIN + RETURN COALESCE( + REGEXP_REPLACE(given_name, + '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', + '\1' || + (SELECT relname FROM pg_class + WHERE oid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); + END; +$$ LANGUAGE plpgsql; +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. 
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) + RETURNS oid AS +$$ +BEGIN + CASE + WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; + ELSE RETURN ( + CASE tablespaceoid + WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) + ELSE + tablespaceoid + END + ); + END CASE; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) + RETURNS void AS $$ + DECLARE + bt int; + targetoid oid; + tablespaceoid oid; + BEGIN + SELECT reltablespace INTO tablespaceoid FROM pg_class WHERE relname=rel::text; + CASE block_type + WHEN 'NAMESPACE' THEN + bt = 0; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE' THEN + bt = 1; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'NAMESPACE_TABLESPACE' THEN + bt = 2; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE_TABLESPACE' THEN + bt = 3; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + END CASE; + PERFORM diskquota.refresh_rejectmap( + ARRAY[ + ROW(targetoid, + (SELECT oid FROM pg_database WHERE datname=current_database()), + (SELECT get_real_tablespace_oid(block_type, tablespaceoid)), + bt, + false) + ]::diskquota.rejectmap_entry[], + ARRAY[rel]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + END; $$ +LANGUAGE 'plpgsql'; +-- +-- 1. Create an ordinary table and add its oid to rejectmap on seg0. +-- Check that it's relfilenode is blocked on seg0 by various conditions. +-- +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | namespace_matched +------------+-----------------+------------------- + blocked_t1 | NAMESPACE_QUOTA | t +(1 row) + +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | owner_matched +------------+-------------+--------------- + blocked_t1 | ROLE_QUOTA | t +(1 row) + +-- Create a tablespace to test the rest of blocking types. +\! mkdir -p /tmp/blocked_space +CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; +ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace and tablespace. 
+SELECT rel.relname, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | namespace_matched | tablespace_matched +------------+----------------------------+-------------------+-------------------- + blocked_t1 | NAMESPACE_TABLESPACE_QUOTA | t | t +(1 row) + +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner and tablespace. +SELECT rel.relname, be.target_type, + (be.target_oid=rel.relowner) AS owner_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | owner_matched | tablespace_matched +------------+-----------------------+---------------+-------------------- + blocked_t1 | ROLE_TABLESPACE_QUOTA | t | t +(1 row) + +-- +-- 2. Test that the relfilenodes of toast relation together with its +-- index are blocked on seg0. +-- +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +-- Insert an entry for blocked_t2 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t2 together with its toast relation and toast +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +---------------------------+---------+-----------------+------------------- + pg_toast_blocked_t2_index | i | NAMESPACE_QUOTA | f + pg_toast_blocked_t2 | t | NAMESPACE_QUOTA | f + blocked_t2 | r | NAMESPACE_QUOTA | t +(3 rows) + +-- +-- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE INDEX blocked_t3_index ON blocked_t3(i); +-- Insert an entry for blocked_t3 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t3 together with its appendonly relation and appendonly +-- index relations are blocked on seg0 by its namespace. 
+SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_aovisimap_blocked_t3_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t3 | M | NAMESPACE_QUOTA | f + pg_aoseg_blocked_t3 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t3_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t3 | b | NAMESPACE_QUOTA | f + blocked_t3 | r | NAMESPACE_QUOTA | t +(6 rows) + +-- +-- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE INDEX blocked_t4_index ON blocked_t4(i); +-- Insert an entry for blocked_t4 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t4 together with its appendonly relation and appendonly +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_aovisimap_blocked_t4_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t4 | M | NAMESPACE_QUOTA | f + pg_aocsseg_blocked_t4 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t4_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t4 | b | NAMESPACE_QUOTA | f + blocked_t4 | r | NAMESPACE_QUOTA | t +(6 rows) + +-- +-- 5. Test that the relfilenodes of toast appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE INDEX blocked_t5_index ON blocked_t5(i); +-- Insert an entry for blocked_t5 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t5 together with its toast relation, toast +-- index relation and appendonly relations are blocked on seg0 by its namespace. 
+SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_aovisimap_blocked_t5_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t5 | M | NAMESPACE_QUOTA | f + pg_aocsseg_blocked_t5 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t5_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t5 | b | NAMESPACE_QUOTA | f + blocked_t5 | r | NAMESPACE_QUOTA | t +(6 rows) + +-- Do some clean-ups. +DROP FUNCTION replace_oid_with_relname(text); +DROP FUNCTION block_relation_on_seg0(regclass, text); +DROP FUNCTION get_real_tablespace_oid(text, oid); +DROP TABLE blocked_t1; +DROP TABLE blocked_t2; +DROP TABLE blocked_t3; +DROP TABLE blocked_t4; +DROP TABLE blocked_t5; +DROP TABLESPACE blocked_space; +SET search_path TO DEFAULT; +DROP SCHEMA s_rejectmap; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_rejectmap_mul_db.out b/gpcontrib/diskquota/tests/regress/expected/test_rejectmap_mul_db.out new file mode 100644 index 00000000000..8ac4193c4fc --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_rejectmap_mul_db.out @@ -0,0 +1,89 @@ +-- One db's rejectmap update should not impact on other db's rejectmap +CREATE DATABASE tjmu1; +CREATE DATABASE tjmu2; +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- end_ignore +\c tjmu1 +CREATE EXTENSION diskquota; +SELECT diskquota.set_schema_quota('public', '1MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Trigger hard limit to dispatch rejectmap for tjmu1 +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=87165) +-- FIXME: Pause to avoid tjmu1's worker clear the active table. Since there are bugs, this might be flaky. +SELECT diskquota.pause(); + pause +------- + +(1 row) + +-- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 +SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; + count +------- + 1 +(1 row) + +\c tjmu2 +CREATE EXTENSION diskquota; +SELECT diskquota.set_schema_quota('public', '1MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Trigger hard limit to dispatch rejectmap for tjmu2 +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:6003 pid=4001721) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +--\c tjmu1 +-- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. 
count = 2 +-- The entries for tjmu1 should not be cleared +SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; + count +------- + 2 +(1 row) + +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +-- end_ignore +\c tjmu1 +DROP EXTENSION diskquota; +\c tjmu2 +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE tjmu1; +DROP DATABASE tjmu2; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_relation_cache.out b/gpcontrib/diskquota/tests/regress/expected/test_relation_cache.out new file mode 100644 index 00000000000..5f0c3124066 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_relation_cache.out @@ -0,0 +1,127 @@ +-- init +CREATE OR REPLACE FUNCTION diskquota.check_relation_cache() +RETURNS boolean +as $$ +declare t1 oid[]; +declare t2 oid[]; +begin +t1 := (select array_agg(distinct((a).relid)) from diskquota.show_relation_cache_all_seg() as a where (a).relid != (a).primary_table_oid); +t2 := (select distinct((a).auxrel_oid) from diskquota.show_relation_cache_all_seg() as a where (a).relid = (a).primary_table_oid); +return t1 = t2; +end; +$$ LANGUAGE plpgsql; +-- heap table +begin; +create table t(i int) DISTRIBUTED BY (i); +insert into t select generate_series(1, 100000); +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 3 +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- toast table +begin; +create table t(t text) DISTRIBUTED BY (t); +insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 9 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- AO table +begin; +create table t(a int, b text) with(appendonly=true) DISTRIBUTED BY (a); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 18 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- AOCS table +begin; +create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 12 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +DROP FUNCTION 
diskquota.check_relation_cache(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_relation_size.out b/gpcontrib/diskquota/tests/regress/expected/test_relation_size.out new file mode 100644 index 00000000000..9931beeba12 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_relation_size.out @@ -0,0 +1,102 @@ +CREATE TEMP TABLE t1(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1'); + relation_size +--------------- + 688128 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 688128 +(1 row) + +CREATE TABLE t2(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2'); + relation_size +--------------- + 688128 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 688128 +(1 row) + +-- start_ignore +\! mkdir -p /tmp/test_spc +-- end_ignore +DROP TABLESPACE IF EXISTS test_spc; +NOTICE: tablespace "test_spc" does not exist, skipping +CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; +ALTER TABLE t1 SET TABLESPACE test_spc; +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1'); + relation_size +--------------- + 1081344 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 1081344 +(1 row) + +ALTER TABLE t2 SET TABLESPACE test_spc; +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2'); + relation_size +--------------- + 1081344 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 1081344 +(1 row) + +DROP TABLE t1, t2; +DROP TABLESPACE test_spc; +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO ao SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('ao'); + relation_size +--------------- + 100200 +(1 row) + +SELECT pg_relation_size('ao'); + pg_relation_size +------------------ + 100200 +(1 row) + +DROP TABLE ao; +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT diskquota.relation_size('aocs'); + relation_size +--------------- + 10092696 +(1 row) + +SELECT pg_relation_size('aocs'); + pg_relation_size +------------------ + 10092696 +(1 row) + +DROP TABLE aocs; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_relkind.out b/gpcontrib/diskquota/tests/regress/expected/test_relkind.out new file mode 100644 index 00000000000..30cf8646171 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_relkind.out @@ -0,0 +1,42 @@ +CREATE DATABASE test_relkind; +\c test_relkind +CREATE TYPE test_type AS ( + "dbid" oid, + "datname" text +); +CREATE VIEW v AS select * from pg_class; +CREATE EXTENSION diskquota; +CREATE table test(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- diskquota.table_size should not change after creating a new type +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +---------+------+------- + test | 0 | -1 +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_relkind; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_rename.out b/gpcontrib/diskquota/tests/regress/expected/test_rename.out new file mode 100644 index 00000000000..ae96a1e797f --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_rename.out @@ -0,0 +1,74 @@ +-- test rename schema +CREATE SCHEMA srs1; +SELECT diskquota.set_schema_quota('srs1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to srs1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: srs1 +ALTER SCHEMA srs1 RENAME TO srs2; +SET search_path TO srs2; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: srs2 +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: srs2 +DROP TABLE a2; +RESET search_path; +DROP SCHEMA srs2; +-- test rename role +CREATE SCHEMA srr1; +CREATE ROLE srerole NOLOGIN; +SELECT diskquota.set_role_quota('srerole', '1MB'); + set_role_quota +---------------- + +(1 row) + +SET search_path TO srr1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE a OWNER TO srerole; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: role's disk space quota exceeded with name: srerole +ALTER ROLE srerole RENAME TO srerole2; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: role's disk space quota exceeded with name: srerole2 +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: role's disk space quota exceeded with name: srerole2 +DROP TABLE a2; +DROP ROLE srerole2; +RESET search_path; +DROP SCHEMA srr1; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_reschema.out b/gpcontrib/diskquota/tests/regress/expected/test_reschema.out new file mode 100644 index 00000000000..5ede5fed2d9 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_reschema.out @@ -0,0 +1,41 @@ +-- Test re-set_schema_quota +CREATE SCHEMA srE; +SELECT diskquota.set_schema_quota('srE', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO srE; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail when exceed quota limit +INSERT INTO a SELECT generate_series(1,1000); +ERROR: schema's disk space quota exceeded with name: sre +-- set schema quota larger +SELECT diskquota.set_schema_quota('srE', '1 GB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,1000); +DROP TABLE a; +RESET search_path; +DROP SCHEMA srE; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_role.out b/gpcontrib/diskquota/tests/regress/expected/test_role.out new file mode 100644 index 00000000000..3f18ab804db --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_role.out @@ -0,0 +1,135 @@ +-- Test role quota +CREATE SCHEMA srole; +SET search_path TO srole; +CREATE ROLE u1 NOLOGIN; +CREATE ROLE u2 NOLOGIN; +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO u1; +CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b2 OWNER TO u1; +SELECT diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u1 +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u1 +-- Delete role quota +SELECT diskquota.set_role_quota('u1', '-1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Reset role quota +SELECT diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u1 +SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + u1 | 1 | 4194304 +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + b | 4063232 | -1 + b | 1343488 | 0 + b | 1343488 | 1 + b | 1343488 | 2 +(4 rows) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b2'::regclass +ORDER BY segid; + tableid | size | segid +---------+--------+------- + b2 | 131072 | -1 + b2 | 32768 | 0 + b2 | 32768 | 1 + b2 | 32768 | 2 +(4 rows) + +ALTER TABLE b OWNER TO u2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +-- 
superuser is blocked to set quota +--start_ignore +SELECT rolname from pg_roles where rolsuper=true; + rolname +--------- + sa +(1 row) + +--end_ignore +\gset +select diskquota.set_role_quota(:'rolname', '1mb'); +ERROR: Can not set disk quota for system owner: sa +select diskquota.set_role_quota(:'rolname', '-1mb'); + set_role_quota +---------------- + +(1 row) + +CREATE ROLE "Tn" NOLOGIN; +SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail +ERROR: role "tn" does not exist +SELECT diskquota.set_role_quota('"tn"', '-1 MB'); -- fail +ERROR: role "tn" does not exist +SELECT diskquota.set_role_quota('"Tn"', '-1 MB'); + set_role_quota +---------------- + +(1 row) + +DROP TABLE b, b2; +DROP ROLE u1, u2, "Tn"; +RESET search_path; +DROP SCHEMA srole; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_schema.out b/gpcontrib/diskquota/tests/regress/expected/test_schema.out new file mode 100644 index 00000000000..a85d161571b --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_schema.out @@ -0,0 +1,108 @@ +-- Test schema +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +CREATE TABLE a2(i int) DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +-- Test alter table set schema +CREATE SCHEMA s2; +ALTER TABLE s1.a SET SCHEMA s2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO s2.a SELECT generate_series(1,200); +-- prepare a schema that has reached quota limit +CREATE SCHEMA badquota; +DROP ROLE IF EXISTS testbody; +NOTICE: role "testbody" does not exist, skipping +CREATE ROLE testbody; +CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.set_schema_quota('badquota', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT size, segid FROM diskquota.table_size + WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') + ORDER BY segid DESC; + size | segid +---------+------- + 1310720 | 2 + 1310720 | 1 + 1310720 | 0 + 3932160 | -1 +(4 rows) + +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); +ERROR: schema's disk space quota exceeded with name: badquota +ALTER TABLE s2.a SET SCHEMA badquota; +-- expect failed +INSERT INTO badquota.a SELECT generate_series(0, 100); +ERROR: schema's disk space quota exceeded with name: badquota +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE 
schema_name = 's1'; + schema_name | quota_in_mb +-------------+------------- + s1 | 1 +(1 row) + +CREATE SCHEMA "Tn1"; +SELECT diskquota.set_schema_quota('"Tn1"', '-1 MB'); + set_schema_quota +------------------ + +(1 row) + +RESET search_path; +DROP TABLE s1.a2, badquota.a; +DROP SCHEMA s1, s2, "Tn1"; +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_show_status.out b/gpcontrib/diskquota/tests/regress/expected/test_show_status.out new file mode 100644 index 00000000000..14c3e7de9fd --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_show_status.out @@ -0,0 +1,67 @@ +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | off +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | on +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | off +(2 rows) + +select from diskquota.pause(); +-- +(1 row) + +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | paused + hard limits | off +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | paused + hard limits | paused +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | paused + hard limits | off +(2 rows) + +select from diskquota.resume(); +-- +(1 row) + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | off +(2 rows) + diff --git a/gpcontrib/diskquota/tests/regress/expected/test_table_size.out b/gpcontrib/diskquota/tests/regress/expected/test_table_size.out new file mode 100644 index 00000000000..aa2e6442641 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_table_size.out @@ -0,0 +1,24 @@ +-- Test tablesize table +create table a(i text) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +insert into a select * from generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select pg_table_size('a') as table_size; + table_size +------------ + 819200 +(1 row) + +\gset +select :table_size = diskquota.table_size.size from diskquota.table_size where tableid = 'a'::regclass and segid=-1; + ?column? 
+---------- + t +(1 row) + diff --git a/gpcontrib/diskquota/tests/regress/expected/test_tablespace_diff_schema.out b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_diff_schema.out new file mode 100644 index 00000000000..93da486b836 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_diff_schema.out @@ -0,0 +1,87 @@ +-- allow set quota for different schema in the same tablespace +-- delete quota for one schema will not drop other quotas with different schema in the same tablespace +-- start_ignore +\! mkdir -p /tmp/spc_diff_schema +-- end_ignore +CREATE TABLESPACE spc_diff_schema LOCATION '/tmp/spc_diff_schema'; +CREATE SCHEMA schema_in_tablespc; +SET search_path TO schema_in_tablespc; +CREATE TABLE a(i int) TABLESPACE spc_diff_schema DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'spc_diff_schema','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- with hardlimits off, expect to success +INSERT INTO a SELECT generate_series(1,1000000); +-- wait for next loop for bgworker to add it to rejectmap +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); +ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema + schema_in_tablespc | pg_default +(2 rows) + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','-1'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema +(1 row) + +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); +ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded +reset search_path; +DROP TABLE IF EXISTS schema_in_tablespc.a; +DROP tablespace IF EXISTS spc_diff_schema; +DROP SCHEMA IF EXISTS schema_in_tablespc; +-- start_ignore +\! rmdir /tmp/spc_diff_schema + -- end_ignore diff --git a/gpcontrib/diskquota/tests/regress/expected/test_tablespace_role.out b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_role.out new file mode 100644 index 00000000000..1d1d165c503 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_role.out @@ -0,0 +1,191 @@ +-- Test role quota +-- start_ignore +\! 
mkdir -p /tmp/rolespc +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc; +NOTICE: tablespace "rolespc" does not exist, skipping +CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; +CREATE SCHEMA rolespcrole; +SET search_path TO rolespcrole; +DROP ROLE IF EXISTS rolespcu1; +NOTICE: role "rolespcu1" does not exist, skipping +DROP ROLE IF EXISTS rolespcu2; +NOTICE: role "rolespcu2" does not exist, skipping +CREATE ROLE rolespcu1 NOLOGIN; +CREATE ROLE rolespcu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); +ALTER TABLE b2 OWNER TO rolespcu1; +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test show_fast_role_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + rolespcu1 | rolespc | 1 | 4194304 +(1 row) + +-- Test alter owner +ALTER TABLE b OWNER TO rolespcu2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/rolespc2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc2; +NOTICE: tablespace "rolespc2" does not exist, skipping +CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; +ALTER TABLE b SET TABLESPACE rolespc2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc +ALTER TABLE b SET TABLESPACE rolespc; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- superuser is blocked to set quota +-- start_ignore +SELECT rolname from pg_roles where rolsuper=true; + rolname +--------- + sa +(1 row) + +-- end_ignore +\gset +select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); +ERROR: Can not set disk quota for system owner: sa +-- start_ignore +\! mkdir -p /tmp/rolespc3 +-- end_ignore +DROP ROLE IF EXISTS "Rolespcu3"; +NOTICE: role "Rolespcu3" does not exist, skipping +CREATE ROLE "Rolespcu3" NOLOGIN; +DROP TABLESPACE IF EXISTS "Rolespc3"; +NOTICE: tablespace "Rolespc3" does not exist, skipping +CREATE TABLESPACE "Rolespc3" LOCATION '/tmp/rolespc3'; +SELECT diskquota.set_role_tablespace_quota('rolespcu1', '"Rolespc3"', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', 'rolespc', '-1 mB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', '"Rolespc3"', '-1 Mb'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +DROP TABLE b, b2; +DROP ROLE rolespcu1, rolespcu2; +RESET search_path; +DROP SCHEMA rolespcrole; +DROP TABLESPACE rolespc; +DROP TABLESPACE rolespc2; +DROP TABLESPACE "Rolespc3"; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_tablespace_role_perseg.out b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_role_perseg.out new file mode 100644 index 00000000000..eafbb92aef6 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_role_perseg.out @@ -0,0 +1,232 @@ +-- Test role quota +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg; +NOTICE: tablespace "rolespc_perseg" does not exist, skipping +CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; +CREATE SCHEMA rolespc_persegrole; +SET search_path TO rolespc_persegrole; +DROP ROLE IF EXISTS rolespc_persegu1; +NOTICE: role "rolespc_persegu1" does not exist, skipping +DROP ROLE IF EXISTS rolespc_persegu2; +NOTICE: role "rolespc_persegu2" does not exist, skipping +CREATE ROLE rolespc_persegu1 NOLOGIN; +CREATE ROLE rolespc_persegu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded +-- change tablespace role quota +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail by tablespace schema perseg quota +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test alter owner +ALTER TABLE b OWNER TO rolespc_persegu2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg2; +NOTICE: tablespace "rolespc_perseg2" does not exist, skipping +CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; +ALTER TABLE b SET TABLESPACE rolespc_perseg2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc_perseg +ALTER TABLE b SET TABLESPACE rolespc_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Rolespc_perseg3"; +NOTICE: tablespace "Rolespc_perseg3" does not exist, skipping +CREATE TABLESPACE "Rolespc_perseg3" LOCATION '/tmp/rolespc_perseg3'; +CREATE ROLE "Rolespc_persegu3" NOLOGIN; +SELECT diskquota.set_role_tablespace_quota('"Rolespc_persegu3"', '"Rolespc_perseg3"', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_per_segment_quota('"Rolespc_perseg3"', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +DROP table b; +DROP ROLE rolespc_persegu1, rolespc_persegu2, "Rolespc_persegu3"; +RESET search_path; +DROP SCHEMA rolespc_persegrole; +DROP TABLESPACE rolespc_perseg; +DROP TABLESPACE rolespc_perseg2; +DROP TABLESPACE "Rolespc_perseg3"; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_tablespace_schema.out b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_schema.out new file mode 100644 index 00000000000..a7e57c594be --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_schema.out @@ -0,0 +1,147 @@ +-- Test schema +-- start_ignore +\! mkdir -p /tmp/schemaspc +-- end_ignore +CREATE SCHEMA spcs1; +DROP TABLESPACE IF EXISTS schemaspc; +NOTICE: tablespace "schemaspc" does not exist, skipping +CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; +SET search_path TO spcs1; +CREATE TABLE a(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +-- Test alter table set schema +CREATE SCHEMA spcs2; +ALTER TABLE spcs1.a SET SCHEMA spcs2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO spcs2.a SELECT generate_series(1,200); +ALTER TABLE spcs2.a SET SCHEMA spcs1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + spcs1 | schemaspc | 1 | 4030464 +(1 row) + +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/schemaspc2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc2; +NOTICE: tablespace "schemaspc2" does not exist, skipping +CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; +ALTER TABLE a SET TABLESPACE schemaspc2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- start_ignore +\! mkdir -p /tmp/schemaspc3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc3"; +NOTICE: tablespace "Schemaspc3" does not exist, skipping +CREATE TABLESPACE "Schemaspc3" LOCATION '/tmp/schemaspc3'; +CREATE SCHEMA "Spcs2"; +SELECT diskquota.set_schema_tablespace_quota('"Spcs2"', '"Schemaspc3"', '-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +RESET search_path; +DROP TABLE spcs1.a2, spcs1.a; +DROP SCHEMA spcs1, spcs2; +DROP TABLESPACE schemaspc; +DROP TABLESPACE schemaspc2; +DROP TABLESPACE "Schemaspc3"; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_tablespace_schema_perseg.out b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_schema_perseg.out new file mode 100644 index 00000000000..c27f3e0ea9e --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_tablespace_schema_perseg.out @@ -0,0 +1,282 @@ +-- Test schema +-- start_ignore +\! 
mkdir -p /tmp/schemaspc_perseg +-- end_ignore +-- Test tablespace quota perseg +CREATE SCHEMA spcs1_perseg; +DROP TABLESPACE IF EXISTS schemaspc_perseg; +NOTICE: tablespace "schemaspc_perseg" does not exist, skipping +CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO spcs1_perseg; +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail by tablespace schema diskquota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded +-- change tablespace schema quota +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail by tablespace schema perseg quota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test alter table set schema +CREATE SCHEMA spcs2_perseg; +ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); +ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/schemaspc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc_perseg2"; +NOTICE: tablespace "Schemaspc_perseg2" does not exist, skipping +CREATE TABLESPACE "Schemaspc_perseg2" LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE "Schemaspc_perseg2"; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test delete tablespace schema quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- +(0 rows) + +-- test config per segment quota +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','1'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + segratio +---------- 
+ 1 +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', '"Schemaspc_perseg2"','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + segratio +---------- + 1 +(1 row) + +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','-2'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + segratio +---------- +(0 rows) + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + segratio +---------- + 0 +(1 row) + +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','3'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + segratio +---------- + 3 +(1 row) + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + segratio +---------- + 3 +(1 row) + +SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('Schemaspc_perseg2', 'schemaspc_perseg'); + tablespace_name | per_seg_quota_ratio +-------------------+--------------------- + schemaspc_perseg | 2 + Schemaspc_perseg2 | 3 +(2 rows) + +RESET search_path; +DROP TABLE spcs1_perseg.a; +DROP SCHEMA spcs1_perseg; +DROP TABLESPACE schemaspc_perseg; +DROP TABLESPACE "Schemaspc_perseg2"; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_temp_role.out b/gpcontrib/diskquota/tests/regress/expected/test_temp_role.out new file mode 100644 index 00000000000..c29d67aa314 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_temp_role.out @@ -0,0 +1,41 @@ +-- Test temp table restrained by role id +CREATE SCHEMA strole; +CREATE ROLE u3temp NOLOGIN; +SET search_path TO strole; +SELECT diskquota.set_role_quota('u3temp', '1MB'); + set_role_quota +---------------- + +(1 row) + +CREATE TABLE a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE a OWNER TO u3temp; +CREATE TEMP TABLE ta(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+ALTER TABLE ta OWNER TO u3temp; +-- expected failed: fill temp table +INSERT INTO ta SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expected failed: +INSERT INTO a SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u3temp +DROP TABLE ta; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +DROP ROLE u3temp; +RESET search_path; +DROP SCHEMA strole; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_toast.out b/gpcontrib/diskquota/tests/regress/expected/test_toast.out new file mode 100644 index 00000000000..273f64b8582 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_toast.out @@ -0,0 +1,33 @@ +-- Test toast +CREATE SCHEMA s5; +SELECT diskquota.set_schema_quota('s5', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s5; +CREATE TABLE a5 (t text) DISTRIBUTED BY (t); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'message' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,10000)) +FROM generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert toast fail +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,1000)) +FROM generate_series(1,1000); +ERROR: schema's disk space quota exceeded with name: s5 +DROP TABLE a5; +RESET search_path; +DROP SCHEMA s5; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_truncate.out b/gpcontrib/diskquota/tests/regress/expected/test_truncate.out new file mode 100644 index 00000000000..b19df93214d --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_truncate.out @@ -0,0 +1,40 @@ +-- Test truncate +CREATE SCHEMA s7; +SELECT diskquota.set_schema_quota('s7', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s7; +CREATE TABLE a (i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE TABLE b (i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,30); +ERROR: schema's disk space quota exceeded with name: s7 +INSERT INTO b SELECT generate_series(1,30); +ERROR: schema's disk space quota exceeded with name: s7 +TRUNCATE TABLE a; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s7; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_uncommitted_table_size.out b/gpcontrib/diskquota/tests/regress/expected/test_uncommitted_table_size.out new file mode 100644 index 00000000000..5fe2b7e4da7 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_uncommitted_table_size.out @@ -0,0 +1,236 @@ +-- temp table +begin; +CREATE TEMP TABLE t1(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO t1 SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + t1 | 3932160 | -1 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 3932160 +(1 row) + +commit; +DROP table t1; +-- heap table +begin; +CREATE TABLE t2(i int) DISTRIBUTED BY (i); +INSERT INTO t2 SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + t2 | 3932160 | -1 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 3932160 +(1 row) + +commit; +-- heap table index +begin; +CREATE INDEX idx2 on t2(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + idx2 | 2490368 | -1 +(1 row) + +SELECT pg_table_size('idx2'); + pg_table_size +--------------- + 2490368 +(1 row) + +commit; +DROP table t2; +-- toast table +begin; +CREATE TABLE t3(t text) DISTRIBUTED BY (t); +INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; + tableid | size | segid +---------+--------+------- + t3 | 393216 | -1 +(1 row) + +SELECT pg_table_size('t3'); + pg_table_size +--------------- + 393216 +(1 row) + +commit; +DROP table t3; +-- AO table +begin; +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT 
generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); + ?column? +---------- + t +(1 row) + +commit; +-- AO table index +begin; +CREATE INDEX ao_idx on ao(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + ao_idx | 2490368 | -1 +(1 row) + +SELECT pg_table_size('ao_idx'); + pg_table_size +--------------- + 2490368 +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + ao | 1558696 | -1 +(1 row) + +SELECT pg_table_size('ao'); + pg_table_size +--------------- + 1558696 +(1 row) + +commit; +DROP TABLE ao; +-- AO table CTAS +begin; +CREATE TABLE ao (i) WITH(appendonly=true) AS SELECT generate_series(1, 10000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); + ?column? +---------- + t +(1 row) + +commit; +DROP TABLE ao; +-- AOCS table +begin; +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; + tableid | size | segid +---------+----------+------- + aocs | 10322072 | -1 +(1 row) + +SELECT pg_table_size('aocs'); + pg_table_size +--------------- + 10322072 +(1 row) + +commit; +-- AOCS table index +begin; +CREATE INDEX aocs_idx on aocs(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; + tableid | size | segid +----------+--------+------- + aocs_idx | 524288 | -1 +(1 row) + +SELECT pg_table_size('aocs_idx'); + pg_table_size +--------------- + 524288 +(1 row) + +commit; +DROP TABLE aocs; +-- AOCS table CTAS +begin; +CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; + tableid | size | segid +---------+--------+------- + aocs | 632864 | -1 +(1 row) + +SELECT pg_table_size('aocs'); + pg_table_size +--------------- + 632864 +(1 row) + +commit; +DROP TABLE aocs; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_update.out b/gpcontrib/diskquota/tests/regress/expected/test_update.out new file mode 100644 index 00000000000..e4ac6e3bad7 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_update.out @@ 
-0,0 +1,25 @@ +-- Test Update +CREATE SCHEMA s4; +SELECT diskquota.set_schema_quota('s4', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s4; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect update fail. +UPDATE a SET i = 100; +ERROR: schema's disk space quota exceeded with name: s4 +DROP TABLE a; +RESET search_path; +DROP SCHEMA s4; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_vacuum.out b/gpcontrib/diskquota/tests/regress/expected/test_vacuum.out new file mode 100644 index 00000000000..af6680e02b7 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_vacuum.out @@ -0,0 +1,61 @@ +-- Test vacuum full +CREATE SCHEMA s6; +SELECT diskquota.set_schema_quota('s6', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s6; +CREATE TABLE a (i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE TABLE b (i int) DISTRIBUTED BY (i); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: s6 +-- expect insert fail +INSERT INTO b SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: s6 +DELETE FROM a WHERE i > 10; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +VACUUM FULL a; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; + tableid | size | segid +---------+-------+------- + b | 0 | 2 + b | 0 | 1 + b | 0 | 0 + b | 0 | -1 + a | 32768 | 2 + a | 32768 | 1 + a | 32768 | 0 + a | 98304 | -1 +(8 rows) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,10); +INSERT INTO b SELECT generate_series(1,10); +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s6; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_worker_not_ready.out b/gpcontrib/diskquota/tests/regress/expected/test_worker_not_ready.out new file mode 100644 index 00000000000..8d61fb6255b --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_worker_not_ready.out @@ -0,0 +1,26 @@ +CREATE DATABASE db_not_ready; +\c db_not_ready; +CREATE TABLE t (i int) DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +CREATE EXTENSION diskquota_test; +SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB'); +ERROR: Can not set disk quota for system owner: gpadmin +SELECT diskquota.pause(); + pause +------- + +(1 row) + +-- diskquota.wait_for_worker_new_epoch() cannot be used here because +-- diskquota.state is not clean. 
+SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE db_not_ready; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule.out b/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule.out new file mode 100644 index 00000000000..89fe78bd02a --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule.out @@ -0,0 +1,650 @@ +-- start_ignore +\c +DROP DATABASE IF EXISTS t1; +NOTICE: database "t1" does not exist, skipping +DROP DATABASE IF EXISTS t2; +NOTICE: database "t2" does not exist, skipping +DROP DATABASE IF EXISTS t3; +NOTICE: database "t3" does not exist, skipping +DROP DATABASE IF EXISTS t4; +NOTICE: database "t4" does not exist, skipping +DROP DATABASE IF EXISTS t5; +NOTICE: database "t5" does not exist, skipping +DROP DATABASE IF EXISTS t6; +NOTICE: database "t6" does not exist, skipping +DROP DATABASE IF EXISTS t7; +NOTICE: database "t7" does not exist, skipping +DROP DATABASE IF EXISTS t8; +NOTICE: database "t8" does not exist, skipping +DROP DATABASE IF EXISTS t9; +NOTICE: database "t9" does not exist, skipping +DROP DATABASE IF EXISTS t10; +NOTICE: database "t10" does not exist, skipping +DROP DATABASE IF EXISTS t11; +NOTICE: database "t11" does not exist, skipping +DROP DATABASE IF EXISTS t12; +NOTICE: database "t12" does not exist, skipping +CREATE DATABASE t1; +CREATE DATABASE t2; +CREATE DATABASE t3; +CREATE DATABASE t4; +CREATE DATABASE t5; +CREATE DATABASE t6; +CREATE DATABASE t7; +CREATE DATABASE t8; +CREATE DATABASE t9; +CREATE DATABASE t10; +CREATE DATABASE t11; +CREATE DATABASE t12; +--end_ignore +\c t1 +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f1 | 98304 | -1 +(1 row) + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1; +20220719:17:37:46:030120 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1' +\! gpstop -arf; +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... 
+20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:37:48:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:37:51:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:37:51:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20220719:17:37:51:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220719:17:37:55:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220719:17:37:55:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220719:17:37:56:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c +SHOW diskquota.max_workers; + diskquota.max_workers +----------------------- + 1 +(1 row) + +\c t2 +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f2 | 98304 | -1 +(1 row) + +\c t3 +CREATE EXTENSION diskquota; +CREATE TABLE f3(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f3 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f3 | 98304 | -1 +(1 row) + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 11; +20220727:14:23:23:025074 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 11' +\! gpstop -arf; +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:38:29:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:38:33:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:38:33:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20220719:17:38:33:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220719:17:38:38:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c +SHOW diskquota.max_workers; + diskquota.max_workers +----------------------- + 11 +(1 row) + +\c t4 +CREATE EXTENSION diskquota; +CREATE TABLE f4(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f4 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f4 | 98304 | -1 +(1 row) + +\c t5 +CREATE EXTENSION diskquota; +CREATE TABLE f5(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f5 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f5 | 98304 | -1 +(1 row) + +\c t6 +CREATE EXTENSION diskquota; +CREATE TABLE f6(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f6 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f6 | 98304 | -1 +(1 row) + +\c t7 +CREATE EXTENSION diskquota; +CREATE TABLE f7(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f7 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f7 | 98304 | -1 +(1 row) + +\c t8 +CREATE EXTENSION diskquota; +CREATE TABLE f8(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f8 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f8'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f8 | 98304 | -1 +(1 row) + +\c t9 +CREATE EXTENSION diskquota; +CREATE TABLE f9(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f9 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f9'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f9 | 98304 | -1 +(1 row) + +\c t10 +CREATE EXTENSION diskquota; +CREATE TABLE f10(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f10 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f10'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f10 | 98304 | -1 +(1 row) + +\c t11 +CREATE EXTENSION diskquota; +CREATE TABLE f11(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f11 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f11'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f11 | 98304 | -1 +(1 row) + +\c t1 +INSERT into f1 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + f1 | 3997696 | -1 +(1 row) + +\c t7 +INSERT into f7 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + f7 | 3997696 | -1 +(1 row) + +\c t1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +DROP TABLE f1; +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f1 | 98304 | -1 +(1 row) + +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +DROP TABLE f2; +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f2 | 98304 | -1 +(1 row) + +\c t3 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t4 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t5 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t6 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t7 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t8 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t9 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t10 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t11 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t12 +CREATE EXTENSION diskquota; +CREATE TABLE f12(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f12 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f12'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f12 | 98304 | -1 +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +--start_ignore +\c contrib_regression +DROP DATABASE t1; +DROP DATABASE t2; +DROP DATABASE t3; +DROP DATABASE t4; +DROP DATABASE t5; +DROP DATABASE t6; +DROP DATABASE t7; +DROP DATABASE t8; +DROP DATABASE t9; +DROP DATABASE t10; +DROP DATABASE t11; +DROP DATABASE t12; +\! gpconfig -r diskquota.worker_timeout; +20220719:17:19:18:023651 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-r diskquota.worker_timeout' +\! gpconfig -r diskquota.naptime; +20220719:17:19:20:023738 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-r diskquota.naptime' +\! gpconfig -r diskquota.max_workers; +20220719:17:19:23:023824 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' +\! gpstop -arf; +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:19:24:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... 
+20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:19:28:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:19:28:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20220719:17:19:28:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220719:17:19:33:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore diff --git a/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule_exception.out b/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule_exception.out new file mode 100644 index 00000000000..432e27f9943 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule_exception.out @@ -0,0 +1,123 @@ +-- start_ignore +\! gpconfig -c diskquota.max_workers -v 10; +20221209:16:01:17:089154 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 10' +\! gpconfig -c diskquota.naptime -v 4; +20221209:16:01:19:089255 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 4' +\! gpstop -arf; +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Starting gpstop with args: -arf +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Gathering information and validating the environment... +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Greenplum Master catalog information +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Segment details from master... 
+20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.22.1+dev.36.gedf0e003f8 build dev' +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing Master instance shutdown with mode='fast' +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Master segment instance directory=/Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Attempting forceful termination of any leftover master process +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Terminating processes for segment /Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Stopping master standby host wxiaoranVKGWQ.vmware.com mode=fast +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown standby process on wxiaoranVKGWQ.vmware.com +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:18:21:25:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:18:21:25:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20221209:18:21:25:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments stopped successfully = 6 +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments with errors during stop = 0 +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown 6 of 6 segment instances +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Database successfully shutdown with no errors reported +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpmmon process +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpmmon process found +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpsmon processes +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover shared memory +20221209:18:21:27:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Restarting System... +\c +DROP DATABASE IF EXISTS t1; +NOTICE: database "t1" does not exist, skipping +DROP DATABASE IF EXISTS t2; +NOTICE: database "t2" does not exist, skipping +--end_ignore +CREATE DATABASE t1; +CREATE DATABASE t2; +\c t1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\! pgrep -f "[p]ostgres.*bgworker.*t1" | xargs kill; +\! sleep 0.5 ; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +2 +-- start_ignore +\! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep + 503 89701 89678 0 4:01PM ?? 0:00.17 postgres: 6000, bgworker: [diskquota] - launcher + 503 89743 89678 0 4:01PM ?? 0:00.03 postgres: 6000, bgworker: [diskquota] contrib_regression cmd1 +--end_ignore +\c contrib_regression +DROP DATABASE t1; +\c t2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE t2; +--start_ignore +\! gpconfig -r diskquota.naptime; +20221209:16:02:10:089976 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-r diskquota.naptime' +\! gpconfig -r diskquota.max_workers; +20221209:16:02:12:090078 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' +\! gpstop -arf; +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Starting gpstop with args: -arf +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Gathering information and validating the environment... +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Greenplum Master catalog information +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Segment details from master... +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.22.1+dev.36.gedf0e003f8 build dev' +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing Master instance shutdown with mode='fast' +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Master segment instance directory=/Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Attempting forceful termination of any leftover master process +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Terminating processes for segment /Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:16:02:13:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Stopping master standby host wxiaoranVKGWQ.vmware.com mode=fast +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown standby process on wxiaoranVKGWQ.vmware.com +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments stopped successfully = 6 +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments with errors during stop = 0 +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown 6 of 6 segment instances +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Database successfully shutdown with no errors reported +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpmmon process +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpmmon process found +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpsmon processes +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover shared memory +20221209:16:02:17:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Restarting System... +--end_ignore diff --git a/gpcontrib/diskquota/tests/regress/sql/config.sql b/gpcontrib/diskquota/tests/regress/sql/config.sql new file mode 100644 index 00000000000..d8f54870ae4 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/config.sql @@ -0,0 +1,22 @@ +--start_ignore +CREATE DATABASE diskquota; + +\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); +\! gpconfig -c diskquota.naptime -v 0 --skipvalidation +\! gpconfig -c max_worker_processes -v 20 --skipvalidation +\! gpconfig -c diskquota.hard_limit -v "off" --skipvalidation +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +\! gpconfig -c plpython3.python_path -v "'$GPHOME/lib/python'" --skipvalidation; +\! gpconfig -c log_min_messages -v debug1 + +\! gpstop -raf +--end_ignore + +\c +-- Show the values of all GUC variables +-- start_ignore +SHOW diskquota.naptime; +-- end_ignore +SHOW diskquota.max_active_tables; +SHOW diskquota.worker_timeout; +SHOW diskquota.hard_limit; diff --git a/gpcontrib/diskquota/tests/regress/sql/dummy.sql b/gpcontrib/diskquota/tests/regress/sql/dummy.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/gpcontrib/diskquota/tests/regress/sql/reset_config.sql b/gpcontrib/diskquota/tests/regress/sql/reset_config.sql new file mode 100644 index 00000000000..7d0330fbcdf --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/reset_config.sql @@ -0,0 +1,6 @@ +--start_ignore +\! gpconfig -c diskquota.naptime -v 2 +\! gpstop -u +--end_ignore + +SHOW diskquota.naptime; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_activetable_limit.sql b/gpcontrib/diskquota/tests/regress/sql/test_activetable_limit.sql new file mode 100644 index 00000000000..9ab6666a0e0 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_activetable_limit.sql @@ -0,0 +1,53 @@ +-- table in 'diskquota not enabled database' should not be activetable +\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +\! 
gpstop -arf > /dev/null + +\c + +CREATE DATABASE test_tablenum_limit_01; +CREATE DATABASE test_tablenum_limit_02; + +\c test_tablenum_limit_01 + +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); + +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); + +\c test_tablenum_limit_02 +CREATE EXTENSION diskquota; +CREATE SCHEMA s; +SELECT diskquota.set_schema_quota('s', '1 MB'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -- activetable = 1 +INSERT INTO s.t1 SELECT generate_series(1, 100000); -- ok. the diskquota soft limit is not checked on the first write + +SELECT diskquota.wait_for_worker_new_epoch(); + +CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -- activetable = 2 +INSERT INTO s.t2 SELECT generate_series(1, 10); -- expect failed +CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -- activetable = 3 should not crash. +INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect failed + +-- Q: why does diskquota still work when activetable = 3? +-- A: the active table limit is bounded by the shared memory size, calculated by hash_estimate_size(); +-- the result is bigger than sizeof(DiskQuotaActiveTableEntry) * max_active_tables, and +-- the real capacity of this data structure depends on the hash collision probability, +-- so we cannot predict when the data structure will be completely full. +-- +-- This test case is not very useful; remove it if anyone dislikes it. +-- However, the hash capacity is smaller than 6, so the test case still covers issue 51 + +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE test_tablenum_limit_01; +DROP DATABASE test_tablenum_limit_02; + +\! gpconfig -r diskquota.max_active_tables > /dev/null +\! gpstop -arf > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/sql/test_appendonly.sql b/gpcontrib/diskquota/tests/regress/sql/test_appendonly.sql new file mode 100644 index 00000000000..c1e996bc820 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_appendonly.sql @@ -0,0 +1,48 @@ +-- Create new schema for running tests. +CREATE SCHEMA s_appendonly; +SET search_path TO s_appendonly; + +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +-- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations. +CREATE INDEX index_t ON t_ao(i); +CREATE INDEX index_t2 ON t_aoco(i); + +-- 1. Show that the relation's size in diskquota.table_size +-- is identical to the result of pg_table_size(). +INSERT INTO t_ao SELECT generate_series(1, 100); +INSERT INTO t_aoco SELECT generate_series(1, 100); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- Query the size of t_ao. +SELECT tableid::regclass, size + FROM diskquota.table_size + WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_ao') and segid=-1; + +SELECT pg_table_size('t_ao'); + +-- Query the size of t_aoco. +SELECT tableid::regclass, size + FROM diskquota.table_size + WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_aoco') and segid=-1; + +SELECT pg_table_size('t_aoco'); + +-- 2. Test that we are able to enforce the quota limit on appendonly tables. +SELECT diskquota.set_schema_quota('s_appendonly', '2 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect success. +INSERT INTO t_ao SELECT generate_series(1, 100000); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect fail.
+INSERT INTO t_ao SELECT generate_series(1, 10); +INSERT INTO t_aoco SELECT generate_series(1, 10); + +DROP TABLE t_ao; +DROP TABLE t_aoco; + +SET search_path TO DEFAULT; +DROP SCHEMA s_appendonly; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_clean_rejectmap_after_drop.sql b/gpcontrib/diskquota/tests/regress/sql/test_clean_rejectmap_after_drop.sql new file mode 100644 index 00000000000..10a5f9618c0 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_clean_rejectmap_after_drop.sql @@ -0,0 +1,28 @@ +CREATE DATABASE test_clean_rejectmap_after_drop; + +\c test_clean_rejectmap_after_drop +CREATE EXTENSION diskquota; + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + +CREATE ROLE r; +SELECT diskquota.set_role_quota('r', '1MB'); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO r; +SELECT diskquota.wait_for_worker_new_epoch(); + +INSERT INTO b SELECT generate_series(1, 100000000); -- fail + +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +INSERT INTO b SELECT generate_series(1, 100); -- ok + +\c contrib_regression +DROP DATABASE test_clean_rejectmap_after_drop; +DROP ROLE r; + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/sql/test_column.sql b/gpcontrib/diskquota/tests/regress/sql/test_column.sql new file mode 100644 index 00000000000..125940ed9da --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_column.sql @@ -0,0 +1,22 @@ +-- Test alter table add column +CREATE SCHEMA scolumn; +SELECT diskquota.set_schema_quota('scolumn', '1 MB'); +SET search_path TO scolumn; +SELECT diskquota.wait_for_worker_new_epoch(); + +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); +-- expect fail +INSERT INTO a2 SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect fail +INSERT INTO a2 SELECT generate_series(1,10); +ALTER TABLE a2 ADD COLUMN j VARCHAR(50); +UPDATE a2 SET j = 'add value for column j'; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert failed after add column +INSERT INTO a2 SELECT generate_series(1,10); + +DROP TABLE a2; +RESET search_path; +DROP SCHEMA scolumn; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_copy.sql b/gpcontrib/diskquota/tests/regress/sql/test_copy.sql new file mode 100644 index 00000000000..92003562370 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_copy.sql @@ -0,0 +1,18 @@ +-- Test copy +CREATE SCHEMA s3; +SELECT diskquota.set_schema_quota('s3', '1 MB'); +SET search_path TO s3; + +\! 
seq 100 > /tmp/csmall.txt + +CREATE TABLE c (i int) DISTRIBUTED BY (i); +COPY c FROM '/tmp/csmall.txt'; +-- expect failed +INSERT INTO c SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect copy fail +COPY c FROM '/tmp/csmall.txt'; + +DROP TABLE c; +RESET search_path; +DROP SCHEMA s3; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_create_extension.sql b/gpcontrib/diskquota/tests/regress/sql/test_create_extension.sql new file mode 100644 index 00000000000..dfbc96a373e --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_create_extension.sql @@ -0,0 +1,6 @@ +CREATE EXTENSION diskquota; + +SELECT diskquota.init_table_size_table(); + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); \ No newline at end of file diff --git a/gpcontrib/diskquota/tests/regress/sql/test_ctas_before_set_quota.sql b/gpcontrib/diskquota/tests/regress/sql/test_ctas_before_set_quota.sql new file mode 100644 index 00000000000..8e3cb08ab4f --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_ctas_before_set_quota.sql @@ -0,0 +1,32 @@ +CREATE ROLE test SUPERUSER; + +SET ROLE test; + +CREATE TABLE t_before_set_quota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_before_set_quota'::regclass ORDER BY segid; + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + +SELECT diskquota.set_role_quota(current_role, '1MB'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + +SELECT diskquota.set_role_quota(current_role, '-1'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +DROP TABLE t_before_set_quota; + +RESET ROLE; + +DROP ROLE test; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_ctas_no_preload_lib.sql b/gpcontrib/diskquota/tests/regress/sql/test_ctas_no_preload_lib.sql new file mode 100644 index 00000000000..9af257b905c --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_ctas_no_preload_lib.sql @@ -0,0 +1,51 @@ +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -far > /dev/null +\c + +CREATE ROLE test SUPERUSER; + +SET ROLE test; + +-- Create table with diskquota disabled +CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null +\! gpstop -far > /dev/null +\c + +SET ROLE test; + +-- Init table_size to include the table +SELECT diskquota.init_table_size_table(); + +-- Restart to load diskquota.table_size to the memory. +\! 
gpstop -far > /dev/null +\c +SET ROLE test; +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + +SELECT diskquota.set_role_quota(current_role, '1MB'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + +SELECT diskquota.set_role_quota(current_role, '-1'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + +DROP TABLE t_without_diskquota; + +RESET ROLE; + +DROP ROLE test; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_ctas_pause.sql b/gpcontrib/diskquota/tests/regress/sql/test_ctas_pause.sql new file mode 100644 index 00000000000..425344fbb77 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_ctas_pause.sql @@ -0,0 +1,21 @@ +CREATE SCHEMA hardlimit_s; +SET search_path TO hardlimit_s; + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail + +SELECT diskquota.pause(); + +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed + +-- disable hardlimit and do some clean-ups. +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.resume(); + +DROP SCHEMA hardlimit_s CASCADE; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_ctas_role.sql b/gpcontrib/diskquota/tests/regress/sql/test_ctas_role.sql new file mode 100644 index 00000000000..93e1c628550 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_ctas_role.sql @@ -0,0 +1,42 @@ +-- Test that diskquota is able to cancel a running CTAS query by the role quota. +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- end_ignore +CREATE ROLE hardlimit_r; +SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; +SET ROLE hardlimit_r; + +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- temp table +CREATE TEMP TABLE t2 (i) AS SELECT generate_series(1, 100000000); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- toast table +CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +SELECT diskquota.wait_for_worker_new_epoch(); + +-- disable hardlimit and do some clean-ups. 
+DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET ROLE; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; +DROP ROLE hardlimit_r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/sql/test_ctas_schema.sql b/gpcontrib/diskquota/tests/regress/sql/test_ctas_schema.sql new file mode 100644 index 00000000000..06b11592176 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_ctas_schema.sql @@ -0,0 +1,35 @@ +-- Test that diskquota is able to cancel a running CTAS query by the schema quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); +SET search_path TO hardlimit_s; + +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +SELECT diskquota.wait_for_worker_new_epoch(); + +-- disable hardlimit and do some clean-ups. +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET search_path; +DROP SCHEMA hardlimit_s; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_ctas_tablespace_role.sql b/gpcontrib/diskquota/tests/regress/sql/test_ctas_tablespace_role.sql new file mode 100644 index 00000000000..35a236fcdb3 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_ctas_tablespace_role.sql @@ -0,0 +1,48 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- start_ignore +\! mkdir -p /tmp/ctas_rolespc +-- end_ignore + +-- prepare role and tablespace. 
+DROP TABLESPACE IF EXISTS ctas_rolespc; +CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; +CREATE ROLE hardlimit_r; +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; +GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; +SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); +SET default_tablespace = ctas_rolespc; +SET ROLE hardlimit_r; + +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- disable hardlimit and do some clean-ups. +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET ROLE; +RESET default_tablespace; +DROP TABLESPACE ctas_rolespc; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; +DROP ROLE hardlimit_r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/sql/test_ctas_tablespace_schema.sql b/gpcontrib/diskquota/tests/regress/sql/test_ctas_tablespace_schema.sql new file mode 100644 index 00000000000..b467566e1b2 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_ctas_tablespace_schema.sql @@ -0,0 +1,46 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + +-- start_ignore +\! mkdir -p /tmp/ctas_schemaspc +-- end_ignore + +-- prepare tablespace and schema +DROP TABLESPACE IF EXISTS ctas_schemaspc; +CREATE TABLESPACE ctas_schemaspc LOCATION '/tmp/ctas_schemaspc'; +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_tablespace_quota('hardlimit_s', 'ctas_schemaspc', '1 MB'); +SET search_path TO hardlimit_s; +SET default_tablespace = ctas_schemaspc; + +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- disable hardlimit and do some clean-ups +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET search_path; +RESET default_tablespace; +DROP SCHEMA hardlimit_s; +DROP TABLESPACE ctas_schemaspc; +\! 
gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/sql/test_dbname_encoding.sql b/gpcontrib/diskquota/tests/regress/sql/test_dbname_encoding.sql new file mode 100644 index 00000000000..6ae65b3ea15 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_dbname_encoding.sql @@ -0,0 +1,23 @@ +-- create a database with non-ascii characters +CREATE DATABASE 数据库1; + +\c 数据库1 + +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +-- check whether current database name is logged. +SELECT + count(logpid) > 0 +FROM + gp_toolkit.__gp_log_master_ext +WHERE + position( + '[diskquota] start disk quota worker process to monitor database' in logmessage + ) > 0 + AND position(current_database() in logmessage) > 0; + +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE 数据库1; \ No newline at end of file diff --git a/gpcontrib/diskquota/tests/regress/sql/test_default_tablespace.sql b/gpcontrib/diskquota/tests/regress/sql/test_default_tablespace.sql new file mode 100644 index 00000000000..ede1e48180c --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_default_tablespace.sql @@ -0,0 +1,110 @@ +-- test role_tablespace_quota works with tables/databases in default tablespace +-- test role_tablespace_quota works with tables/databases in non-default tablespace with hard limits on + +-- start_ignore +\! mkdir -p /tmp/custom_tablespace +-- end_ignore + +DROP ROLE if EXISTS role1; +DROP ROLE if EXISTS role2; +CREATE ROLE role1 SUPERUSER; +CREATE ROLE role2 SUPERUSER; +SET ROLE role1; + +DROP TABLE if EXISTS t; +CREATE TABLE t (i int) DISTRIBUTED BY (i); + +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to success +INSERT INTO t SELECT generate_series(1, 100); +INSERT INTO t SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to fail +INSERT INTO t SELECT generate_series(1, 1000000); + +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + +DROP TABLE IF EXISTS t; +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '-1'); + +SET ROLE role2; +CREATE TABLE t (i int) DISTRIBUTED BY (i); + +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role2', 'pg_default', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to fail because of hard limits +INSERT INTO t SELECT generate_series(1, 50000000); +DROP TABLE IF EXISTS t; + +SET ROLE role1; +-- database in customized tablespace +CREATE TABLESPACE custom_tablespace LOCATION '/tmp/custom_tablespace'; +CREATE DATABASE db_with_tablespace TABLESPACE custom_tablespace; +\c db_with_tablespace; +SET ROLE role1; +CREATE EXTENSION diskquota; + +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to success +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to fail +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); + +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + +DROP TABLE IF EXISTS t_in_custom_tablespace; +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '-1'); +SELECT diskquota.wait_for_worker_new_epoch(); +SET ROLE role2; + +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role2', 'custom_tablespace', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +DROP TABLE IF EXISTS t_in_custom_tablespace; +-- expect insert to fail because of hard limits +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); + +-- clean up +DROP TABLE IF EXISTS t_in_custom_tablespace; + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION IF EXISTS diskquota; + +\c contrib_regression; +SELECT diskquota.wait_for_worker_new_epoch(); +DROP DATABASE IF EXISTS db_with_tablespace; +DROP TABLESPACE IF EXISTS custom_tablespace; + +RESET ROLE; +DROP ROLE IF EXISTS role1; +DROP ROLE IF EXISTS role2; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_delete_quota.sql b/gpcontrib/diskquota/tests/regress/sql/test_delete_quota.sql new file mode 100644 index 00000000000..3658b5ac314 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_delete_quota.sql @@ -0,0 +1,19 @@ +-- Test delete disk quota +CREATE SCHEMA deleteschema; +SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); +SET search_path TO deleteschema; + +CREATE TABLE c (i INT) DISTRIBUTED BY (i); +-- expect failed +INSERT INTO c SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect fail +INSERT INTO c SELECT generate_series(1,100); +SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +INSERT INTO c SELECT generate_series(1,100); + +DROP TABLE c; +RESET search_path; +DROP SCHEMA deleteschema; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_drop_after_pause.sql b/gpcontrib/diskquota/tests/regress/sql/test_drop_after_pause.sql new file mode 100644 index 00000000000..ec51a8ddc24 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_drop_after_pause.sql @@ -0,0 +1,31 @@ +CREATE DATABASE test_drop_after_pause; + +\c test_drop_after_pause + +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a SELECT generate_series(1,10000000); -- expect insert fail + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c contrib_regression + +DROP DATABASE test_drop_after_pause; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_drop_any_extension.sql b/gpcontrib/diskquota/tests/regress/sql/test_drop_any_extension.sql new file mode 100644 index 00000000000..91a95dc2fc9 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_drop_any_extension.sql @@ -0,0 +1,23 @@ +CREATE DATABASE test_drop_db; + +\c test_drop_db + +CREATE EXTENSION diskquota; +CREATE EXTENSION gp_inject_fault; +SELECT diskquota.init_table_size_table(); + +SELECT diskquota.set_schema_quota(current_schema, '1MB'); +CREATE TABLE t(i int); + +DROP EXTENSION gp_inject_fault; + +-- expect success +INSERT INTO t SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect fail +INSERT INTO t SELECT generate_series(1, 100000); + +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE test_drop_db; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_drop_extension.sql b/gpcontrib/diskquota/tests/regress/sql/test_drop_extension.sql new file mode 100644 index 00000000000..09f5b11fa7a --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_drop_extension.sql @@ -0,0 +1,3 @@ +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_drop_table.sql b/gpcontrib/diskquota/tests/regress/sql/test_drop_table.sql new file mode 100644 index 00000000000..7c0cd86ec54 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_drop_table.sql @@ -0,0 +1,19 @@ +-- Test Drop table +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); +SET search_path TO sdrtbl; +CREATE TABLE a(i INT) DISTRIBUTED BY (i); +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +DROP TABLE a; +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO a2 SELECT generate_series(1,100); + +DROP TABLE a2; +RESET search_path; +DROP SCHEMA sdrtbl; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_extension.sql b/gpcontrib/diskquota/tests/regress/sql/test_extension.sql new file mode 100644 index 00000000000..7ba3c34c1b9 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_extension.sql @@ -0,0 +1,206 @@ +-- NOTE: when test this script, you must make sure that there is no diskquota +-- worker process. +CREATE DATABASE dbx0 ; +CREATE DATABASE dbx1 ; +CREATE DATABASE dbx2 ; +CREATE DATABASE dbx3 ; +CREATE DATABASE dbx4 ; +CREATE DATABASE dbx5 ; +CREATE DATABASE dbx6 ; +CREATE DATABASE dbx7 ; +CREATE DATABASE dbx8 ; +CREATE DATABASE dbx9 ; +CREATE DATABASE dbx10 ; + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 20 --skipvalidation +\! 
gpstop -arf +--end_ignore +\c +show max_worker_processes; +show diskquota.max_workers; + +SELECT diskquota.wait_for_worker_new_epoch(); + +\c dbx0 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx1 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +INSERT INTO SX.a values(generate_series(0, 100000)); +CREATE EXTENSION diskquota; +SELECT diskquota.init_table_size_table(); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT diskquota.set_schema_quota('SX', '1MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx3 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx4 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx5 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx6 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx7 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx8 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx9 +CREATE EXTENSION diskquota; 
+SELECT diskquota.wait_for_worker_new_epoch(); + +\c dbx10 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\c dbx0 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx3 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx4 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx5 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx6 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx7 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx8 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx9 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c dbx10 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c contrib_regression + +DROP DATABASE dbx0 ; +DROP DATABASE dbx1 ; +DROP DATABASE dbx2 ; +DROP DATABASE dbx3 ; +DROP DATABASE dbx4 ; +DROP DATABASE dbx5 ; +DROP DATABASE dbx6 ; +DROP DATABASE dbx7 ; +DROP DATABASE dbx8 ; +DROP DATABASE dbx9 ; +DROP DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +\! gpstop -arf; +--end_ignore +\c +show diskquota.max_workers; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_fast_disk_check.sql b/gpcontrib/diskquota/tests/regress/sql/test_fast_disk_check.sql new file mode 100644 index 00000000000..c15e1bfed4f --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_fast_disk_check.sql @@ -0,0 +1,12 @@ +-- Test SCHEMA +CREATE SCHEMA s1; +SET search_path to s1; + +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,200000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_fetch_table_stat.sql b/gpcontrib/diskquota/tests/regress/sql/test_fetch_table_stat.sql new file mode 100644 index 00000000000..0eabbdaf536 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_fetch_table_stat.sql @@ -0,0 +1,24 @@ +-- +-- 1. Test that when an error occurs in diskquota_fetch_table_stat +-- the error message is preserved for us to debug. +-- + +CREATE TABLE t_error_handling (i int) DISTRIBUTED BY (i); +-- Inject an error to a segment server, since this UDF is only called on segments. +SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Dispatch diskquota_fetch_table_stat to segments. +-- There should be a warning message from segment server saying: +-- fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error' +-- We're not interested in the oid here, we aggregate the result by COUNT(*). 
+SELECT COUNT(*) + FROM (SELECT diskquota.diskquota_fetch_table_stat(1, array[(SELECT oid FROM pg_class WHERE relname='t_error_handling')]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0) AS count; + +-- Reset the fault injector to prevent future failure. +SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Do some clean-ups. +DROP TABLE t_error_handling; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_index.sql b/gpcontrib/diskquota/tests/regress/sql/test_index.sql new file mode 100644 index 00000000000..9aa3ef02fe9 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_index.sql @@ -0,0 +1,64 @@ +-- Test schema +-- start_ignore +\! mkdir -p /tmp/indexspc +-- end_ignore +CREATE SCHEMA indexschema1; +DROP TABLESPACE IF EXISTS indexspc; +CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; +SET search_path TO indexschema1; + +CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); +INSERT INTO test_index_a SELECT generate_series(1,20000); + +SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view +WHERE schema_name='indexschema1' and tablespace_name='indexspc'; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + +-- create index for the table, index in default tablespace +CREATE INDEX a_index ON test_index_a(i); +INSERT INTO test_index_a SELECT generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'a_index'::regclass +ORDER BY segid; + +-- add index to tablespace indexspc +ALTER index a_index SET TABLESPACE indexspc; +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); + +-- index tablespace quota exceeded +ALTER table test_index_a SET TABLESPACE pg_default; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +INSERT INTO test_index_a SELECT generate_series(1,200000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +RESET search_path; +DROP INDEX indexschema1.a_index; +DROP TABLE indexschema1.test_index_a; +DROP SCHEMA indexschema1; +DROP TABLESPACE indexspc; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_init_table_size_table.sql b/gpcontrib/diskquota/tests/regress/sql/test_init_table_size_table.sql new file mode 
100644 index 00000000000..4c871889b92 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_init_table_size_table.sql @@ -0,0 +1,50 @@ +-- heap table +CREATE TABLE t(i int) DISTRIBUTED BY (i); +INSERT INTO t SELECT generate_series(1, 100000); + +-- heap table index +CREATE INDEX idx on t(i); + +-- toast table +CREATE TABLE toast(t text) DISTRIBUTED BY (t); +INSERT INTO toast SELECT repeat('a', 10000) FROM generate_series(1, 1000); + +-- toast table index +CREATE INDEX toast_idx on toast(t); + +-- AO table +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT generate_series(1, 100000); + +-- AO table index +CREATE INDEX ao_idx on ao(i); + +-- AOCS table +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; + +-- AOCS table index +CREATE INDEX aocs_idx on aocs(i); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- Tables here are fetched by diskquota_fetch_table_stat() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + +-- init diskquota.table_size +SELECT diskquota.init_table_size_table(); + +-- diskquota.table_size should not change after init_table_size_table() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + + +DROP TABLE t; +DROP TABLE toast; +DROP TABLE ao; +DROP TABLE aocs; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_insert_after_drop.sql b/gpcontrib/diskquota/tests/regress/sql/test_insert_after_drop.sql new file mode 100644 index 00000000000..d744fd7c552 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_insert_after_drop.sql @@ -0,0 +1,19 @@ +CREATE DATABASE db_insert_after_drop; +\c db_insert_after_drop +CREATE EXTENSION diskquota; +-- Test Drop Extension +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); +SET search_path TO sdrtbl; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO a SELECT generate_series(1,100); +DROP EXTENSION diskquota; +INSERT INTO a SELECT generate_series(1,100); + +DROP TABLE a; +\c postgres +DROP DATABASE db_insert_after_drop; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_many_active_tables.sql b/gpcontrib/diskquota/tests/regress/sql/test_many_active_tables.sql new file mode 100644 index 00000000000..4c617cf6222 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_many_active_tables.sql @@ -0,0 +1,17 @@ +CREATE TABLE t1 (pk int, val int) +DISTRIBUTED BY (pk) +PARTITION BY RANGE (pk) (START (1) END (1000) EVERY (1)); + +INSERT INTO t1 +SELECT pk, val +FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk; + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; + +DROP TABLE t1; + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_max_monitored_databases.sql b/gpcontrib/diskquota/tests/regress/sql/test_max_monitored_databases.sql new file mode 100644 index 00000000000..f0e2e8c1aa9 --- /dev/null +++ 
b/gpcontrib/diskquota/tests/regress/sql/test_max_monitored_databases.sql @@ -0,0 +1,48 @@ +--start_ignore +\! gpconfig -c diskquota.max_monitored_databases -v 3 +\! gpstop -ari +--end_ignore + +\c + +DROP DATABASE IF EXISTS test_db1; +DROP DATABASE IF EXISTS test_db2; +DROP DATABASE IF EXISTS test_db3; + +CREATE DATABASE test_db1; +CREATE DATABASE test_db2; +CREATE DATABASE test_db3; + +\c test_db1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\c test_db2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect fail +\c test_db3 +CREATE EXTENSION diskquota; + +-- clean extension +\c test_db1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c test_db2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +-- clean database +\c contrib_regression +DROP DATABASE test_db1; +DROP DATABASE test_db2; +DROP DATABASE test_db3; + +-- start_ignore +\! gpconfig -r diskquota.max_monitored_databases +\! gpstop -ari +-- end_ignore \ No newline at end of file diff --git a/gpcontrib/diskquota/tests/regress/sql/test_mistake.sql b/gpcontrib/diskquota/tests/regress/sql/test_mistake.sql new file mode 100644 index 00000000000..fd0e9d300b3 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_mistake.sql @@ -0,0 +1,24 @@ +-- to make sure that the schema 'notfoundns' is really not found +select nspname from pg_namespace where nspname = 'notfoundns'; +select diskquota.set_schema_quota('notfoundns', '1 MB'); + +DROP SCHEMA IF EXISTS nmistake; +CREATE SCHEMA nmistake; +select diskquota.set_schema_quota('nmistake', '0 MB'); + +DROP ROLE IF EXISTS rmistake; +CREATE ROLE rmistake; +select diskquota.set_role_quota('rmistake', '0 MB'); + +-- start_ignore +\! 
mkdir -p /tmp/spcmistake +-- end_ignore +DROP TABLESPACE IF EXISTS spcmistake; +CREATE TABLESPACE spcmistake LOCATION '/tmp/spcmistake'; +SELECT diskquota.set_schema_tablespace_quota('nmistake', 'spcmistake','0 MB'); +SELECT diskquota.set_role_tablespace_quota('rmistake', 'spcmistake','0 MB'); +SELECT diskquota.set_per_segment_quota('spcmistake', 0); + +DROP SCHEMA nmistake; +DROP ROLE rmistake; +DROP TABLESPACE spcmistake; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_partition.sql b/gpcontrib/diskquota/tests/regress/sql/test_partition.sql new file mode 100644 index 00000000000..2409f59854f --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_partition.sql @@ -0,0 +1,37 @@ +-- Test partition table +CREATE SCHEMA s8; +SELECT diskquota.SET_schema_quota('s8', '1 MB'); +SET search_path TO s8; +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +)PARTITION BY RANGE (logdate) +( + PARTITION Feb06 START (date '2006-02-01') INCLUSIVE, + PARTITION Mar06 START (date '2006-03-01') INCLUSIVE + END (date '2016-04-01') EXCLUSIVE +); + +INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +-- expect insert fail +INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +-- expect insert fail +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; +DELETE FROM measurement WHERE logdate='2006-03-02'; +SELECT diskquota.wait_for_worker_new_epoch(); +VACUUM FULL measurement; +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; + +DROP TABLE measurement; +RESET search_path; +DROP SCHEMA s8; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_pause_and_resume.sql b/gpcontrib/diskquota/tests/regress/sql/test_pause_and_resume.sql new file mode 100644 index 00000000000..b5ab0748491 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_pause_and_resume.sql @@ -0,0 +1,38 @@ +-- Test pause and resume. 
+CREATE SCHEMA s1; +SET search_path TO s1; + +CREATE TABLE a(i int) DISTRIBUTED BY (i); + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); + +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- pause extension +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); + +-- resume extension +SELECT diskquota.resume(); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- table size should be updated after resume +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_pause_and_resume_multiple_db.sql b/gpcontrib/diskquota/tests/regress/sql/test_pause_and_resume_multiple_db.sql new file mode 100644 index 00000000000..10ff08e3bb4 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_pause_and_resume_multiple_db.sql @@ -0,0 +1,83 @@ +-- need 'contrib_regression' as test database +\c + +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE DATABASE test_pause_and_resume; +CREATE DATABASE test_new_create_database; + +\c test_pause_and_resume +CREATE SCHEMA s1; +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\c contrib_regression +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); +INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed + +\c test_pause_and_resume +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); +INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed + +\c contrib_regression +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail + +\c test_pause_and_resume +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail + +\c contrib_regression +SELECT diskquota.pause(); -- pause extension, only affects the current database +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed + +\c test_pause_and_resume +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail +SELECT diskquota.pause(); -- pause extension, only affects the current database +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed + +\c test_new_create_database; +CREATE SCHEMA s1; +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active although other databases are paused +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); +INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed +SELECT diskquota.set_schema_quota('s1', '1 MB');
+SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert fail +SELECT diskquota.pause(); -- pause extension, only affects the current database +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed + +-- resume should only affect the current database +SELECT diskquota.resume(); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail + +\c contrib_regression +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed +SELECT diskquota.resume(); +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail + +\c test_pause_and_resume +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c test_new_create_database +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c contrib_regression +DROP SCHEMA s1 CASCADE; +DROP DATABASE test_pause_and_resume; +DROP DATABASE test_new_create_database; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_primary_failure.in.sql b/gpcontrib/diskquota/tests/regress/sql/test_primary_failure.in.sql new file mode 100644 index 00000000000..2dd2689b6e7 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_primary_failure.in.sql @@ -0,0 +1,88 @@ +CREATE SCHEMA ftsr; +SELECT diskquota.set_schema_quota('ftsr', '1 MB'); +SET search_path TO ftsr; +create or replace language @PLPYTHON_LANG_STR@; +-- +-- pg_ctl: +-- datadir: data directory of process to target with `pg_ctl` +-- command: commands valid for `pg_ctl` +-- command_mode: modes valid for `pg_ctl -m` +-- +create or replace function pg_ctl(datadir text, command text, command_mode text default 'immediate') +returns text as $$ + import subprocess + if command not in ('stop', 'restart'): + return 'Invalid command input' + + cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir + cmd = cmd + '-W -m %s %s' % (command_mode, command) + if '@PLPYTHON_LANG_STR@' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') + +$$ language @PLPYTHON_LANG_STR@; + +create or replace function pg_recoverseg(datadir text, command text) +returns text as $$ + import subprocess + cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) + if '@PLPYTHON_LANG_STR@' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') +$$ language @PLPYTHON_LANG_STR@; + +CREATE TABLE a(i int, j int) DISTRIBUTED BY (i); +-- the entries will be inserted into seg0 +INSERT INTO a SELECT 2, generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass ORDER BY segid; + +-- expect insert fail +INSERT INTO a SELECT 2, generate_series(1,100); + +-- now one of the primaries is down +select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); + +-- switch mirror to primary +select gp_request_fts_probe_scan(); + +-- check GPDB status +select content, preferred_role, role,
status, mode from gp_segment_configuration where content = 0; + +-- expect insert fail +INSERT INTO a SELECT 2, generate_series(1,100); + +-- increase quota +SELECT diskquota.set_schema_quota('ftsr', '200 MB'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect insert success +INSERT INTO a SELECT 2, generate_series(1,10000); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- check whether monitored_dbid_cache is refreshed in mirror +-- diskquota.table_size should be updated +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass ORDER BY segid; + +-- pull up failed primary +-- start_ignore +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; +-- end_ignore + +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; +INSERT INTO a SELECT 2, generate_series(1,100); + +DROP TABLE a; +DROP SCHEMA ftsr CASCADE; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_quota_view_no_table.sql b/gpcontrib/diskquota/tests/regress/sql/test_quota_view_no_table.sql new file mode 100644 index 00000000000..11c0398bb36 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_quota_view_no_table.sql @@ -0,0 +1,31 @@ +CREATE ROLE no_table SUPERUSER; + +CREATE SCHEMA no_table; + +SELECT diskquota.set_schema_quota('no_table', '1 MB'); + +SELECT schema_name, quota_in_mb, nspsize_in_bytes +FROM diskquota.show_fast_schema_quota_view; + +SELECT diskquota.set_role_quota('no_table', '1 MB'); + +SELECT role_name, quota_in_mb, rolsize_in_bytes +FROM diskquota.show_fast_role_quota_view; + +SELECT diskquota.set_schema_tablespace_quota('no_table', 'pg_default', '1 MB'); + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view; + +SELECT diskquota.set_role_tablespace_quota('no_table', 'pg_default', '1 MB'); + +SELECT role_name, tablespace_name , quota_in_mb, rolsize_tablespace_in_bytes +FROM diskquota.show_fast_role_tablespace_quota_view; + +DROP ROLE no_table; + +DROP SCHEMA no_table; + +-- Wait until the quota configs are removed from the memory +-- automatically after DROP. +SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/sql/test_readiness_logged.sql b/gpcontrib/diskquota/tests/regress/sql/test_readiness_logged.sql new file mode 100644 index 00000000000..562733270aa --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_readiness_logged.sql @@ -0,0 +1,46 @@ +CREATE DATABASE test_readiness_logged; +\c test_readiness_logged + +-- Get bgworker's log by database name. +-- 1. select bgworker pid by database name. +-- 2. select logmessage by bgworker pid. 
+CREATE VIEW logmessage_count_view AS WITH logp AS(
+  SELECT
+    MAX(logpid) as max_logpid
+  FROM
+    gp_toolkit.__gp_log_master_ext
+  WHERE
+    position(
+      '[diskquota] start disk quota worker process to monitor database' in logmessage
+    ) > 0
+    AND position(current_database() in logmessage) > 0
+)
+SELECT
+  count(*)
+FROM
+  gp_toolkit.__gp_log_master_ext,
+  logp
+WHERE
+  logmessage = '[diskquota] diskquota is not ready'
+  and logpid = max_logpid;
+
+CREATE TABLE t (i int) DISTRIBUTED BY (i);
+
+CREATE EXTENSION diskquota;
+CREATE EXTENSION diskquota_test;
+SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');');
+
+-- logmessage count should be 1
+SELECT * FROM logmessage_count_view;
+
+\! gpstop -raf > /dev/null
+\c
+SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');');
+
+-- logmessage count should be 1
+SELECT * FROM logmessage_count_view;
+
+DROP EXTENSION diskquota;
+
+\c contrib_regression
+DROP DATABASE test_readiness_logged;
diff --git a/gpcontrib/diskquota/tests/regress/sql/test_recreate.sql b/gpcontrib/diskquota/tests/regress/sql/test_recreate.sql
new file mode 100644
index 00000000000..2e29656cef0
--- /dev/null
+++ b/gpcontrib/diskquota/tests/regress/sql/test_recreate.sql
@@ -0,0 +1,17 @@
+\c
+
+CREATE DATABASE test_recreate;
+
+\c diskquota
+
+INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate';
+
+\c test_recreate
+CREATE EXTENSION diskquota;
+SELECT diskquota.wait_for_worker_new_epoch(); -- should be ok
+SELECT diskquota.pause();
+SELECT diskquota.wait_for_worker_new_epoch();
+DROP EXTENSION diskquota;
+
+\c contrib_regression
+DROP DATABASE test_recreate;
diff --git a/gpcontrib/diskquota/tests/regress/sql/test_rejectmap.sql b/gpcontrib/diskquota/tests/regress/sql/test_rejectmap.sql
new file mode 100644
index 00000000000..9cdb6f772ee
--- /dev/null
+++ b/gpcontrib/diskquota/tests/regress/sql/test_rejectmap.sql
@@ -0,0 +1,218 @@
+--
+-- This file contains tests for dispatching and querying the rejectmap.
+--
+
+CREATE SCHEMA s_rejectmap;
+SET search_path TO s_rejectmap;
+
+-- This function replaces the oid that appears in the auxiliary relation's name
+-- with the corresponding relname of that oid.
+CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text)
+  RETURNS text AS $$
+  BEGIN
+    RETURN COALESCE(
+      REGEXP_REPLACE(given_name,
+        '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+',
+        '\1' ||
+        (SELECT relname FROM pg_class
+         WHERE oid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);
+  END;
+$$ LANGUAGE plpgsql;
+
+-- This function returns a valid tablespaceoid.
+-- For role/namespace quota, return it as is.
+-- For namespace_tablespace/role_tablespace quota, return a non-zero tablespaceoid.
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid)
+  RETURNS oid AS
+$$
+BEGIN
+  CASE
+    WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid;
+    ELSE RETURN (
+      CASE tablespaceoid
+        WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE())
+        ELSE
+          tablespaceoid
+      END
+    );
+  END CASE;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text)
+  RETURNS void AS $$
+  DECLARE
+    bt int;
+    targetoid oid;
+    tablespaceoid oid;
+  BEGIN
+    SELECT reltablespace INTO tablespaceoid FROM pg_class WHERE relname=rel::text;
+    CASE block_type
+      WHEN 'NAMESPACE' THEN
+        bt = 0;
+        SELECT relnamespace INTO targetoid
+          FROM pg_class WHERE relname=rel::text;
+      WHEN 'ROLE' THEN
+        bt = 1;
+        SELECT relowner INTO targetoid
+          FROM pg_class WHERE relname=rel::text;
+      WHEN 'NAMESPACE_TABLESPACE' THEN
+        bt = 2;
+        SELECT relnamespace INTO targetoid
+          FROM pg_class WHERE relname=rel::text;
+      WHEN 'ROLE_TABLESPACE' THEN
+        bt = 3;
+        SELECT relowner INTO targetoid
+          FROM pg_class WHERE relname=rel::text;
+    END CASE;
+    PERFORM diskquota.refresh_rejectmap(
+      ARRAY[
+        ROW(targetoid,
+            (SELECT oid FROM pg_database WHERE datname=current_database()),
+            (SELECT get_real_tablespace_oid(block_type, tablespaceoid)),
+            bt,
+            false)
+      ]::diskquota.rejectmap_entry[],
+      ARRAY[rel]::oid[])
+    FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
+  END; $$
+LANGUAGE 'plpgsql';
+
+--
+-- 1. Create an ordinary table and add its oid to rejectmap on seg0.
+-- Check that its relfilenode is blocked on seg0 by various conditions.
+--
+CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i);
+
+-- Insert an entry for blocked_t1 to rejectmap on seg0.
+SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text);
+
+-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace.
+SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched
+  FROM gp_dist_random('pg_class') AS rel,
+       gp_dist_random('diskquota.rejectmap') AS be
+  WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid;
+
+-- Insert an entry for blocked_t1 to rejectmap on seg0.
+SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text);
+
+-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner.
+SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched
+  FROM gp_dist_random('pg_class') AS rel,
+       gp_dist_random('diskquota.rejectmap') AS be
+  WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid;
+
+-- Create a tablespace to test the rest of blocking types.
+\! mkdir -p /tmp/blocked_space
+CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space';
+ALTER TABLE blocked_t1 SET TABLESPACE blocked_space;
+
+-- Insert an entry for blocked_t1 to rejectmap on seg0.
+SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text);
+
+-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace and tablespace.
+SELECT rel.relname, be.target_type,
+       (be.target_oid=rel.relnamespace) AS namespace_matched,
+       (be.tablespace_oid=rel.reltablespace) AS tablespace_matched
+  FROM gp_dist_random('pg_class') AS rel,
+       gp_dist_random('diskquota.rejectmap') AS be
+  WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid;
+
+-- Insert an entry for blocked_t1 to rejectmap on seg0.
+SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner and tablespace. +SELECT rel.relname, be.target_type, + (be.target_oid=rel.relowner) AS owner_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + +-- +-- 2. Test that the relfilenodes of toast relation together with its +-- index are blocked on seg0. +-- +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +-- Insert an entry for blocked_t2 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenodes of blocked_t2 together with its toast relation and toast +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + +-- +-- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE INDEX blocked_t3_index ON blocked_t3(i); +-- Insert an entry for blocked_t3 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenodes of blocked_t3 together with its appendonly relation and appendonly +-- index relations are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + +-- +-- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE INDEX blocked_t4_index ON blocked_t4(i); +-- Insert an entry for blocked_t4 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenodes of blocked_t4 together with its appendonly relation and appendonly +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + +-- +-- 5. Test that the relfilenodes of toast appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE INDEX blocked_t5_index ON blocked_t5(i); +-- Insert an entry for blocked_t5 to rejectmap on seg0. 
+SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text);
+
+-- Shows that the relfilenodes of blocked_t5 together with its toast relation, toast
+-- index relation and appendonly relations are blocked on seg0 by its namespace.
+SELECT replace_oid_with_relname(rel.relname),
+       rel.relkind, be.target_type,
+       (be.target_oid=rel.relnamespace) AS namespace_matched
+  FROM gp_dist_random('pg_class') AS rel,
+       gp_dist_random('diskquota.rejectmap') AS be
+  WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid
+  ORDER BY rel.relname DESC;
+
+-- Do some clean-ups.
+DROP FUNCTION replace_oid_with_relname(text);
+DROP FUNCTION block_relation_on_seg0(regclass, text);
+DROP FUNCTION get_real_tablespace_oid(text, oid);
+DROP TABLE blocked_t1;
+DROP TABLE blocked_t2;
+DROP TABLE blocked_t3;
+DROP TABLE blocked_t4;
+DROP TABLE blocked_t5;
+DROP TABLESPACE blocked_space;
+SET search_path TO DEFAULT;
+DROP SCHEMA s_rejectmap;
diff --git a/gpcontrib/diskquota/tests/regress/sql/test_rejectmap_mul_db.sql b/gpcontrib/diskquota/tests/regress/sql/test_rejectmap_mul_db.sql
new file mode 100644
index 00000000000..e59647f3428
--- /dev/null
+++ b/gpcontrib/diskquota/tests/regress/sql/test_rejectmap_mul_db.sql
@@ -0,0 +1,53 @@
+-- One db's rejectmap update should not impact other dbs' rejectmaps
+CREATE DATABASE tjmu1;
+CREATE DATABASE tjmu2;
+
+-- start_ignore
+\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null
+-- increase the naptime to avoid the active table getting cleared by tjmu1's worker
+\! gpconfig -c "diskquota.naptime" -v 1 > /dev/null
+\! gpstop -u > /dev/null
+-- end_ignore
+
+\c tjmu1
+CREATE EXTENSION diskquota;
+SELECT diskquota.set_schema_quota('public', '1MB');
+CREATE TABLE b (t TEXT) DISTRIBUTED BY (t);
+SELECT diskquota.wait_for_worker_new_epoch();
+-- Trigger hard limit to dispatch rejectmap for tjmu1
+INSERT INTO b SELECT generate_series(1, 100000000); -- fail
+-- FIXME: Pause to avoid tjmu1's worker clearing the active table. Since there are bugs, this might be flaky.
+SELECT diskquota.pause();
+-- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1
+SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0;
+
+\c tjmu2
+CREATE EXTENSION diskquota;
+SELECT diskquota.set_schema_quota('public', '1MB');
+CREATE TABLE b (t TEXT) DISTRIBUTED BY (t);
+SELECT diskquota.wait_for_worker_new_epoch();
+-- Trigger hard limit to dispatch rejectmap for tjmu2
+INSERT INTO b SELECT generate_series(1, 100000000); -- fail
+SELECT diskquota.wait_for_worker_new_epoch();
+SELECT diskquota.pause();
+
+--\c tjmu1
+-- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2
+-- The entries for tjmu1 should not be cleared
+SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0;
+
+-- start_ignore
+\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null
+\! gpconfig -c "diskquota.naptime" -v 0 > /dev/null
+\!
gpstop -u > /dev/null +-- end_ignore + +\c tjmu1 +DROP EXTENSION diskquota; +\c tjmu2 +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE tjmu1; +DROP DATABASE tjmu2; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_relation_cache.sql b/gpcontrib/diskquota/tests/regress/sql/test_relation_cache.sql new file mode 100644 index 00000000000..d0e986e9395 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_relation_cache.sql @@ -0,0 +1,67 @@ +-- init +CREATE OR REPLACE FUNCTION diskquota.check_relation_cache() +RETURNS boolean +as $$ +declare t1 oid[]; +declare t2 oid[]; +begin +t1 := (select array_agg(distinct((a).relid)) from diskquota.show_relation_cache_all_seg() as a where (a).relid != (a).primary_table_oid); +t2 := (select distinct((a).auxrel_oid) from diskquota.show_relation_cache_all_seg() as a where (a).relid = (a).primary_table_oid); +return t1 = t2; +end; +$$ LANGUAGE plpgsql; + +-- heap table +begin; +create table t(i int) DISTRIBUTED BY (i); +insert into t select generate_series(1, 100000); + +select count(*) from diskquota.show_relation_cache_all_seg(); +commit; + +select diskquota.wait_for_worker_new_epoch(); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +-- toast table +begin; +create table t(t text) DISTRIBUTED BY (t); +insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); + +select count(*) from diskquota.show_relation_cache_all_seg(); + +select diskquota.check_relation_cache(); +commit; + +select diskquota.wait_for_worker_new_epoch(); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +-- AO table +begin; +create table t(a int, b text) with(appendonly=true) DISTRIBUTED BY (a); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; + +select count(*) from diskquota.show_relation_cache_all_seg(); + +select diskquota.check_relation_cache(); +commit; + +select diskquota.wait_for_worker_new_epoch(); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +-- AOCS table +begin; +create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; +select count(*) from diskquota.show_relation_cache_all_seg(); + +select diskquota.check_relation_cache(); +commit; + +select diskquota.wait_for_worker_new_epoch(); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +DROP FUNCTION diskquota.check_relation_cache(); diff --git a/gpcontrib/diskquota/tests/regress/sql/test_relation_size.sql b/gpcontrib/diskquota/tests/regress/sql/test_relation_size.sql new file mode 100644 index 00000000000..b783ec24227 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_relation_size.sql @@ -0,0 +1,43 @@ +CREATE TEMP TABLE t1(i int); +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1'); +SELECT pg_table_size('t1'); + +CREATE TABLE t2(i int) DISTRIBUTED BY (i); +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2'); +SELECT pg_table_size('t2'); + +-- start_ignore +\! 
mkdir -p /tmp/test_spc +-- end_ignore +DROP TABLESPACE IF EXISTS test_spc; +CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; + +ALTER TABLE t1 SET TABLESPACE test_spc; +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1'); +SELECT pg_table_size('t1'); + +ALTER TABLE t2 SET TABLESPACE test_spc; +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2'); +SELECT pg_table_size('t2'); + +DROP TABLE t1, t2; +DROP TABLESPACE test_spc; +-- start_ignore +\! rm -rf /tmp/test_spc + -- end_ignore + +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('ao'); +SELECT pg_relation_size('ao'); +DROP TABLE ao; + +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT diskquota.relation_size('aocs'); +SELECT pg_relation_size('aocs'); +DROP TABLE aocs; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_relkind.sql b/gpcontrib/diskquota/tests/regress/sql/test_relkind.sql new file mode 100644 index 00000000000..2764a55f4cc --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_relkind.sql @@ -0,0 +1,21 @@ +CREATE DATABASE test_relkind; +\c test_relkind +CREATE TYPE test_type AS ( + "dbid" oid, + "datname" text +); +CREATE VIEW v AS select * from pg_class; +CREATE EXTENSION diskquota; +CREATE table test(a int); +SELECT diskquota.init_table_size_table(); +-- diskquota.table_size should not change after creating a new type +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_relkind; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_rename.sql b/gpcontrib/diskquota/tests/regress/sql/test_rename.sql new file mode 100644 index 00000000000..d6440c621eb --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_rename.sql @@ -0,0 +1,50 @@ +-- test rename schema +CREATE SCHEMA srs1; +SELECT diskquota.set_schema_quota('srs1', '1 MB'); +set search_path to srs1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ALTER SCHEMA srs1 RENAME TO srs2; +SET search_path TO srs2; + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); + +DROP TABLE a2; +RESET search_path; +DROP SCHEMA srs2; + +-- test rename role +CREATE SCHEMA srr1; +CREATE ROLE srerole NOLOGIN; +SELECT diskquota.set_role_quota('srerole', '1MB'); +SET search_path TO srr1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +ALTER TABLE a OWNER TO srerole; + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ALTER ROLE srerole RENAME TO srerole2; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); + +DROP TABLE a2; +DROP ROLE srerole2; +RESET 
search_path; +DROP SCHEMA srr1; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_reschema.sql b/gpcontrib/diskquota/tests/regress/sql/test_reschema.sql new file mode 100644 index 00000000000..feb61a05d1f --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_reschema.sql @@ -0,0 +1,20 @@ +-- Test re-set_schema_quota +CREATE SCHEMA srE; +SELECT diskquota.set_schema_quota('srE', '1 MB'); +SET search_path TO srE; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail when exceed quota limit +INSERT INTO a SELECT generate_series(1,1000); +-- set schema quota larger +SELECT diskquota.set_schema_quota('srE', '1 GB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,1000); + +DROP TABLE a; +RESET search_path; +DROP SCHEMA srE; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_role.sql b/gpcontrib/diskquota/tests/regress/sql/test_role.sql new file mode 100644 index 00000000000..6472c4c5e7b --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_role.sql @@ -0,0 +1,69 @@ +-- Test role quota + +CREATE SCHEMA srole; +SET search_path TO srole; + +CREATE ROLE u1 NOLOGIN; +CREATE ROLE u2 NOLOGIN; +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO u1; +CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b2 OWNER TO u1; + +SELECT diskquota.set_role_quota('u1', '1 MB'); + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +-- Delete role quota +SELECT diskquota.set_role_quota('u1', '-1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Reset role quota +SELECT diskquota.set_role_quota('u1', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b'::regclass +ORDER BY segid; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b2'::regclass +ORDER BY segid; + + +ALTER TABLE b OWNER TO u2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); + +-- superuser is blocked to set quota +--start_ignore +SELECT rolname from pg_roles where rolsuper=true; +--end_ignore +\gset +select diskquota.set_role_quota(:'rolname', '1mb'); +select diskquota.set_role_quota(:'rolname', '-1mb'); + +CREATE ROLE "Tn" NOLOGIN; +SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail +SELECT diskquota.set_role_quota('"tn"', '-1 MB'); -- fail +SELECT diskquota.set_role_quota('"Tn"', '-1 MB'); + +DROP TABLE b, b2; +DROP ROLE u1, u2, "Tn"; +RESET search_path; +DROP SCHEMA srole; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_schema.sql b/gpcontrib/diskquota/tests/regress/sql/test_schema.sql new file mode 100644 index 00000000000..3478a8d84b9 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_schema.sql @@ -0,0 
+1,59 @@ +-- Test schema +CREATE SCHEMA s1; +SET search_path TO s1; + +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); + +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +CREATE TABLE a2(i int) DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA s2; +ALTER TABLE s1.a SET SCHEMA s2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO s2.a SELECT generate_series(1,200); + +-- prepare a schema that has reached quota limit +CREATE SCHEMA badquota; +DROP ROLE IF EXISTS testbody; +CREATE ROLE testbody; +CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT diskquota.init_table_size_table(); +SELECT diskquota.set_schema_quota('badquota', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT size, segid FROM diskquota.table_size + WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') + ORDER BY segid DESC; +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); + +ALTER TABLE s2.a SET SCHEMA badquota; +-- expect failed +INSERT INTO badquota.a SELECT generate_series(0, 100); + +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; + +CREATE SCHEMA "Tn1"; +SELECT diskquota.set_schema_quota('"Tn1"', '-1 MB'); + +RESET search_path; +DROP TABLE s1.a2, badquota.a; +DROP SCHEMA s1, s2, "Tn1"; + +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_show_status.sql b/gpcontrib/diskquota/tests/regress/sql/test_show_status.sql new file mode 100644 index 00000000000..64fa4ebd270 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_show_status.sql @@ -0,0 +1,25 @@ +select * from diskquota.status() where name not like '%version'; + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + +select from diskquota.pause(); +select * from diskquota.status() where name not like '%version'; + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + +select from diskquota.resume(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null
+select * from diskquota.status() where name not like '%version';
diff --git a/gpcontrib/diskquota/tests/regress/sql/test_table_size.sql b/gpcontrib/diskquota/tests/regress/sql/test_table_size.sql
new file mode 100644
index 00000000000..334ecc2e8e5
--- /dev/null
+++ b/gpcontrib/diskquota/tests/regress/sql/test_table_size.sql
@@ -0,0 +1,11 @@
+-- Test tablesize table
+
+create table a(i text) DISTRIBUTED BY (i);
+
+insert into a select * from generate_series(1,10000);
+
+SELECT diskquota.wait_for_worker_new_epoch();
+select pg_table_size('a') as table_size;
+\gset
+select :table_size = diskquota.table_size.size from diskquota.table_size where tableid = 'a'::regclass and segid=-1;
+
diff --git a/gpcontrib/diskquota/tests/regress/sql/test_tablespace_diff_schema.sql b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_diff_schema.sql
new file mode 100644
index 00000000000..fadfb0d6f79
--- /dev/null
+++ b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_diff_schema.sql
@@ -0,0 +1,45 @@
+-- allow setting quotas for different schemas in the same tablespace
+-- deleting the quota for one schema will not drop other quotas for different schemas in the same tablespace
+
+-- start_ignore
+\! mkdir -p /tmp/spc_diff_schema
+-- end_ignore
+
+CREATE TABLESPACE spc_diff_schema LOCATION '/tmp/spc_diff_schema';
+CREATE SCHEMA schema_in_tablespc;
+SET search_path TO schema_in_tablespc;
+
+CREATE TABLE a(i int) TABLESPACE spc_diff_schema DISTRIBUTED BY (i);
+INSERT INTO a SELECT generate_series(1,100);
+SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'spc_diff_schema','1 MB');
+SELECT diskquota.wait_for_worker_new_epoch();
+
+-- with hardlimits off, expect to succeed
+INSERT INTO a SELECT generate_series(1,1000000);
+
+-- wait for next loop for bgworker to add it to rejectmap
+SELECT diskquota.wait_for_worker_new_epoch();
+-- expect to fail
+INSERT INTO a SELECT generate_series(1,1000000);
+
+SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view;
+
+SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','1 MB');
+SELECT diskquota.wait_for_worker_new_epoch();
+SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view;
+
+SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','-1');
+SELECT diskquota.wait_for_worker_new_epoch();
+SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view;
+
+-- expect to fail
+INSERT INTO a SELECT generate_series(1,1000000);
+
+reset search_path;
+DROP TABLE IF EXISTS schema_in_tablespc.a;
+DROP tablespace IF EXISTS spc_diff_schema;
+DROP SCHEMA IF EXISTS schema_in_tablespc;
+
+-- start_ignore
+\! rmdir /tmp/spc_diff_schema
+ -- end_ignore
diff --git a/gpcontrib/diskquota/tests/regress/sql/test_tablespace_role.sql b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_role.sql
new file mode 100644
index 00000000000..a1a524b638b
--- /dev/null
+++ b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_role.sql
@@ -0,0 +1,104 @@
+-- Test role quota
+-- start_ignore
+\!
mkdir -p /tmp/rolespc +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc; +CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; +CREATE SCHEMA rolespcrole; +SET search_path TO rolespcrole; + +DROP ROLE IF EXISTS rolespcu1; +DROP ROLE IF EXISTS rolespcu2; +CREATE ROLE rolespcu1 NOLOGIN; +CREATE ROLE rolespcu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); +ALTER TABLE b2 OWNER TO rolespcu1; + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); + +-- Test show_fast_role_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; + +-- Test alter owner +ALTER TABLE b OWNER TO rolespcu2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test alter tablespace +-- start_ignore +\! mkdir -p /tmp/rolespc2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc2; +CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; +ALTER TABLE b SET TABLESPACE rolespc2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc +ALTER TABLE b SET TABLESPACE rolespc; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test update quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,1000000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +-- superuser is blocked to set quota +-- start_ignore +SELECT rolname from pg_roles where rolsuper=true; +-- end_ignore +\gset +select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); + +-- start_ignore +\! 
mkdir -p /tmp/rolespc3 +-- end_ignore +DROP ROLE IF EXISTS "Rolespcu3"; +CREATE ROLE "Rolespcu3" NOLOGIN; +DROP TABLESPACE IF EXISTS "Rolespc3"; +CREATE TABLESPACE "Rolespc3" LOCATION '/tmp/rolespc3'; +SELECT diskquota.set_role_tablespace_quota('rolespcu1', '"Rolespc3"', '-1 MB'); +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', 'rolespc', '-1 mB'); +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', '"Rolespc3"', '-1 Mb'); + +DROP TABLE b, b2; +DROP ROLE rolespcu1, rolespcu2; +RESET search_path; +DROP SCHEMA rolespcrole; +DROP TABLESPACE rolespc; +DROP TABLESPACE rolespc2; +DROP TABLESPACE "Rolespc3"; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_tablespace_role_perseg.sql b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_role_perseg.sql new file mode 100644 index 00000000000..4a71e1d2614 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_role_perseg.sql @@ -0,0 +1,108 @@ +-- Test role quota +-- start_ignore +\! mkdir -p /tmp/rolespc_perseg +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg; +CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; +CREATE SCHEMA rolespc_persegrole; +SET search_path TO rolespc_persegrole; + +DROP ROLE IF EXISTS rolespc_persegu1; +DROP ROLE IF EXISTS rolespc_persegu2; +CREATE ROLE rolespc_persegu1 NOLOGIN; +CREATE ROLE rolespc_persegu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO rolespc_persegu1; + +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- change tablespace role quota +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); +SELECT diskquota.wait_for_worker_new_epoch(); +---- expect insert fail by tablespace schema perseg quota +INSERT INTO b SELECT generate_series(1,100); +-- Test alter owner +ALTER TABLE b OWNER TO rolespc_persegu2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg2; +CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; +ALTER TABLE b SET TABLESPACE rolespc_perseg2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc_perseg +ALTER TABLE b SET TABLESPACE rolespc_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +-- start_ignore +\! mkdir -p /tmp/rolespc_perseg3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Rolespc_perseg3"; +CREATE TABLESPACE "Rolespc_perseg3" LOCATION '/tmp/rolespc_perseg3'; +CREATE ROLE "Rolespc_persegu3" NOLOGIN; +SELECT diskquota.set_role_tablespace_quota('"Rolespc_persegu3"', '"Rolespc_perseg3"', '-1 MB'); +SELECT diskquota.set_per_segment_quota('"Rolespc_perseg3"', 0.11); + +DROP table b; +DROP ROLE rolespc_persegu1, rolespc_persegu2, "Rolespc_persegu3"; +RESET search_path; +DROP SCHEMA rolespc_persegrole; +DROP TABLESPACE rolespc_perseg; +DROP TABLESPACE rolespc_perseg2; +DROP TABLESPACE "Rolespc_perseg3"; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_tablespace_schema.sql b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_schema.sql new file mode 100644 index 00000000000..b9281da965a --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_schema.sql @@ -0,0 +1,82 @@ +-- Test schema +-- start_ignore +\! 
mkdir -p /tmp/schemaspc +-- end_ignore +CREATE SCHEMA spcs1; +DROP TABLESPACE IF EXISTS schemaspc; +CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; +SET search_path TO spcs1; + +CREATE TABLE a(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); + +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA spcs2; +ALTER TABLE spcs1.a SET SCHEMA spcs2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO spcs2.a SELECT generate_series(1,200); +ALTER TABLE spcs2.a SET SCHEMA spcs1; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + +-- Test alter tablespace +-- start_ignore +\! mkdir -p /tmp/schemaspc2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc2; +CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; +ALTER TABLE a SET TABLESPACE schemaspc2; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); + +-- Test update quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,1000000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); + +-- start_ignore +\! mkdir -p /tmp/schemaspc3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc3"; +CREATE TABLESPACE "Schemaspc3" LOCATION '/tmp/schemaspc3'; +CREATE SCHEMA "Spcs2"; +SELECT diskquota.set_schema_tablespace_quota('"Spcs2"', '"Schemaspc3"', '-1 MB'); + +RESET search_path; +DROP TABLE spcs1.a2, spcs1.a; +DROP SCHEMA spcs1, spcs2; +DROP TABLESPACE schemaspc; +DROP TABLESPACE schemaspc2; +DROP TABLESPACE "Schemaspc3"; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_tablespace_schema_perseg.sql b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_schema_perseg.sql new file mode 100644 index 00000000000..3d1ffb4cf12 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -0,0 +1,118 @@ +-- Test schema +-- start_ignore +\! 
mkdir -p /tmp/schemaspc_perseg +-- end_ignore +-- Test tablespace quota perseg +CREATE SCHEMA spcs1_perseg; +DROP TABLESPACE IF EXISTS schemaspc_perseg; +CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); +SET search_path TO spcs1_perseg; + +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail by tablespace schema diskquota +INSERT INTO a SELECT generate_series(1,100); +-- change tablespace schema quota +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); +SELECT diskquota.wait_for_worker_new_epoch(); +---- expect insert fail by tablespace schema perseg quota +INSERT INTO a SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA spcs2_perseg; +ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); +ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/schemaspc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc_perseg2"; +CREATE TABLESPACE "Schemaspc_perseg2" LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE "Schemaspc_perseg2"; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); + +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); +SELECT diskquota.wait_for_worker_new_epoch(); +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); +SELECT diskquota.wait_for_worker_new_epoch(); +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete tablespace schema quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +-- test config per segment quota +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','1'); +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + +SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', '"Schemaspc_perseg2"','1 MB'); + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','-2'); + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','3'); + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; +SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('Schemaspc_perseg2', 'schemaspc_perseg'); + +RESET search_path; +DROP TABLE spcs1_perseg.a; +DROP SCHEMA spcs1_perseg; +DROP TABLESPACE schemaspc_perseg; 
+DROP TABLESPACE "Schemaspc_perseg2"; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_temp_role.sql b/gpcontrib/diskquota/tests/regress/sql/test_temp_role.sql new file mode 100644 index 00000000000..856a48e8dd8 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_temp_role.sql @@ -0,0 +1,24 @@ +-- Test temp table restrained by role id +CREATE SCHEMA strole; +CREATE ROLE u3temp NOLOGIN; +SET search_path TO strole; + +SELECT diskquota.set_role_quota('u3temp', '1MB'); +CREATE TABLE a(i int) DISTRIBUTED BY (i); +ALTER TABLE a OWNER TO u3temp; +CREATE TEMP TABLE ta(i int); +ALTER TABLE ta OWNER TO u3temp; + +-- expected failed: fill temp table +INSERT INTO ta SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expected failed: +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE ta; +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO a SELECT generate_series(1,100); + +DROP TABLE a; +DROP ROLE u3temp; +RESET search_path; +DROP SCHEMA strole; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_toast.sql b/gpcontrib/diskquota/tests/regress/sql/test_toast.sql new file mode 100644 index 00000000000..e96a595a729 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_toast.sql @@ -0,0 +1,23 @@ +-- Test toast +CREATE SCHEMA s5; +SELECT diskquota.set_schema_quota('s5', '1 MB'); +SET search_path TO s5; +CREATE TABLE a5 (t text) DISTRIBUTED BY (t); +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,10000)) +FROM generate_series(1,10000); + +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert toast fail +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,1000)) +FROM generate_series(1,1000); + +DROP TABLE a5; +RESET search_path; +DROP SCHEMA s5; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_truncate.sql b/gpcontrib/diskquota/tests/regress/sql/test_truncate.sql new file mode 100644 index 00000000000..2dafcb3126c --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_truncate.sql @@ -0,0 +1,21 @@ +-- Test truncate +CREATE SCHEMA s7; +SELECT diskquota.set_schema_quota('s7', '1 MB'); +SET search_path TO s7; +CREATE TABLE a (i int) DISTRIBUTED BY (i); +CREATE TABLE b (i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); +TRUNCATE TABLE a; +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); + +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s7; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_uncommitted_table_size.sql b/gpcontrib/diskquota/tests/regress/sql/test_uncommitted_table_size.sql new file mode 100644 index 00000000000..ee1c1e9b080 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_uncommitted_table_size.sql @@ -0,0 +1,98 @@ +-- temp table +begin; +CREATE TEMP TABLE t1(i int); +INSERT INTO t1 SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; +SELECT pg_table_size('t1'); +commit; + +DROP table t1; + +-- heap table +begin; +CREATE TABLE t2(i int) DISTRIBUTED BY (i); +INSERT INTO t2 SELECT generate_series(1, 100000); +SELECT 
diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; +SELECT pg_table_size('t2'); +commit; + +-- heap table index +begin; +CREATE INDEX idx2 on t2(i); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; +SELECT pg_table_size('idx2'); +commit; + +DROP table t2; + +-- toast table +begin; +CREATE TABLE t3(t text) DISTRIBUTED BY (t); +INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; +SELECT pg_table_size('t3'); +commit; + +DROP table t3; + +-- AO table +begin; +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); +commit; + +-- AO table index +begin; +CREATE INDEX ao_idx on ao(i); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; +SELECT pg_table_size('ao_idx'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; +SELECT pg_table_size('ao'); +commit; + +DROP TABLE ao; + +-- AO table CTAS +begin; +CREATE TABLE ao (i) WITH(appendonly=true) AS SELECT generate_series(1, 10000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); +commit; +DROP TABLE ao; + +-- AOCS table +begin; +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; +SELECT pg_table_size('aocs'); +commit; + +-- AOCS table index +begin; +CREATE INDEX aocs_idx on aocs(i); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; +SELECT pg_table_size('aocs_idx'); +commit; + +DROP TABLE aocs; + +-- AOCS table CTAS +begin; +CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; +SELECT pg_table_size('aocs'); +commit; +DROP TABLE aocs; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_update.sql b/gpcontrib/diskquota/tests/regress/sql/test_update.sql new file mode 100644 index 00000000000..75fb6ee8783 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_update.sql @@ -0,0 +1,13 @@ +-- Test Update +CREATE SCHEMA s4; +SELECT diskquota.set_schema_quota('s4', '1 MB'); +SET search_path TO s4; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect 
update fail. +UPDATE a SET i = 100; +DROP TABLE a; +RESET search_path; +DROP SCHEMA s4; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_vacuum.sql b/gpcontrib/diskquota/tests/regress/sql/test_vacuum.sql new file mode 100644 index 00000000000..3483db34469 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_vacuum.sql @@ -0,0 +1,26 @@ +-- Test vacuum full +CREATE SCHEMA s6; +SELECT diskquota.set_schema_quota('s6', '1 MB'); +SET search_path TO s6; +CREATE TABLE a (i int) DISTRIBUTED BY (i); +CREATE TABLE b (i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,10); +DELETE FROM a WHERE i > 10; +SELECT diskquota.wait_for_worker_new_epoch(); +VACUUM FULL a; +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,10); +INSERT INTO b SELECT generate_series(1,10); + +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s6; + diff --git a/gpcontrib/diskquota/tests/regress/sql/test_worker_not_ready.sql b/gpcontrib/diskquota/tests/regress/sql/test_worker_not_ready.sql new file mode 100644 index 00000000000..5185fc86791 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_worker_not_ready.sql @@ -0,0 +1,21 @@ +CREATE DATABASE db_not_ready; +\c db_not_ready; + +CREATE TABLE t (i int) DISTRIBUTED BY (i); + +CREATE EXTENSION diskquota; +CREATE EXTENSION diskquota_test; + +SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB'); + +SELECT diskquota.pause(); + +-- diskquota.wait_for_worker_new_epoch() cannot be used here because +-- diskquota.state is not clean. +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + +DROP EXTENSION diskquota; + +\c contrib_regression + +DROP DATABASE db_not_ready; diff --git a/gpcontrib/diskquota/tests/regress/sql/test_worker_schedule.sql b/gpcontrib/diskquota/tests/regress/sql/test_worker_schedule.sql new file mode 100644 index 00000000000..94d27e9339b --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_worker_schedule.sql @@ -0,0 +1,226 @@ +-- start_ignore +\c + +DROP DATABASE IF EXISTS t1; +DROP DATABASE IF EXISTS t2; +DROP DATABASE IF EXISTS t3; +DROP DATABASE IF EXISTS t4; +DROP DATABASE IF EXISTS t5; +DROP DATABASE IF EXISTS t6; +DROP DATABASE IF EXISTS t7; +DROP DATABASE IF EXISTS t8; +DROP DATABASE IF EXISTS t9; +DROP DATABASE IF EXISTS t10; +DROP DATABASE IF EXISTS t11; +DROP DATABASE IF EXISTS t12; +CREATE DATABASE t1; +CREATE DATABASE t2; +CREATE DATABASE t3; +CREATE DATABASE t4; +CREATE DATABASE t5; +CREATE DATABASE t6; +CREATE DATABASE t7; +CREATE DATABASE t8; +CREATE DATABASE t9; +CREATE DATABASE t10; +CREATE DATABASE t11; +CREATE DATABASE t12; +--end_ignore +\c t1 +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1; +\! 
gpstop -arf; +--end_ignore + +\c +SHOW diskquota.max_workers; + +\c t2 +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + +\c t3 +CREATE EXTENSION diskquota; +CREATE TABLE f3(a int); +INSERT into f3 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 11; +\! gpstop -arf; +--end_ignore + +\c +SHOW diskquota.max_workers; + +\c t4 +CREATE EXTENSION diskquota; +CREATE TABLE f4(a int); +INSERT into f4 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; + +\c t5 +CREATE EXTENSION diskquota; +CREATE TABLE f5(a int); +INSERT into f5 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; + +\c t6 +CREATE EXTENSION diskquota; +CREATE TABLE f6(a int); +INSERT into f6 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; + +\c t7 +CREATE EXTENSION diskquota; +CREATE TABLE f7(a int); +INSERT into f7 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + +\c t8 +CREATE EXTENSION diskquota; +CREATE TABLE f8(a int); +INSERT into f8 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f8'::regclass and segid = -1; + +\c t9 +CREATE EXTENSION diskquota; +CREATE TABLE f9(a int); +INSERT into f9 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f9'::regclass and segid = -1; + +\c t10 +CREATE EXTENSION diskquota; +CREATE TABLE f10(a int); +INSERT into f10 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f10'::regclass and segid = -1; + +\c t11 +CREATE EXTENSION diskquota; +CREATE TABLE f11(a int); +INSERT into f11 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f11'::regclass and segid = -1; + +\c t1 +INSERT into f1 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + +\c t7 +INSERT into f7 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + +\c t1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +DROP TABLE f1; +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +INSERT into f1 SELECT 
generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + +\c t2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +DROP TABLE f2; +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + +\c t3 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t4 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t5 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t6 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t7 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t8 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t9 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t10 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t11 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c t12 +CREATE EXTENSION diskquota; +CREATE TABLE f12(a int); +INSERT into f12 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f12'::regclass and segid = -1; +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c t1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +--start_ignore +\c contrib_regression +DROP DATABASE t1; +DROP DATABASE t2; +DROP DATABASE t3; +DROP DATABASE t4; +DROP DATABASE t5; +DROP DATABASE t6; +DROP DATABASE t7; +DROP DATABASE t8; +DROP DATABASE t9; +DROP DATABASE t10; +DROP DATABASE t11; +DROP DATABASE t12; +\! gpconfig -r diskquota.worker_timeout; +\! gpconfig -r diskquota.max_workers; +\! gpstop -arf; +--end_ignore diff --git a/gpcontrib/diskquota/tests/regress/sql/test_worker_schedule_exception.sql b/gpcontrib/diskquota/tests/regress/sql/test_worker_schedule_exception.sql new file mode 100644 index 00000000000..83fe7faf0a0 --- /dev/null +++ b/gpcontrib/diskquota/tests/regress/sql/test_worker_schedule_exception.sql @@ -0,0 +1,38 @@ +-- start_ignore +\! gpconfig -c diskquota.max_workers -v 10; +\! gpconfig -c diskquota.naptime -v 4; +\! gpstop -arf; +\c +DROP DATABASE IF EXISTS t1; +DROP DATABASE IF EXISTS t2; +--end_ignore + +CREATE DATABASE t1; +CREATE DATABASE t2; +\c t1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\! pgrep -f "[p]ostgres.*bgworker.*t1" | xargs kill; +\! sleep 0.5 ; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +-- start_ignore +\! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep +--end_ignore +\c contrib_regression +DROP DATABASE t1; +\c t2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\c t2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE t2; +--start_ignore +\! gpconfig -r diskquota.naptime; +\! gpconfig -r diskquota.max_workers; +\! gpstop -arf; +--end_ignore diff --git a/gpcontrib/diskquota/upgrade_test/CMakeLists.txt b/gpcontrib/diskquota/upgrade_test/CMakeLists.txt new file mode 100644 index 00000000000..5aef39535ae --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/CMakeLists.txt @@ -0,0 +1,25 @@ +include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) + +set(EXPECTED_DIR "${CMAKE_CURRENT_SOURCE_DIR}/expected") +list(APPEND schedule_files + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.2--2.3 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.3--2.2 +) +regresstarget_add( + upgradecheck + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + SQL_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/sql + EXPECTED_DIR + ${EXPECTED_DIR} + RESULTS_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/results + SCHEDULE_FILE + ${schedule_files} + REGRESS_OPTS + --dbname=contrib_regression) + +# NOTE: DDL change detection and upgrade version validation logic was removed +# as diskquota is now part of the Cloudberry source tree. Upgrade testing +# should be handled as part of the Cloudberry release process if needed. diff --git a/gpcontrib/diskquota/upgrade_test/README.md b/gpcontrib/diskquota/upgrade_test/README.md new file mode 100644 index 00000000000..deab7da6058 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/README.md @@ -0,0 +1,29 @@ +# Add a new version to the upgrade or downgrade tests + +- add a new `schedule` file like `schedule_1.0--2.0`. +- write the new tests it references: + +``` +test: 1.0_install # Install diskquota version 1.0 +test: 1.0_set_quota # Create some quota configs under the "1.0" diskquota schema +test: 1.0_catalog # Check if the diskquota DDL is as expected +test: 2.0_migrate_to_version_2.0 # Migrate the 1.0 diskquota DDL to 2.0 +test: 2.0_catalog # Check if the migration result matches a freshly created 2.0 diskquota schema +test: 1.0_test_in_2.0_quota_create_in_1.0 # Check if the quota config created by the 1.0 extension still works +test: 1.0_cleanup_quota # Drop the extension +``` + +The schedule file name means this is an upgrade test from 1.0 to 2.0. + +For a downgrade test, just reverse the schedule file. + +--- + +`10.1_test_in_10.0_quota_create_in_10.1` means: + +- the file is for version 10.1 +- this is a test file +- the test runs in 10.0, using the 10.0 binary and 10.0 SQL +- the item under test was created in 10.1 + +---- diff --git a/gpcontrib/diskquota/upgrade_test/alter_test.sh b/gpcontrib/diskquota/upgrade_test/alter_test.sh new file mode 100755 index 00000000000..15046a3ed73 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/alter_test.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# Test if all the previous diskquota minor versions can be directly upgraded +# to the current version. + +set -ex + +SCRIPT_PATH="${BASH_SOURCE[0]}" +SRC_DIR="$(cd "$(dirname "${SCRIPT_PATH}")"/.. >/dev/null 2>&1 && pwd)" + +# Versions like major.minor +CUR_VERSION=$(cut --delimiter="." --fields=1-2 "${SRC_DIR}/VERSION") +ALL_VERSIONS=$(cd "${SRC_DIR}" && git tag | cut --delimiter="." 
--fields=1-2 | sort -V -u) +VERSIONS_TO_TEST=() + +test_alter_from() { + local from_ver=$1 + local to_ver=$CUR_VERSION + + gpconfig -c shared_preload_libraries -v "" + gpstop -rai + dropdb diskquota --if-exists + dropdb diskquota_alter_test --if-exists + createdb diskquota + + local from_so_name="diskquota" + if [ "${from_ver}" != "1.0" ]; then + from_so_name="diskquota-${from_ver}" + fi + local to_so_name="diskquota-${to_ver}" + + # Preload the old diskquota .so + gpconfig -c shared_preload_libraries -v "${from_so_name}" + gpstop -rai + + createdb diskquota_alter_test + + # Test if the extension can be upgraded directly + psql -d diskquota_alter_test -c "CREATE EXTENSION diskquota version '${from_ver}'" + + # Preload the new diskquota .so + gpconfig -c shared_preload_libraries -v "${to_so_name}" + gpstop -rai + + psql -d diskquota_alter_test -c "ALTER EXTENSION diskquota update to '${to_ver}'" + # Sleep to wait for the bgworker to start; otherwise we will get the warning + # 'cannot remove the database from db list, dbid not found'. + sleep 5 + psql -d diskquota_alter_test -c "DROP EXTENSION diskquota" +} + +# Returns 0 if the two versions are equal, 1 if $1 is newer, 2 if $1 is older. +compare_versions() { + # Compare the leading (major) version component numerically + local a=${1%%.*} b=${2%%.*} + [[ "10#${a:-0}" -gt "10#${b:-0}" ]] && return 1 + [[ "10#${a:-0}" -lt "10#${b:-0}" ]] && return 2 + # Strip the leading component from each version and recurse on the remainder + a=${1:${#a} + 1} + b=${2:${#b} + 1} + # Terminal condition for the recursion: both versions fully consumed + [[ -z $a && -z $b ]] || compare_versions "$a" "$b" +} + + +# Find all minor versions before the current one +# The first version of diskquota for Cloudberry is 2.2 +while IFS= read -r ver; do + if [ "${ver}" = "${CUR_VERSION}" ]; then + break + fi + if [ "${ver}" = "0.8" ]; then + continue + fi + # Skip versions before 2.2 (the legacy GP6 era) + set +e + compare_versions $ver "2.2" + cmp_res=$?
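+ # cmp_res of 2 means ${ver} sorts before 2.2 (compare_versions returns 1 when its first argument is newer, 2 when it is older); only those older versions are skipped below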
+ set -e + if [ $cmp_res -eq "2" ]; then + continue + fi + VERSIONS_TO_TEST+=("${ver}") +done <<< "$ALL_VERSIONS" + +for from_ver in "${VERSIONS_TO_TEST[@]}"; do + test_alter_from "${from_ver}" +done diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_catalog.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_catalog.out new file mode 100644 index 00000000000..48d2934a6c9 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_catalog.out @@ -0,0 +1,308 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+------------------------------------------------------- + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} + show_all_relation_view | {oid,oid,oid,oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} +(16 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | + target_rowid_seq | | +(13 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; + proname | prorettype | 
proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.2.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.2.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.2.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.2.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.2.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM pg_class as relstorage WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.2.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.2.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.2.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.2.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.2.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.2.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.2.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.2.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.2.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.2.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.2.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.2.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | viewname | definition 
+------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_all_relation_view | WITH relation_cache AS ( + + | | SELECT f.relid, + + | | f.primary_table_oid, + + | | f.auxrel_num, + + | | f.owneroid, + + | | f.namespaceoid, + + | | f.backendid, + + | | f.spcnode, + + | | f.dbnode, + + | | f.relnode, + + | | f.relstorage, + + | | f.auxrel_oid, + + | | f.relam + + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ + | | ) + + | | SELECT DISTINCT union_relation.oid, + + | | union_relation.relowner, + + | | union_relation.relnamespace, + + | | union_relation.reltablespace + + | | FROM ( SELECT relation_cache.relid AS oid, + + | | relation_cache.owneroid AS relowner, + + | | relation_cache.namespaceoid AS relnamespace, + + | | relation_cache.spcnode AS reltablespace + + | | FROM relation_cache + + | | UNION + + | | SELECT pg_class.oid, + + | | pg_class.relowner, + + | | pg_class.relnamespace, + + | | pg_class.reltablespace + + | | FROM pg_class) union_relation; + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | 
GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM 
(((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(8 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_cleanup_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_install.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_install.out new file mode 100644 index 00000000000..c4b7f4c95ce --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_install.out @@ -0,0 +1,13 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_migrate_to_version_2.2.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_migrate_to_version_2.2.out new file mode 100644 index 00000000000..d6fbb96247b --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_migrate_to_version_2.2.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.2.so +Segment value: diskquota-2.2.so +\c +alter extension diskquota update to '2.2'; +\! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_set_quota.out new file mode 100644 index 00000000000..5083f5747f2 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_set_quota.out @@ -0,0 +1,72 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.2.so +Segment value: diskquota-2.2.so +create extension diskquota with version '2.2'; +select diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. 
+-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_test_in_2.3_quota_create_in_2.2.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_test_in_2.3_quota_create_in_2.2.out new file mode 100644 index 00000000000..aab1cb100c1 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_test_in_2.3_quota_create_in_2.2.out @@ -0,0 +1,16 @@ +-- need run 2.3_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_catalog.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_catalog.out new file mode 100644 index 00000000000..016aecd94c9 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_catalog.out @@ -0,0 +1,308 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+------------------------------------------------------- + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} + show_all_relation_view | {oid,oid,oid,oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} +(16 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | + target_rowid_seq | | +(13 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY 
+ proname; + proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.3.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.3.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.3.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.3.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.3.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM pg_class as relstorage WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.3.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.3.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.3.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.3.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.3.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.3.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.3.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.3.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.3.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.3.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.3.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.3.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | 
viewname | definition +------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_all_relation_view | WITH relation_cache AS ( + + | | SELECT f.relid, + + | | f.primary_table_oid, + + | | f.auxrel_num, + + | | f.owneroid, + + | | f.namespaceoid, + + | | f.backendid, + + | | f.spcnode, + + | | f.dbnode, + + | | f.relnode, + + | | f.relstorage, + + | | f.auxrel_oid, + + | | f.relam + + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ + | | ) + + | | SELECT DISTINCT union_relation.oid, + + | | union_relation.relowner, + + | | union_relation.relnamespace, + + | | union_relation.reltablespace + + | | FROM ( SELECT relation_cache.relid AS oid, + + | | relation_cache.owneroid AS relowner, + + | | relation_cache.namespaceoid AS relnamespace, + + | | relation_cache.spcnode AS reltablespace + + | | FROM relation_cache + + | | UNION + + | | SELECT pg_class.oid, + + | | pg_class.relowner, + + | | pg_class.relnamespace, + + | | pg_class.reltablespace + + | | FROM pg_class) union_relation; + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = 
'-1'::integer)) + + | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS 
nspsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(8 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_cleanup_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_install.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_install.out new file mode 100644 index 00000000000..4738c064a82 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_install.out @@ -0,0 +1,13 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_migrate_to_version_2.3.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_migrate_to_version_2.3.out new file mode 100644 index 00000000000..db67a0e36dd --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_migrate_to_version_2.3.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.3.so +Segment value: diskquota-2.3.so +\c +alter extension diskquota update to '2.3'; +\! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_set_quota.out new file mode 100644 index 00000000000..114f346dddf --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_set_quota.out @@ -0,0 +1,66 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.3.so +Segment value: diskquota-2.3.so +create extension diskquota with version '2.3'; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. 
+-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_test_in_2.2_quota_create_in_2.3.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_test_in_2.2_quota_create_in_2.3.out new file mode 100644 index 00000000000..71c24e5865b --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_test_in_2.2_quota_create_in_2.3.out @@ -0,0 +1,16 @@ +-- need run 2.2_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/init_file b/gpcontrib/diskquota/upgrade_test/init_file new file mode 100644 index 00000000000..a764e9d5254 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/init_file @@ -0,0 +1,13 @@ +-- This file contains global patterns of messages that should be ignored or +-- masked out, when comparing test results with the expected output. +-- Individual tests can contain additional patterns specific to the test. 
+ +-- start_matchignore +m/^NOTICE: resource queue required -- using default resource queue "pg_default"/ +-- end_matchignore +-- start_matchsubs +m/diskquota.c:\d+\)/ +s/diskquota.c:\d+\)/diskquota.c:xxx/ +m/diskquota_utility.c:\d+\)/ +s/diskquota_utility.c:\d+\)/diskquota_utility.c:xxx/ +-- end_matchsubs diff --git a/gpcontrib/diskquota/upgrade_test/schedule_2.2--2.3 b/gpcontrib/diskquota/upgrade_test/schedule_2.2--2.3 new file mode 100644 index 00000000000..486775836d8 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/schedule_2.2--2.3 @@ -0,0 +1,8 @@ +test: 2.2_install +test: 2.2_set_quota +test: 2.2_catalog +test: 2.3_migrate_to_version_2.3 +test: 2.3_catalog +# run 2.2 behavior test using 2.3 DDL and binary +test: 2.2_test_in_2.3_quota_create_in_2.2 +test: 2.2_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/schedule_2.3--2.2 b/gpcontrib/diskquota/upgrade_test/schedule_2.3--2.2 new file mode 100644 index 00000000000..0de828c96e1 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/schedule_2.3--2.2 @@ -0,0 +1,8 @@ +test: 2.3_install +test: 2.3_set_quota +test: 2.3_catalog +test: 2.2_migrate_to_version_2.2 +test: 2.2_catalog +# run 2.3 behavior test using 2.2 DDL and binary +test: 2.3_test_in_2.2_quota_create_in_2.3 +test: 2.3_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.2_catalog.sql b/gpcontrib/diskquota/upgrade_test/sql/2.2_catalog.sql new file mode 100644 index 00000000000..ebf5f00aa56 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/sql/2.2_catalog.sql @@ -0,0 +1,81 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; + +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; +-- types end + +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; +-- tables end + +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; +-- UDF end + +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; +-- views end + +DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.2_cleanup_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.2_cleanup_quota.sql new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/gpcontrib/diskquota/upgrade_test/sql/2.2_cleanup_quota.sql @@ -0,0 +1 @@ +drop extension diskquota; diff --git 
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.2_install.sql b/gpcontrib/diskquota/upgrade_test/sql/2.2_install.sql
new file mode 100644
index 00000000000..33b2f0d3f4c
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.2_install.sql
@@ -0,0 +1,17 @@
+-- cleanup previous diskquota installation
+\! gpconfig -c shared_preload_libraries -v '' > /dev/null
+\! gpstop -raf > /dev/null
+\! dropdb --if-exists diskquota
+
+-- TODO reset all diskquota GUC
+\! gpstop -raf > /dev/null
+
+-- setup basic environment
+\! createdb diskquota
+
+\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null
+\! gpstop -raf > /dev/null
+
+-- TODO setup GUC
+\! gpconfig -c diskquota.naptime -v '1' > /dev/null
+\! gpstop -raf > /dev/null
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.2_migrate_to_version_2.2.sql b/gpcontrib/diskquota/upgrade_test/sql/2.2_migrate_to_version_2.2.sql
new file mode 100644
index 00000000000..88303a66875
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.2_migrate_to_version_2.2.sql
@@ -0,0 +1,8 @@
+\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null
+\! gpstop -raf > /dev/null
+
+\! gpconfig -s 'shared_preload_libraries'
+
+\c
+alter extension diskquota update to '2.2';
+\! sleep 5
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.2_set_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.2_set_quota.sql
new file mode 100644
index 00000000000..8ccb3a80d44
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.2_set_quota.sql
@@ -0,0 +1,45 @@
+\!gpconfig -s 'shared_preload_libraries'
+
+create extension diskquota with version '2.2';
+select diskquota.init_table_size_table();
+select diskquota.wait_for_worker_new_epoch();
+
+-- schema quota
+create schema s1;
+select diskquota.set_schema_quota('s1', '1 MB');
+create table s1.a(i int) distributed by (i);
+insert into s1.a select generate_series(1, 10000000); -- ok.
+
+-- role quota
+create schema srole;
+create role u1 nologin;
+create table srole.b (t text) distributed by (t);
+alter table srole.b owner to u1;
+select diskquota.set_role_quota('u1', '1 MB');
+insert into srole.b select generate_series(1,100000); -- ok.
+
+-- schema tablespace quota
+\! mkdir -p /tmp/schemaspc
+create schema spcs1;
+create tablespace schemaspc location '/tmp/schemaspc';
+select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB');
+create table spcs1.a(i int) tablespace schemaspc distributed by (i);
+insert into spcs1.a select generate_series(1,100000); -- ok.
+
+-- role tablespace quota
+\! mkdir -p /tmp/rolespc
+create tablespace rolespc location '/tmp/rolespc';
+create role rolespcu1 nologin;
+create schema rolespcrole;
+create table rolespcrole.b (t text) tablespace rolespc distributed by (t);
+alter table rolespcrole.b owner to rolespcu1;
+select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB');
+insert into rolespcrole.b select generate_series(1,100000); -- ok.
+
+\!sleep 5
+
+-- leaked resource:
+-- role u1, rolespcu1
+-- table s1.a, srole.b spcs1.a, rolespcrole.b
+-- schema s1, srole, spcs1, rolespcrole
+-- tablespace schemaspc, rolespc
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.2_test_in_2.1_quota_create_in_2.2.sql b/gpcontrib/diskquota/upgrade_test/sql/2.2_test_in_2.1_quota_create_in_2.2.sql
new file mode 100644
index 00000000000..974df545602
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.2_test_in_2.1_quota_create_in_2.2.sql
@@ -0,0 +1,16 @@
+-- need run 2.1_set_quota before run this test
+-- FIXME add version check here
+
+\! sleep 5
+
+insert into s1.a select generate_series(1, 10000000); -- fail.
+insert into srole.b select generate_series(1, 100000); -- fail.
+
+insert into rolespcrole.b select generate_series(1, 100000); -- fail.
+insert into spcs1.a select generate_series(1, 100000); -- fail.
+
+drop table s1.a, srole.b, spcs1.a, rolespcrole.b;
+drop schema s1, srole, spcs1, rolespcrole;
+drop tablespace rolespc;
+drop tablespace schemaspc;
+drop role u1, rolespcu1;
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.2_test_in_2.3_quota_create_in_2.2.sql b/gpcontrib/diskquota/upgrade_test/sql/2.2_test_in_2.3_quota_create_in_2.2.sql
new file mode 100644
index 00000000000..e67027c7e6f
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.2_test_in_2.3_quota_create_in_2.2.sql
@@ -0,0 +1,16 @@
+-- need run 2.3_set_quota before run this test
+-- FIXME add version check here
+
+\! sleep 5
+
+insert into s1.a select generate_series(1, 10000000); -- fail.
+insert into srole.b select generate_series(1, 100000); -- fail.
+
+insert into rolespcrole.b select generate_series(1, 100000); -- fail.
+insert into spcs1.a select generate_series(1, 100000); -- fail.
+
+drop table s1.a, srole.b, spcs1.a, rolespcrole.b;
+drop schema s1, srole, spcs1, rolespcrole;
+drop tablespace rolespc;
+drop tablespace schemaspc;
+drop role u1, rolespcu1;
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.3_catalog.sql b/gpcontrib/diskquota/upgrade_test/sql/2.3_catalog.sql
new file mode 100644
index 00000000000..ebf5f00aa56
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.3_catalog.sql
@@ -0,0 +1,81 @@
+CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS '
+    WITH io AS (
+        SELECT x.i AS index, x.o AS type_id FROM (
+            SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o
+        ) AS x
+    ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid;
+' LANGUAGE sql STABLE;
+
+-- types
+SELECT
+    t1.typname,
+    array_agg(t2.typname order by a.atttypid) typname
+FROM
+    pg_namespace n,
+    pg_class c,
+    pg_type t1,
+    pg_type t2,
+    pg_attribute a
+WHERE
+    n.nspname = 'diskquota'
+    AND c.oid = t1.typrelid
+    AND n.oid = t1.typnamespace
+    AND a.attrelid = c.oid
+    AND t2.oid = a.atttypid
+GROUP BY
+    t1.typname
+ORDER BY
+    t1.typname;
+-- types end
+
+-- tables
+SELECT
+    relname,
+    typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype,
+    typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype
+FROM
+    pg_class c,
+    pg_namespace n
+WHERE
+    c.relnamespace = n.oid
+    AND n.nspname = 'diskquota'
+    and c.relkind != 'v'
+ORDER BY
+    relname;
+-- tables end
+
+-- UDF
+SELECT
+    proname,
+    typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype,
+    typeid_to_name(proargtypes) as proargtypes,
+    typeid_to_name(proallargtypes) as proallargtypes,
+    proargmodes,
+    prosrc,
+    probin,
+    proacl
+FROM
+    pg_namespace n,
+    pg_proc p
+WHERE
+    n.nspname = 'diskquota'
+    AND n.oid = p.pronamespace
+    AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable
+ORDER BY
+    proname;
+-- UDF end
+
+-- views
+SELECT
+    schemaname,
+    viewname,
+    definition
+FROM
+    pg_views
+WHERE
+    schemaname = 'diskquota'
+ORDER by
+    schemaname, viewname;
+-- views end
+
+DROP FUNCTION typeid_to_name (oid[]);
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.3_cleanup_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.3_cleanup_quota.sql
new file mode 100644
index 00000000000..3935d709fd9
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.3_cleanup_quota.sql
@@ -0,0 +1 @@
+drop extension diskquota;
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.3_install.sql b/gpcontrib/diskquota/upgrade_test/sql/2.3_install.sql
new file mode 100644
index 00000000000..03020f08b58
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.3_install.sql
@@ -0,0 +1,17 @@
+-- cleanup previous diskquota installation
+\! gpconfig -c shared_preload_libraries -v '' > /dev/null
+\! gpstop -raf > /dev/null
+\! dropdb --if-exists diskquota
+
+-- TODO reset all diskquota GUC
+\! gpstop -raf > /dev/null
+
+-- setup basic environment
+\! createdb diskquota
+
+\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null
+\! gpstop -raf > /dev/null
+
+-- TODO setup GUC
+\! gpconfig -c diskquota.naptime -v '1' > /dev/null
+\! gpstop -raf > /dev/null
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.3_migrate_to_version_2.3.sql b/gpcontrib/diskquota/upgrade_test/sql/2.3_migrate_to_version_2.3.sql
new file mode 100644
index 00000000000..f6ce2141d74
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.3_migrate_to_version_2.3.sql
@@ -0,0 +1,8 @@
+\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null
+\! gpstop -raf > /dev/null
+
+\! gpconfig -s 'shared_preload_libraries'
+
+\c
+alter extension diskquota update to '2.3';
+\! sleep 5
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.3_set_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.3_set_quota.sql
new file mode 100644
index 00000000000..482841550cb
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.3_set_quota.sql
@@ -0,0 +1,44 @@
+\!gpconfig -s 'shared_preload_libraries'
+
+create extension diskquota with version '2.3';
+select diskquota.wait_for_worker_new_epoch();
+
+-- schema quota
+create schema s1;
+select diskquota.set_schema_quota('s1', '1 MB');
+create table s1.a(i int) distributed by (i);
+insert into s1.a select generate_series(1, 10000000); -- ok.
+
+-- role quota
+create schema srole;
+create role u1 nologin;
+create table srole.b (t text) distributed by (t);
+alter table srole.b owner to u1;
+select diskquota.set_role_quota('u1', '1 MB');
+insert into srole.b select generate_series(1,100000); -- ok.
+
+-- schema tablespace quota
+\! mkdir -p /tmp/schemaspc
+create schema spcs1;
+create tablespace schemaspc location '/tmp/schemaspc';
+select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB');
+create table spcs1.a(i int) tablespace schemaspc distributed by (i);
+insert into spcs1.a select generate_series(1,100000); -- ok.
+
+-- role tablespace quota
+\! mkdir -p /tmp/rolespc
+create tablespace rolespc location '/tmp/rolespc';
+create role rolespcu1 nologin;
+create schema rolespcrole;
+create table rolespcrole.b (t text) tablespace rolespc distributed by (t);
+alter table rolespcrole.b owner to rolespcu1;
+select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB');
+insert into rolespcrole.b select generate_series(1,100000); -- ok.
+
+\!sleep 5
+
+-- leaked resource:
+-- role u1, rolespcu1
+-- table s1.a, srole.b spcs1.a, rolespcrole.b
+-- schema s1, srole, spcs1, rolespcrole
+-- tablespace schemaspc, rolespc
diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.3_test_in_2.2_quota_create_in_2.3.sql b/gpcontrib/diskquota/upgrade_test/sql/2.3_test_in_2.2_quota_create_in_2.3.sql
new file mode 100644
index 00000000000..4a599cfb3c3
--- /dev/null
+++ b/gpcontrib/diskquota/upgrade_test/sql/2.3_test_in_2.2_quota_create_in_2.3.sql
@@ -0,0 +1,16 @@
+-- need run 2.2_set_quota before run this test
+-- FIXME add version check here
+
+\! sleep 5
+
+insert into s1.a select generate_series(1, 10000000); -- fail.
+insert into srole.b select generate_series(1, 100000); -- fail.
+
+insert into rolespcrole.b select generate_series(1, 100000); -- fail.
+insert into spcs1.a select generate_series(1, 100000); -- fail.
+
+drop table s1.a, srole.b, spcs1.a, rolespcrole.b;
+drop schema s1, srole, spcs1, rolespcrole;
+drop tablespace rolespc;
+drop tablespace schemaspc;
+drop role u1, rolespcu1;
diff --git a/licenses/LICENSE-diskquota.txt b/licenses/LICENSE-diskquota.txt
new file mode 100644
index 00000000000..6e94d88cbc9
--- /dev/null
+++ b/licenses/LICENSE-diskquota.txt
@@ -0,0 +1,31 @@
+Copyright (c) 2004-2020 Pivotal Software, Inc.
+Copyright (c) 2020-Present VMware, Inc. or its affiliates
+
+diskquota is licensed under the PostgreSQL license, the same license
+as PostgreSQL. It contains parts of PostgreSQL source code. A copy of
+the license is below:
+
+--------------
+PostgreSQL Database Management System
+(formerly known as Postgres, then as Postgres95)
+
+Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+
+Portions Copyright (c) 1994, The Regents of the University of California
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without a written agreement
+is hereby granted, provided that the above copyright notice and this
+paragraph and the following two paragraphs appear in all copies.
+
+IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
diff --git a/pom.xml b/pom.xml
index 6d33d7e9de5..75fdaf6619e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -153,6 +153,8 @@ code or new licensing patterns.
             gpcontrib/gp_exttable_fdw/data/**
             gpcontrib/gp_exttable_fdw/gp_exttable_fdw.control
+            gpcontrib/diskquota/**
+
             getversion
             .git-blame-ignore-revs
             .dir-locals.el
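
Note on the catalog checks above: the typeid_to_name() helper defined in 2.2_catalog.sql / 2.3_catalog.sql maps an array of type OIDs to the corresponding type names, so the catalog snapshots compare on stable names rather than installation-specific OIDs. A minimal, hypothetical illustration of its behavior follows; the regtype casts and the literal type names are illustrative only and are not part of the test files, and the expected result assumes a stock PostgreSQL/Cloudberry catalog.

-- Hypothetical standalone use of the typeid_to_name() helper from the catalog tests:
-- feed it two well-known type OIDs and it returns their names in input order.
SELECT typeid_to_name(ARRAY['int4'::regtype, 'text'::regtype]::oid[]);
-- expected (assuming a standard catalog): {int4,text}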