diff --git a/docs/features/sharding/cross-shard.md b/docs/features/sharding/cross-shard.md
index 53a4683..0cd091e 100644
--- a/docs/features/sharding/cross-shard.md
+++ b/docs/features/sharding/cross-shard.md
@@ -132,7 +132,7 @@ Some statements, like `CREATE INDEX CONCURRENTLY`, cannot run inside transaction
 
 ```postgresql
 DROP INDEX IF EXISTS user_id_idx;
-CREATE INDEX CONCURRENTLY user_id_idx USING btree(user_id);
+CREATE INDEX CONCURRENTLY user_id_idx ON users USING btree(user_id);
 ```
 
 ## Under the hood
diff --git a/docs/features/sharding/schema_management/primary_keys.md b/docs/features/sharding/schema_management/primary_keys.md
index de6a2e3..e674235 100644
--- a/docs/features/sharding/schema_management/primary_keys.md
+++ b/docs/features/sharding/schema_management/primary_keys.md
@@ -25,7 +25,11 @@ CREATE TABLE users (
 If you run this command through PgDog, this table will be created on all shards. Underneath, Postgres expands `BIGSERIAL` to the following code:
 
 ```postgresql
-BIGINT NOT NULL DEFAULT nextval('users_id_seq'::regclass)
+CREATE TABLE users (
+    id BIGINT UNIQUE NOT NULL DEFAULT nextval('users_id_seq'::regclass),
+    email VARCHAR NOT NULL,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
 ```
 
 The `users_id_seq` is a sequence, automatically created by Postgres, that will be used to generate unique values for inserted rows that don't provide one for the `id` column.
diff --git a/docs/features/sharding/supported-queries.md b/docs/features/sharding/supported-queries.md
index cfce7c3..b619578 100644
--- a/docs/features/sharding/supported-queries.md
+++ b/docs/features/sharding/supported-queries.md
@@ -24,10 +24,10 @@ Postgres has 3 kinds of queries, each handled a little bit differently in a shar
 
 ```postgresql
 -- Sharding key equals a single value
-SELECT * FROM users WHERE user_id = $1
+SELECT * FROM users WHERE user_id = $1;
 
 -- Sharding keys IN tuple
-SELECT * FROM users WHERE id IN ($1, $2, $3)
+SELECT * FROM users WHERE id IN ($1, $2, $3);
 ```
 
 Queries that don't match this pattern will currently be routed to all shards. We are continuously adding support for more complex patterns.
diff --git a/tests/requirements.txt b/tests/requirements.txt
index aa71855..cee78d5 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,2 +1,3 @@
 markdown-it-py
 regex
+pglast
diff --git a/tests/test_code_blocks.py b/tests/test_code_blocks.py
index e153172..2fb56d4 100644
--- a/tests/test_code_blocks.py
+++ b/tests/test_code_blocks.py
@@ -5,12 +5,20 @@ import subprocess
 from markdown_it import MarkdownIt
 import sys
+import pglast
 
 from regex import sub
 from regex.regex import Regex, RegexFlag
 
 mdp = MarkdownIt()
-pattern = re.compile(r'(?msi)^(?P<fence>[`~]{3,})[ \t]*toml\b[^\n]*\r?\n(?P<content>.*?)^(?P=fence)[ \t]*\r?$',)
+pattern = re.compile(
+    r'(?msi)^(?P<fence>[`~]{3,})[^\n]*\r?\n(?P<content>.*?)^(?P=fence)[ \t]*\r?$'
+)
+
+replication = [
+    "CREATE_REPLICATION_SLOT",
+    "START_REPLICATION",
+]
 
 
 def verify(binary):
     for file in glob.glob("docs/**/*.md",
@@ -27,6 +35,17 @@ def verify(binary):
                     pass
                 else:
                     check_file(binary, "pgdog", token.content)
+            elif token.type == "fence" and token.info == "postgresql":
+                try:
+                    pglast.parser.parse_sql(token.content)
+                except Exception as e:
+                    found = False
+                    for cmd in replication:
+                        if cmd in token.content:
+                            found = True
+                    if not found:
+                        print(token.content)
+                        raise e
 
 def check_file(binary, kind, content):
     tmp = f"/tmp/pgdog_config_test.toml"
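
For reference, the rule the new `tests/test_code_blocks.py` branch enforces is: every `postgresql` code fence in the docs must parse with `pglast`, unless it contains a replication-protocol command, which the regular Postgres SQL grammar rejects. The sketch below restates that rule as a standalone function; the function name and the sample snippets are illustrative only and are not part of the patch.

```python
# Illustrative sketch of the docs check (not part of the patch): a
# ```postgresql fence is acceptable if pglast can parse it, or if it
# uses a replication protocol command that plain SQL parsing rejects.
import pglast

# Same escape hatch as the patch's `replication` list.
REPLICATION = ["CREATE_REPLICATION_SLOT", "START_REPLICATION"]

def sql_fence_ok(content: str) -> bool:
    try:
        # Raises a parse error on invalid SQL.
        pglast.parser.parse_sql(content)
        return True
    except Exception:
        return any(cmd in content for cmd in REPLICATION)

assert sql_fence_ok("SELECT * FROM users WHERE user_id = $1;")
assert not sql_fence_ok("SELEC * FROM users;")
```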