diff --git a/.gitignore b/.gitignore
index 787a0004..8719d1d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 .DS_Store
 .idea/
+.env
 engine/bin/
 /db-lab-run/
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 00000000..a4267581
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,23 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Build/Test/Lint Commands
+- Build all components: `cd engine && make build`
+- Lint code: `cd engine && make lint`
+- Run unit tests: `cd engine && make test`
+- Run integration tests: `cd engine && make test-ci-integration`
+- Run a specific test: `cd engine && GO111MODULE=on go test -v ./path/to/package -run TestName`
+- Run UI: `cd ui && pnpm start:ce` (Community Edition) or `pnpm start:platform`
+
+## Code Style Guidelines
+- Go code follows "Effective Go" and "Go Code Review Comments" guidelines
+- Use present tense and imperative mood in commit messages
+- Limit first commit line to 72 characters
+- All Git commits must be signed
+- Format Go code with `cd engine && make fmt`
+- Handle errors with pkg/errors
+- Follow standard Go import ordering
+- Group similar functions together
+- Error messages should be descriptive and actionable
+- UI uses pnpm for package management
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0c4afb69..1608d877 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -121,6 +121,45 @@ We encourage you to follow the principles described in the following documents:
 - [Effective Go](https://go.dev/doc/effective_go)
 - [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
 
+### Message style guide
+Consistent messaging is important throughout the codebase. Follow these guidelines for errors, logs, and user-facing messages:
+
+#### Error messages
+- Lowercase for internal errors and logs: `failed to start session` (no ending period)
+- Uppercase for user-facing errors: `Requested object does not exist. Specify your request.` (with ending period)
+- Omit articles ("a", "an", "the") for brevity: use `failed to update clone` not `failed to update the clone`
+- Be specific and actionable whenever possible
+- For variable interpolation, use consistent formatting: `failed to find clone: %s`
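+
+For example, a minimal Go sketch of these conventions (the `getCloneStatus` helper and its in-memory clone store are hypothetical, shown only to illustrate the message style):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+)
+
+var clones = map[string]string{"clone-1": "ready"}
+
+// getCloneStatus looks up a clone by ID.
+func getCloneStatus(cloneID string) (string, error) {
+	status, ok := clones[cloneID]
+	if !ok {
+		// Internal error: lowercase, no ending period, articles omitted.
+		return "", errors.Errorf("failed to find clone: %s", cloneID)
+	}
+
+	return status, nil
+}
+
+func main() {
+	if _, err := getCloneStatus("clone-42"); err != nil {
+		fmt.Println(err) // logs: failed to find clone: clone-42
+		// The user-facing counterpart is capitalized and ends with a period.
+		fmt.Println("Requested object does not exist. Specify your request.")
+	}
+}
+```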
+
+#### CLI output
+- Use concise, action-oriented language
+- Present tense with ellipsis for in-progress actions: `Creating clone...`
+  - Ellipsis (`...`) indicates an ongoing process where the user should wait
+  - Always follow up with a completion message when the operation finishes
+- Past tense with period for results: `Clone created successfully.`
+- Include relevant identifiers (IDs, names) in output
+
+#### Progress indication
+- Use ellipsis (`...`) to indicate that an operation is in progress and the user should wait
+- For longer operations, consider providing percentage or step indicators: `Cloning database... (25%)`
+- When an operation with ellipsis completes, always provide a completion message without ellipsis
+- Example sequence:
+  ```
+  Creating clone...
+  Clone "test-clone" created successfully.
+  ```
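+
+A minimal Go sketch of this in-progress/completion pattern (the `createClone` function is hypothetical and stands in for real provisioning work):
+
+```
+package main
+
+import "fmt"
+
+// createClone announces the in-progress action, does the work, and always
+// finishes with a completion message without ellipsis.
+func createClone(name string) {
+	fmt.Println("Creating clone...")
+
+	// ... long-running provisioning work happens here ...
+
+	fmt.Printf("Clone %q created successfully.\n", name)
+}
+
+func main() {
+	createClone("test-clone")
+}
+```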
- echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list - - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor | tee /usr/share/keyrings/cloud.google.gpg > /dev/null + - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - - apt-get update && apt-get install -y google-cloud-sdk # Authenticate. @@ -420,6 +420,8 @@ build-image-swagger-release: artifacts: paths: - engine/bin + before_script: + - bash engine/test/_cleanup.sh script: - bash engine/test/1.synthetic.sh - bash engine/test/2.logical_generic.sh @@ -482,7 +484,7 @@ integration-test: stage: integration-test variables: # Instruct Testcontainers to use the daemon of DinD. - DOCKER_HOST: "tcp://docker:2375" +# DOCKER_HOST: "tcp://docker:2375" # Instruct Docker not to start over TLS. DOCKER_TLS_CERTDIR: "" # Improve performance with overlayfs. diff --git a/engine/.golangci.yml b/engine/.golangci.yml index c8a38ec1..bad31644 100644 --- a/engine/.golangci.yml +++ b/engine/.golangci.yml @@ -91,7 +91,6 @@ linters: - depguard - gosec - gocyclo # currently unmaintained - #presets: fast: false issues: diff --git a/engine/api/README.md b/engine/api/README.md new file mode 100644 index 00000000..7ff54a7b --- /dev/null +++ b/engine/api/README.md @@ -0,0 +1,22 @@ +## In this directory +- `swagger-spec` – OpenAPI 3.0 specification of DBLab API +- `swagger-ui` – Swagger UI to see the API specification (embedded in DBLab, available at :2345 or :2346/api) +- `postman` – [Postman](https://www.postman.com/) collection and environment files, used to test API in CI/CD pipelines (running [`newman`](https://github.com/postmanlabs/newman)) + +## Design principles +WIP: https://gitlab.com/postgres-ai/database-lab/-/merge_requests/744 + +## API docs +We use readme.io to host the API docs: https://dblab.readme.io/. Once a new API spec is ready, upload it there as a new documentation version, and publish. + +## Postman, newman, and CI/CD tests +Postman collection is to be generated based on the OpenAPI spec file, using [Portman](https://github.com/apideck-libraries/portman). +1. First, install and initialize `porman` +1. Next, generate a new version of the Postman collection file: + ``` + portman --cliOptionsFile engine/api/postman/portman-cli.json + ``` +1. Review it, edit, adjust: + - Object creation first, then deletion of this object, passing the ID of new object from one action to another (TODO: show how) + - Review and fix tests (TODO: details) +1. 
\ No newline at end of file
diff --git a/engine/api/postman/branching.aws.postgres.ai.postman_environment.json b/engine/api/postman/branching.aws.postgres.ai.postman_environment.json
new file mode 100644
index 00000000..407d3d88
--- /dev/null
+++ b/engine/api/postman/branching.aws.postgres.ai.postman_environment.json
@@ -0,0 +1,21 @@
+{
+  "id": "30035c51-5e48-4d31-8676-2aac8af456ee",
+  "name": "branching.aws.postgres.ai",
+  "values": [
+    {
+      "key": "baseUrl",
+      "value": "https://branching.aws.postgres.ai:446/api",
+      "type": "default",
+      "enabled": true
+    },
+    {
+      "key": "verificationToken",
+      "value": "demo-token",
+      "type": "default",
+      "enabled": true
+    }
+  ],
+  "_postman_variable_scope": "environment",
+  "_postman_exported_at": "2023-05-18T04:01:37.154Z",
+  "_postman_exported_using": "Postman/10.14.2-230517-0637"
+}
\ No newline at end of file
diff --git a/engine/api/postman/dblab.postman_collection.json b/engine/api/postman/dblab.postman_collection.json
deleted file mode 100644
index 2c57013d..00000000
--- a/engine/api/postman/dblab.postman_collection.json
+++ /dev/null
@@ -1,431 +0,0 @@
-{ - "variables": [], - "info": { - "name": "Database Lab", - "_postman_id": "d0182a6c-79d0-877f-df91-18dbca63b734", - "description": "", - "schema": "https://schema.getpostman.com/json/collection/v2.0.0/collection.json" - }, - "item": [ - { - "name": "status", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "var jsonData = JSON.parse(responseBody);", - "tests[\"Check instance status\"] = responseCode.code === 200 && jsonData && jsonData.status && jsonData.status.code && jsonData.status.code === \"OK\";" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/status", - "method": "GET", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "", - "disabled": true - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"dblab_id\": 1\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "snapshots", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "var jsonData = JSON.parse(responseBody);", - "tests[\"Check snapshots list\"] = responseCode.code === 200 && jsonData && Array.isArray(jsonData) && jsonData.length === 1;", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/snapshots", - "method": "GET", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"dblab_id\": 1\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "clone not found", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "var jsonData = JSON.parse(responseBody);", - "tests[\"Check for clone status\"] = responseCode.code === 404 && jsonData && jsonData.detail && jsonData.detail === \"Requested object does not exist.\";", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/bopta26mq8oddsim86v0", - "method": "GET", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "" - } - ], -
"body": { - "mode": "raw", - "raw": "{\n\t\"dblab_id\": 1\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "create clone", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "var jsonData = JSON.parse(responseBody);", - "tests[\"Check for clone create\"] = responseCode.code === 201 && jsonData && jsonData.id && jsonData.status && ", - "(jsonData.status.code == 'OK' || jsonData.status.code == 'CREATING');", - "postman.setGlobalVariable(\"DBLAB_CLONE_ID\", jsonData.id);" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone", - "method": "POST", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "" - } - ], - "body": { - "mode": "raw", - "raw": "{\r\n\t\"name\": \"test-demo-clone\",\r\n\t\"protected\": false,\r\n\t\"db\": {\r\n\t\t\"username\": \"username\",\r\n\t\t\"password\": \"password\"\r\n\t}\r\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "clone status", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "var jsonData = JSON.parse(responseBody);", - "tests[\"Check for clone status\"] = responseCode.code === 200 && jsonData && jsonData.id && jsonData.status && ", - "(jsonData.status.code == 'OK' || jsonData.status.code == 'CREATING');", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/{{DBLAB_CLONE_ID}}", - "method": "GET", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"dblab_id\": 1\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "clone update (name, protected)", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "tests[\"Check for clone update\"] = responseCode.code === 200;", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/{{DBLAB_CLONE_ID}}", - "method": "PATCH", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "", - "disabled": true - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"protected\": true,\n\t\"name\": \"UPDATE_CLONE_TEST\"\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "clone/reset", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "tests[\"Check for clone reset\"] = responseCode.code === 200;", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/{{DBLAB_CLONE_ID}}/reset", - "method": "POST", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "", - "disabled": true - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"id\": \"xxx\"\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "delete protected clone", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "var jsonData = JSON.parse(responseBody);", - "tests[\"Check for delete protected clone\"] = responseCode.code === 500 && jsonData && jsonData.detail && jsonData.detail 
=== \"clone is protected\";", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/{{DBLAB_CLONE_ID}}", - "method": "DELETE", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "" - } - ], - "body": { - "mode": "raw", - "raw": "" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "clone update (disable protection)", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "tests[\"Check for clone update\"] = responseCode.code === 200;", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/{{DBLAB_CLONE_ID}}", - "method": "PATCH", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "", - "disabled": true - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"protected\": false\n}" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "delete clone", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "tests[\"Check for delete protected clone\"] = responseCode.code === 200;", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/{{DBLAB_CLONE_ID}}", - "method": "DELETE", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "" - } - ], - "body": { - "mode": "raw", - "raw": "" - }, - "description": "Select users" - }, - "response": [] - }, - { - "name": "removed clone status", - "event": [ - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "var jsonData = JSON.parse(responseBody);", - "tests[\"Check for clone status\"] = (responseCode.code === 200 && jsonData && jsonData.id && jsonData.status && ", - "jsonData.status.code == 'DELETING') || responseCode.code == 404;", - "" - ] - } - } - ], - "request": { - "url": "{{DBLAB_URL}}/clone/{{DBLAB_CLONE_ID}}", - "method": "GET", - "header": [ - { - "key": "Verification-Token", - "value": "{{DBLAB_VERIFY_TOKEN}}", - "description": "" - }, - { - "key": "Content-Type", - "value": "application/json", - "description": "" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"dblab_id\": 1\n}" - }, - "description": "Select users" - }, - "response": [] - } - ] -} diff --git a/engine/api/postman/dblab.postman_environment.json b/engine/api/postman/dblab.postman_environment.json deleted file mode 100644 index 5f7244c9..00000000 --- a/engine/api/postman/dblab.postman_environment.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "ff4200f0-7acd-eb4f-1dee-59da8c98c313", - "name": "Database Lab", - "values": [ - { - "enabled": true, - "key": "DBLAB_URL", - "value": "https://url", - "type": "text" - }, - { - "enabled": true, - "key": "DBLAB_VERIFY_TOKEN", - "value": "secret_token", - "type": "text" - } - ], - "timestamp": 1580454458304, - "_postman_variable_scope": "environment", - "_postman_exported_at": "2020-01-31T09:42:37.377Z", - "_postman_exported_using": "Postman/5.5.4" -} diff --git a/engine/api/postman/dblab_api.postman_collection.json b/engine/api/postman/dblab_api.postman_collection.json new file mode 100644 index 00000000..7995382f --- /dev/null +++ b/engine/api/postman/dblab_api.postman_collection.json @@ -0,0 +1,4057 @@ +{ + "info": { + 
"_postman_id": "ed8af9f0-1cde-4633-8a57-a47e10d12bfa", + "name": "DBLab API 4.0.0-beta.2", + "description": "This page provides the OpenAPI specification for the Database Lab (DBLab) API, previously recognized as the DLE API (Database Lab Engine API).\n\nContact Support:\n Name: DBLab API Support\n Email: api@postgres.ai", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "34026417" + }, + "item": [ + { + "name": "Instance", + "item": [ + { + "name": "DBLab instance status and detailed information", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/status - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/status - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[GET]::/status - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"object\",\"properties\":{\"status\":{\"required\":[\"code\",\"message\"],\"type\":\"object\",\"properties\":{\"code\":{\"type\":\"string\",\"description\":\"Status code\"},\"message\":{\"type\":\"string\",\"description\":\"Status description\"}}},\"engine\":{\"type\":\"object\",\"properties\":{\"version\":{\"type\":\"string\"},\"edition\":{\"type\":\"string\"},\"billingActive\":{\"type\":\"string\"},\"instanceID\":{\"type\":\"string\"},\"startedAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"telemetry\":{\"type\":\"boolean\"},\"disableConfigModification\":{\"type\":\"boolean\"}}},\"pools\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"name\":{\"type\":\"string\"},\"mode\":{\"type\":\"string\"},\"dataStateAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"status\":{\"type\":\"string\"},\"cloneList\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"fileSystem\":{\"type\":\"object\",\"properties\":{\"mode\":{\"type\":\"string\"},\"free\":{\"type\":\"integer\",\"format\":\"int64\"},\"size\":{\"type\":\"integer\",\"format\":\"int64\"},\"used\":{\"type\":\"integer\",\"format\":\"int64\"},\"dataSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"usedBySnapshots\":{\"type\":\"integer\",\"format\":\"int64\"},\"usedByClones\":{\"type\":\"integer\",\"format\":\"int64\"},\"compressRatio\":{\"type\":\"integer\",\"format\":\"float64\"}}}}}},\"cloning\":{\"type\":\"object\",\"properties\":{\"expectedCloningTime\":{\"type\":\"integer\",\"format\":\"float64\"},\"numClones\":{\"type\":\"integer\",\"format\":\"int64\"},\"clones\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"name\":{\"type\":\"string\"},\"snapshot\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"dataStateAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"physicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"pool\":{\"type\":\"string\"},\"numClones\":{\"type\":\"integer\",\"format\":\"int\"}}},\"protected\":{\"type\":\"boolean\",\"default\":false},\"deleteAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\
"},\"status\":{\"required\":[\"code\",\"message\"],\"type\":\"object\",\"properties\":{\"code\":{\"type\":\"string\",\"description\":\"Status code\"},\"message\":{\"type\":\"string\",\"description\":\"Status description\"}}},\"db\":{\"type\":\"object\",\"properties\":{\"connStr\":{\"type\":\"string\"},\"host\":{\"type\":\"string\"},\"port\":{\"type\":\"string\"},\"username\":{\"type\":\"string\"},\"password\":{\"type\":\"string\"}}},\"metadata\":{\"type\":\"object\",\"properties\":{\"cloneDiffSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"cloningTime\":{\"type\":\"integer\",\"format\":\"float64\"},\"maxIdleMinutes\":{\"type\":\"integer\",\"format\":\"int64\"}}}}}}}},\"retrieving\":{\"type\":\"object\",\"properties\":{\"mode\":{\"type\":\"string\"},\"status\":{\"type\":\"string\"},\"lastRefresh\":{\"type\":\"string\",\"format\":\"date-time\"},\"nextRefresh\":{\"type\":\"string\",\"format\":\"date-time\"},\"alerts\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"activity\":{\"type\":\"object\",\"properties\":{\"source\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"user\":{\"type\":\"string\"},\"query\":{\"type\":\"string\"},\"duration\":{\"type\":\"number\"},\"waitEventType\":{\"type\":\"string\"},\"waitEvent\":{\"type\":\"string\"}}}},\"target\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"user\":{\"type\":\"string\"},\"query\":{\"type\":\"string\"},\"duration\":{\"type\":\"number\"},\"waitEventType\":{\"type\":\"string\"},\"waitEvent\":{\"type\":\"string\"}}}}}}}},\"provisioner\":{\"type\":\"object\",\"properties\":{\"dockerImage\":{\"type\":\"string\"},\"containerConfig\":{\"type\":\"object\",\"properties\":{}}}},\"synchronization\":{\"type\":\"object\",\"properties\":{\"status\":{\"required\":[\"code\",\"message\"],\"type\":\"object\",\"properties\":{\"code\":{\"type\":\"string\",\"description\":\"Status code\"},\"message\":{\"type\":\"string\",\"description\":\"Status description\"}}},\"startedAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"lastReplayedLsn\":{\"type\":\"string\"},\"lastReplayedLsnAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"replicationLag\":{\"type\":\"string\"},\"replicationUptime\":{\"type\":\"integer\"}}}}}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[GET]::/status - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/status", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "status" + ] + }, + "description": "Retrieves detailed information about the DBLab instance: status, version, clones, snapshots, etc." 
+ }, + "response": [ + { + "name": "Returned detailed information about the DBLab instance", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/status", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "status" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"status\": {\n \"code\": \"OK\",\n \"message\": \"Instance is ready\"\n },\n \"engine\": {\n \"version\": \"v4.0.0-alpha.5-20230516-0224\",\n \"edition\": \"standard\",\n \"billingActive\": true,\n \"instanceID\": \"chhfqfcnvrvc73d0lij0\",\n \"startedAt\": \"2023-05-16T03:50:19Z\",\n \"telemetry\": true,\n \"disableConfigModification\": false\n },\n \"pools\": [\n {\n \"name\": \"dblab_pool/dataset_1\",\n \"mode\": \"zfs\",\n \"dataStateAt\": \"\",\n \"status\": \"empty\",\n \"cloneList\": [],\n \"fileSystem\": {\n \"mode\": \"zfs\",\n \"size\": 30685528064,\n \"free\": 30685282816,\n \"used\": 245248,\n \"dataSize\": 12288,\n \"usedBySnapshots\": 0,\n \"usedByClones\": 219648,\n \"compressRatio\": 1\n }\n },\n {\n \"name\": \"dblab_pool/dataset_2\",\n \"mode\": \"zfs\",\n \"dataStateAt\": \"\",\n \"status\": \"empty\",\n \"cloneList\": [],\n \"fileSystem\": {\n \"mode\": \"zfs\",\n \"size\": 30685528064,\n \"free\": 30685282816,\n \"used\": 245248,\n \"dataSize\": 12288,\n \"usedBySnapshots\": 0,\n \"usedByClones\": 219648,\n \"compressRatio\": 1\n }\n },\n {\n \"name\": \"dblab_pool/dataset_3\",\n \"mode\": \"zfs\",\n \"dataStateAt\": \"\",\n \"status\": \"empty\",\n \"cloneList\": [],\n \"fileSystem\": {\n \"mode\": \"zfs\",\n \"size\": 30685528064,\n \"free\": 30685282816,\n \"used\": 245248,\n \"dataSize\": 12288,\n \"usedBySnapshots\": 0,\n \"usedByClones\": 219648,\n \"compressRatio\": 1\n }\n }\n ],\n \"cloning\": {\n \"expectedCloningTime\": 0,\n \"numClones\": 0,\n \"clones\": []\n },\n \"retrieving\": {\n \"mode\": \"logical\",\n \"status\": \"pending\",\n \"lastRefresh\": null,\n \"nextRefresh\": null,\n \"alerts\": {},\n \"activity\": null\n },\n \"provisioner\": {\n \"dockerImage\": \"postgresai/extended-postgres:15\",\n \"containerConfig\": {\n \"shm-size\": \"1gb\"\n }\n },\n \"synchronization\": {\n \"status\": {\n \"code\": \"Not available\",\n \"message\": \"\"\n },\n \"lastReplayedLsn\": \"\",\n \"lastReplayedLsnAt\": \"\",\n \"replicationLag\": 0,\n \"replicationUptime\": 0\n }\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/status", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "status" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Data refresh status", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/instance/retrieval - Status code is 2xx\", function () {", + " 
pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/instance/retrieval - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[GET]::/instance/retrieval - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"object\",\"properties\":{\"mode\":{\"type\":\"string\"},\"status\":{\"type\":\"string\"},\"lastRefresh\":{\"type\":\"string\",\"format\":\"date-time\"},\"nextRefresh\":{\"type\":\"string\",\"format\":\"date-time\"},\"alerts\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"activity\":{\"type\":\"object\",\"properties\":{\"source\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"user\":{\"type\":\"string\"},\"query\":{\"type\":\"string\"},\"duration\":{\"type\":\"number\"},\"waitEventType\":{\"type\":\"string\"},\"waitEvent\":{\"type\":\"string\"}}}},\"target\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"user\":{\"type\":\"string\"},\"query\":{\"type\":\"string\"},\"duration\":{\"type\":\"number\"},\"waitEventType\":{\"type\":\"string\"},\"waitEvent\":{\"type\":\"string\"}}}}}}}}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[GET]::/instance/retrieval - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/instance/retrieval", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "instance", + "retrieval" + ] + }, + "description": "Report a status of the data refresh subsystem (also known as \"data retrieval\"): timestamps of the previous and next refresh runs, status, messages." 
+ }, + "response": [ + { + "name": "Reported a status of the data retrieval subsystem", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/instance/retrieval", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "instance", + "retrieval" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"mode\": \"logical\",\n \"status\": \"pending\",\n \"lastRefresh\": null,\n \"nextRefresh\": null,\n \"alerts\": {},\n \"activity\": null\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/instance/retrieval", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "instance", + "retrieval" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Service health check", + "event": [ + { + "listen": "test", + "script": { + "type": "text/javascript", + "exec": [ + "// Validate status 2xx \npm.test(\"[GET]::/healthz - Status code is 2xx\", function () {\n pm.response.to.be.success;\n});\n", + "// Validate if response header has matching content-type\npm.test(\"[GET]::/healthz - Content-Type is application/json\", function () {\n pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");\n});\n", + "// Validate if response has JSON Body \npm.test(\"[GET]::/healthz - Response has JSON Body\", function () {\n pm.response.to.have.jsonBody();\n});\n", + "// Response Validation\nconst schema = {\"type\":\"object\",\"properties\":{\"version\":{\"type\":\"string\"},\"edition\":{\"type\":\"string\"},\"billingActive\":{\"type\":\"string\"},\"instanceID\":{\"type\":\"string\"},\"startedAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"telemetry\":{\"type\":\"boolean\"},\"disableConfigModification\":{\"type\":\"boolean\"}}}\n\n// Validate if response matches JSON schema \npm.test(\"[GET]::/healthz - Schema is valid\", function() {\n pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});\n});\n" + ] + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/healthz", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "healthz" + ] + }, + "description": "Check the overall health and availability of the API system. This endpoint does not require the 'Verification-Token' header." 
+ }, + "response": [ + { + "name": "Successful operation", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/healthz", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "healthz" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"version\": \"v4.0.0-alpha.5-20230516-0224\",\n \"edition\": \"standard\",\n \"instanceID\": \"chhfqfcnvrvc73d0lij0\"\n}" + } + ] + } + ] + }, + { + "name": "Snapshots", + "item": [ + { + "name": "List all snapshots", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/snapshots - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/snapshots - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[GET]::/snapshots - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"dataStateAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"physicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"pool\":{\"type\":\"string\"},\"numClones\":{\"type\":\"integer\",\"format\":\"int\"}}}}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[GET]::/snapshots - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/snapshots", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "snapshots" + ] + }, + "description": "Return a list of all available snapshots." 
+ }, + "response": [ + { + "name": "Returned a list of snapshots", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/snapshots", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "snapshots" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "[\n {\n \"id\": \"dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711\",\n \"createdAt\": \"2023-05-09T21:27:11Z\",\n \"dataStateAt\": \"2023-05-09T21:27:11Z\",\n \"physicalSize\": 0,\n \"logicalSize\": 11518021632,\n \"pool\": \"dblab_pool/dataset_2\",\n \"numClones\": 1\n },\n {\n \"id\": \"dblab_pool/dataset_2/nik-test-branch/20230307171959@20230307171959\",\n \"createdAt\": \"2023-03-07T17:19:59Z\",\n \"dataStateAt\": \"2023-03-07T17:19:59Z\",\n \"physicalSize\": 151552,\n \"logicalSize\": 11518015488,\n \"pool\": \"dblab_pool/dataset_2\",\n \"numClones\": 1\n }\n]" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/snapshots", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "snapshots" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Create a snapshot", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/branch/snapshot - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/branch/snapshot - Content-Type is */*\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"*/*\");", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "*/*" + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"cloneID\": \"test3\",\n \"message\": \"do\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/snapshot", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "snapshot" + ] + }, + "description": "Create a new snapshot using the specified clone. After a snapshot has been created, the original clone can be deleted in order to free up compute resources, if necessary. The snapshot created by this endpoint can be used later to create one or more new clones." 
+ }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"cloneID\": \"aliquip sit nisi\",\n \"message\": \"do\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/snapshot", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "snapshot" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"snapshotID\": \"voluptate\"\n}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"cloneID\": \"aliquip sit nisi\",\n \"message\": \"do\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/snapshot", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "snapshot" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\",\n \"detail\": \"occaecat\",\n \"hint\": \"anim\"\n}" + } + ] + }, + { + "name": "Retrieve a snapshot", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/branch/snapshot/:id - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/branch/snapshot/:id - Content-Type is */*\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"*/*\");", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "url": { + "raw": "{{baseUrl}}/branch/snapshot/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "snapshot", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "" + } + ] + }, + "description": "Retrieves the information for the specified snapshot." 
+ }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "url": { + "raw": "{{baseUrl}}/branch/snapshot/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "snapshot", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) ID of the branch snapshot" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"id\": \"nostrud exercitation id velit\",\n \"parent\": \"exercitation sunt do anim\",\n \"child\": \"cillum incididunt voluptate veniam\",\n \"branch\": [\n \"cillum\",\n \"Excepteur ut ut occaecat eu\"\n ],\n \"root\": \"mollit culpa enim nostrud\",\n \"dataStateAt\": \"2008-01-19T00:42:22.510Z\",\n \"message\": \"irure qui \"\n}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "url": { + "raw": "{{baseUrl}}/branch/snapshot/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "snapshot", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) ID of the branch snapshot" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\",\n \"detail\": \"occaecat\",\n \"hint\": \"anim\"\n}" + } + ] + }, + { + "name": "Delete a snapshot", + "request": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) ", + "type": "text" + }, + { + "key": "Accept", + "value": "*/*", + "type": "text" + } + ], + "url": { + "raw": "{{baseUrl}}/snapshot/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "snapshot", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "lorem" + } + ] + } + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "type": "text" + }, + { + "key": "Accept", + "value": "*/*", + "type": "text" + } + ], + "url": { + "raw": "{{baseUrl}}/snapshot/dblab_pool/dataset_3@snapshot_20250324084404", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "snapshot", + "dblab_pool", + "dataset_3@snapshot_20250324084404" + ] + } + }, + "_postman_previewlanguage": null, + "header": null, + "cookie": [], + "body": null + }, + { + "name": "Bad request", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "type": "text" + }, + { + "key": "Accept", + "value": "*/*", + "type": "text" + } + ], + "url": { + "raw": "{{baseUrl}}/snapshot/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "snapshot", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "" + } + ] + } + }, + "_postman_previewlanguage": null, + "header": null, + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\"\n}" + } + ] + } + ] + }, + { + "name": "Clones", + 
"item": [ + { + "name": "List all clones", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/clones - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/clones - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[GET]::/clones - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"name\":{\"type\":\"string\"},\"snapshot\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"dataStateAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"physicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"pool\":{\"type\":\"string\"},\"numClones\":{\"type\":\"integer\",\"format\":\"int\"}}},\"protected\":{\"type\":\"boolean\",\"default\":false},\"deleteAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"status\":{\"required\":[\"code\",\"message\"],\"type\":\"object\",\"properties\":{\"code\":{\"type\":\"string\",\"description\":\"Status code\"},\"message\":{\"type\":\"string\",\"description\":\"Status description\"}}},\"db\":{\"type\":\"object\",\"properties\":{\"connStr\":{\"type\":\"string\"},\"host\":{\"type\":\"string\"},\"port\":{\"type\":\"string\"},\"username\":{\"type\":\"string\"},\"password\":{\"type\":\"string\"}}},\"metadata\":{\"type\":\"object\",\"properties\":{\"cloneDiffSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"cloningTime\":{\"type\":\"integer\",\"format\":\"float64\"},\"maxIdleMinutes\":{\"type\":\"integer\",\"format\":\"int64\"}}}}}}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[GET]::/clones - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clones", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clones" + ] + }, + "description": "Return a list of all available clones (database endpoints)." 
+ }, + "response": [ + { + "name": "Returned a list of all available clones", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clones", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clones" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "[\n {\n \"id\": \"test-clone-2\",\n \"snapshot\": {\n \"id\": \"dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711\",\n \"createdAt\": \"2023-05-09T21:27:11Z\",\n \"dataStateAt\": \"2023-05-09T21:27:11Z\",\n \"physicalSize\": 120832,\n \"logicalSize\": 11518021632,\n \"pool\": \"dblab_pool/dataset_2\",\n \"numClones\": 3\n },\n \"branch\": \"\",\n \"protected\": false,\n \"deleteAt\": null,\n \"createdAt\": \"2023-05-16T06:12:52Z\",\n \"status\": {\n \"code\": \"OK\",\n \"message\": \"Clone is ready to accept Postgres connections.\"\n },\n \"db\": {\n \"connStr\": \"host=branching.aws.postgres.ai port=6005 user=tester dbname=postgres\",\n \"host\": \"branching.aws.postgres.ai\",\n \"port\": \"6005\",\n \"username\": \"tester\",\n \"password\": \"\",\n \"dbName\": \"postgres\"\n },\n \"metadata\": {\n \"cloneDiffSize\": 484352,\n \"logicalSize\": 11518029312,\n \"cloningTime\": 1.5250661829999999,\n \"maxIdleMinutes\": 120\n }\n },\n {\n \"id\": \"test-clone\",\n \"snapshot\": {\n \"id\": \"dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711\",\n \"createdAt\": \"2023-05-09T21:27:11Z\",\n \"dataStateAt\": \"2023-05-09T21:27:11Z\",\n \"physicalSize\": 120832,\n \"logicalSize\": 11518021632,\n \"pool\": \"dblab_pool/dataset_2\",\n \"numClones\": 3\n },\n \"branch\": \"\",\n \"protected\": false,\n \"deleteAt\": null,\n \"createdAt\": \"2023-05-16T06:12:30Z\",\n \"status\": {\n \"code\": \"OK\",\n \"message\": \"Clone is ready to accept Postgres connections.\"\n },\n \"db\": {\n \"connStr\": \"host=branching.aws.postgres.ai port=6004 user=tester dbname=postgres\",\n \"host\": \"branching.aws.postgres.ai\",\n \"port\": \"6004\",\n \"username\": \"tester\",\n \"password\": \"\",\n \"dbName\": \"postgres\"\n },\n \"metadata\": {\n \"cloneDiffSize\": 486400,\n \"logicalSize\": 11518030336,\n \"cloningTime\": 1.57552338,\n \"maxIdleMinutes\": 120\n }\n }\n]" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clones", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clones" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Create a clone", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/clone - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/clone - Content-Type is application/json\", function () 
{", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[POST]::/clone - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"name\":{\"type\":\"string\"},\"snapshot\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"dataStateAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"physicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"pool\":{\"type\":\"string\"},\"numClones\":{\"type\":\"integer\",\"format\":\"int\"}}},\"protected\":{\"type\":\"boolean\",\"default\":false},\"deleteAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"status\":{\"required\":[\"code\",\"message\"],\"type\":\"object\",\"properties\":{\"code\":{\"type\":\"string\",\"description\":\"Status code\"},\"message\":{\"type\":\"string\",\"description\":\"Status description\"}}},\"db\":{\"type\":\"object\",\"properties\":{\"connStr\":{\"type\":\"string\"},\"host\":{\"type\":\"string\"},\"port\":{\"type\":\"string\"},\"username\":{\"type\":\"string\"},\"password\":{\"type\":\"string\"}}},\"metadata\":{\"type\":\"object\",\"properties\":{\"cloneDiffSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"cloningTime\":{\"type\":\"integer\",\"format\":\"float64\"},\"maxIdleMinutes\":{\"type\":\"integer\",\"format\":\"int64\"}}}}}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[POST]::/clone - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"id\": \"magna cupidatat\",\n \"snapshot\": {\n \"id\": \"veniam\"\n },\n \"branch\": \"incididunt aliquip\",\n \"protected\": null,\n \"db\": {\n \"username\": \"Duis Lorem\",\n \"password\": \"culpa non velit ut\",\n \"restricted\": null,\n \"db_name\": \"dolore qui ut\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone" + ] + } + }, + "response": [ + { + "name": "Created a new clone", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"id\": \"magna cupidatat\",\n \"snapshot\": {\n \"id\": \"veniam\"\n },\n \"branch\": \"incididunt aliquip\",\n \"protected\": null,\n \"db\": {\n \"username\": \"Duis Lorem\",\n \"password\": \"culpa non velit ut\",\n \"restricted\": null,\n \"db_name\": \"dolore qui ut\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone", + "host": [ + "{{baseUrl}}" + 
], + "path": [ + "clone" + ] + } + }, + "status": "Created", + "code": 201, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"test-clone-2\",\n \"snapshot\": {\n \"id\": \"dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711\",\n \"createdAt\": \"2023-05-09T21:27:11Z\",\n \"dataStateAt\": \"2023-05-09T21:27:11Z\",\n \"physicalSize\": 120832,\n \"logicalSize\": 11518021632,\n \"pool\": \"dblab_pool/dataset_2\",\n \"numClones\": 3\n },\n \"branch\": \"\",\n \"protected\": false,\n \"deleteAt\": null,\n \"createdAt\": \"2023-05-16T06:12:52Z\",\n \"status\": {\n \"code\": \"CREATING\",\n \"message\": \"Clone is being created.\"\n },\n \"db\": {\n \"connStr\": \"\",\n \"host\": \"\",\n \"port\": \"\",\n \"username\": \"tester\",\n \"password\": \"\",\n \"dbName\": \"postgres\"\n },\n \"metadata\": {\n \"cloneDiffSize\": 0,\n \"logicalSize\": 0,\n \"cloningTime\": 0,\n \"maxIdleMinutes\": 0\n }\n}" + }, + { + "name": "Returned an error caused by invalid request", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"id\": \"magna cupidatat\",\n \"snapshot\": {\n \"id\": \"veniam\"\n },\n \"branch\": \"incididunt aliquip\",\n \"protected\": null,\n \"db\": {\n \"username\": \"Duis Lorem\",\n \"password\": \"culpa non velit ut\",\n \"restricted\": null,\n \"db_name\": \"dolore qui ut\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"BAD_REQUEST\",\n \"message\": \"clone with such ID already exists\"\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"id\": \"magna cupidatat\",\n \"snapshot\": {\n \"id\": \"veniam\"\n },\n \"branch\": \"incididunt aliquip\",\n \"protected\": null,\n \"db\": {\n \"username\": \"Duis Lorem\",\n \"password\": \"culpa non velit ut\",\n \"restricted\": null,\n \"db_name\": \"dolore qui ut\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Retrieve a clone", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/clone/:id - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/clone/:id - Content-Type is application/json\", function () {", + " 
pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[GET]::/clone/:id - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"name\":{\"type\":\"string\"},\"snapshot\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"dataStateAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"physicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"pool\":{\"type\":\"string\"},\"numClones\":{\"type\":\"integer\",\"format\":\"int\"}}},\"protected\":{\"type\":\"boolean\",\"default\":false},\"deleteAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"status\":{\"required\":[\"code\",\"message\"],\"type\":\"object\",\"properties\":{\"code\":{\"type\":\"string\",\"description\":\"Status code\"},\"message\":{\"type\":\"string\",\"description\":\"Status description\"}}},\"db\":{\"type\":\"object\",\"properties\":{\"connStr\":{\"type\":\"string\"},\"host\":{\"type\":\"string\"},\"port\":{\"type\":\"string\"},\"username\":{\"type\":\"string\"},\"password\":{\"type\":\"string\"}}},\"metadata\":{\"type\":\"object\",\"properties\":{\"cloneDiffSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"cloningTime\":{\"type\":\"integer\",\"format\":\"float64\"},\"maxIdleMinutes\":{\"type\":\"integer\",\"format\":\"int64\"}}}}}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[GET]::/clone/:id - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + }, + "description": "Retrieves the information for the specified clone." 
+ }, + "response": [ + { + "name": "Returned detailed information for the specified clone", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"test-clone\",\n \"snapshot\": {\n \"id\": \"dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711\",\n \"createdAt\": \"2023-05-09T21:27:11Z\",\n \"dataStateAt\": \"2023-05-09T21:27:11Z\",\n \"physicalSize\": 120832,\n \"logicalSize\": 11518021632,\n \"pool\": \"dblab_pool/dataset_2\",\n \"numClones\": 3\n },\n \"branch\": \"\",\n \"protected\": false,\n \"deleteAt\": null,\n \"createdAt\": \"2023-05-16T06:12:30Z\",\n \"status\": {\n \"code\": \"OK\",\n \"message\": \"Clone is ready to accept Postgres connections.\"\n },\n \"db\": {\n \"connStr\": \"host=branching.aws.postgres.ai port=6004 user=tester dbname=postgres\",\n \"host\": \"branching.aws.postgres.ai\",\n \"port\": \"6004\",\n \"username\": \"tester\",\n \"password\": \"\",\n \"dbName\": \"postgres\"\n },\n \"metadata\": {\n \"cloneDiffSize\": 486400,\n \"logicalSize\": 11518030336,\n \"cloningTime\": 1.57552338,\n \"maxIdleMinutes\": 120\n }\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + }, + { + "name": "Not found", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"NOT_FOUND\",\n \"message\": \"Requested object does not exist. 
Specify your request.\"\n}" + } + ] + }, + { + "name": "Delete a clone", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[DELETE]::/clone/:id - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[DELETE]::/clone/:id - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[DELETE]::/clone/:id - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + }, + "description": "Permanently delete the specified clone. It cannot be undone." + }, + "response": [ + { + "name": "Successfully deleted the specified clone", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "\"OK\"" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + }, + { + "name": "Not found", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"NOT_FOUND\",\n 
\"message\": \"Requested object does not exist. Specify your request.\"\n}" + } + ] + }, + { + "name": "Update a clone", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[PATCH]::/clone/:id - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[PATCH]::/clone/:id - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[PATCH]::/clone/:id - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"name\":{\"type\":\"string\"},\"snapshot\":{\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"dataStateAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"physicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"pool\":{\"type\":\"string\"},\"numClones\":{\"type\":\"integer\",\"format\":\"int\"}}},\"protected\":{\"type\":\"boolean\",\"default\":false},\"deleteAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"createdAt\":{\"type\":\"string\",\"format\":\"date-time\"},\"status\":{\"required\":[\"code\",\"message\"],\"type\":\"object\",\"properties\":{\"code\":{\"type\":\"string\",\"description\":\"Status code\"},\"message\":{\"type\":\"string\",\"description\":\"Status description\"}}},\"db\":{\"type\":\"object\",\"properties\":{\"connStr\":{\"type\":\"string\"},\"host\":{\"type\":\"string\"},\"port\":{\"type\":\"string\"},\"username\":{\"type\":\"string\"},\"password\":{\"type\":\"string\"}}},\"metadata\":{\"type\":\"object\",\"properties\":{\"cloneDiffSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"logicalSize\":{\"type\":\"integer\",\"format\":\"int64\"},\"cloningTime\":{\"type\":\"integer\",\"format\":\"float64\"},\"maxIdleMinutes\":{\"type\":\"integer\",\"format\":\"int64\"}}}}}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[PATCH]::/clone/:id - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "PATCH", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"protected\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + }, + "description": "Updates the specified clone by setting the values of the parameters passed. Currently, only one paramater is supported: 'protected'." 
+ }, + "response": [ + { + "name": "Successfully updated the specified clone", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"protected\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"test-clone-2\",\n \"snapshot\": {\n \"id\": \"dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711\",\n \"createdAt\": \"2023-05-09T21:27:11Z\",\n \"dataStateAt\": \"2023-05-09T21:27:11Z\",\n \"physicalSize\": 120832,\n \"logicalSize\": 11518021632,\n \"pool\": \"dblab_pool/dataset_2\",\n \"numClones\": 2\n },\n \"branch\": \"\",\n \"protected\": true,\n \"deleteAt\": null,\n \"createdAt\": \"2023-05-16T06:12:52Z\",\n \"status\": {\n \"code\": \"OK\",\n \"message\": \"Clone is ready to accept Postgres connections.\"\n },\n \"db\": {\n \"connStr\": \"host=branching.aws.postgres.ai port=6005 user=tester dbname=postgres\",\n \"host\": \"branching.aws.postgres.ai\",\n \"port\": \"6005\",\n \"username\": \"tester\",\n \"password\": \"\",\n \"dbName\": \"postgres\"\n },\n \"metadata\": {\n \"cloneDiffSize\": 561664,\n \"logicalSize\": 11518030336,\n \"cloningTime\": 1.5250661829999999,\n \"maxIdleMinutes\": 120\n }\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"protected\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone/:id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Reset a clone", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/clone/:id/reset - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/clone/:id/reset - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[POST]::/clone/:id/reset - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": 
"Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"snapshotID\": \"ut nulla Duis in in\",\n \"latest\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone/:id/reset", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id", + "reset" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + }, + "description": "Reset the specified clone to a previously stored state. This can be done by specifying a particular snapshot ID or using the 'latest' flag. All changes made after the snapshot are discarded during the reset, unless those changes were preserved in a snapshot. All database connections will be reset, requiring users and applications to reconnect. The duration of the reset operation is comparable to the creation of a new clone. However, unlike creating a new clone, the reset operation retains the database credentials and does not change the port. Consequently, users and applications can continue to use the same database credentials post-reset, though reconnection will be necessary. Please note that any unsaved changes will be irretrievably lost during this operation, so ensure necessary data is backed up in a snapshot prior to resetting the clone." + }, + "response": [ + { + "name": "Successfully reset the state of the specified clone", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"snapshotID\": \"ut nulla Duis in in\",\n \"latest\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone/:id/reset", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id", + "reset" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "\"OK\"" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"snapshotID\": \"ut nulla Duis in in\",\n \"latest\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/clone/:id/reset", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "clone", + ":id", + "reset" + ], + "variable": [ + { + "key": "id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + } + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + } + ] + }, + { + "name": "Branches", + "item": [ + { + "name": "List all branches", + "event": [ + { + "listen": "test", + 
"script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/branches - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/branches - Content-Type is */*\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"*/*\");", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "url": { + "raw": "{{baseUrl}}/branches", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branches" + ] + }, + "description": "Return a list of all available branches (named pointers to snapshots)." + }, + "response": [ + { + "name": "Returned a list of all available branches", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "url": { + "raw": "{{baseUrl}}/branches", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branches" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "[\n {\n \"name\": \"my-1\",\n \"parent\": \"main\",\n \"dataStateAt\": \"20230224202652\",\n \"snapshotID\": \"dblab_pool/dataset_2/main/20230224202652@20230224202652\"\n },\n {\n \"name\": \"nik-test-branch\",\n \"parent\": \"-\",\n \"dataStateAt\": \"20230509212711\",\n \"snapshotID\": \"dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711\"\n },\n {\n \"name\": \"main\",\n \"parent\": \"-\",\n \"dataStateAt\": \"20230224202652\",\n \"snapshotID\": \"dblab_pool/dataset_2/main/20230224202652@20230224202652\"\n }\n]" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/branches", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branches" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Create a branch", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/branch/create - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/branch/create - Content-Type is */*\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"*/*\");", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "*/*" + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"branchName\": \"aute do laborum\",\n \"baseBranch\": \"tempor aliqua 
consectetur\",\n \"snapshotID\": \"mollit velit\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch" + ] + } + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"branchName\": \"aute do laborum\",\n \"baseBranch\": \"tempor aliqua consectetur\",\n \"snapshotID\": \"mollit velit\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"name\": \"cillum in laborum\"\n}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"branchName\": \"aute do laborum\",\n \"baseBranch\": \"tempor aliqua consectetur\",\n \"snapshotID\": \"mollit velit\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\",\n \"detail\": \"occaecat\",\n \"hint\": \"anim\"\n}" + } + ] + }, + { + "name": "Delete a branch", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/branch/delete - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/branch/delete - Content-Type is */*\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"*/*\");", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "*/*" + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "" + }, + "url": { + "raw": "{{baseUrl}}/branch/:branchName", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + ":branchName" + ], + "variable": [ + { + "key": "branchName", + "value": "" + } + ] + }, + "description": "Permanently delete the specified branch. It cannot be undone." 
+ }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"branchName\": \"dolore aliqua laboris offi\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/delete", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "delete" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"status\": \"irure pariatur Excepteur occaecat ullamco\",\n \"message\": \"in enim tempor\"\n}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"branchName\": \"dolore aliqua laboris offi\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/delete", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "delete" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\",\n \"detail\": \"occaecat\",\n \"hint\": \"anim\"\n}" + } + ] + }, + { + "name": "Retrieve a branch log", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/branch/log - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/branch/log - Content-Type is */*\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"*/*\");", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "protocolProfileBehavior": { + "disableBodyPruning": true + }, + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "*/*" + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"branchName\": \"in exercitation eiusmod voluptate eu\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/:branchName/log", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + ":branchName", + "log" + ], + "variable": [ + { + "key": "branchName", + "value": "" + } + ] + }, + "description": "Retrieve a log of the specified branch (history of snapshots)." 
+ }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"branchName\": \"in exercitation eiusmod voluptate eu\"\n}" + }, + "url": { + "raw": "{{baseUrl}}/branch/log", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "branch", + "log" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "[\n {\n \"id\": \"commodo enim\",\n \"parent\": \"laboris anim labore adipisi\",\n \"child\": \"consequat\",\n \"branch\": [\n \"ullamco ad cillum proident\",\n \"ea elit tempor nostrud\"\n ],\n \"root\": \"sunt\",\n \"dataStateAt\": \"2013-09-01T22:20:46.803Z\",\n \"message\": \"et sit\"\n },\n {\n \"id\": \"nisi cillum est deserunt\",\n \"parent\": \"pariatur Lorem\",\n \"child\": \"eu labore do deserunt\",\n \"branch\": [\n \"officia dolor\",\n \"dolor cillum eu culpa ut\"\n ],\n \"root\": \"exercitation aute\",\n \"dataStateAt\": \"1963-05-08T18:09:20.040Z\",\n \"message\": \"est Excepteur mollit nostrud\"\n }\n]" + } + ] + } + ] + }, + { + "name": "Admin", + "item": [ + { + "name": "Get config", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/admin/config - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/admin/config - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[GET]::/admin/config - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"object\"}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[GET]::/admin/config - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/config", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config" + ] + }, + "description": "Retrieve the DBLab configuration. All sensitive values are masked. Only limited set of configuration parameters is returned – only those that can be changed via API (unless reconfiguration via API is disabled by admin). The result is provided in JSON format." 
+ }, + "response": [ + { + "name": "Returned configuration", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/config", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"databaseConfigs\": {\n \"configs\": {\n \"shared_buffers\": \"1GB\",\n \"shared_preload_libraries\": \"pg_stat_statements, pg_stat_kcache, auto_explain, logerrors\"\n }\n },\n \"databaseContainer\": {\n \"dockerImage\": \"registry.gitlab.com/postgres-ai/se-images/supabase:15\"\n },\n \"global\": {\n \"debug\": true\n },\n \"retrieval\": {\n \"refresh\": {\n \"timetable\": \"0 1 * * 0\"\n },\n \"spec\": {\n \"logicalDump\": {\n \"options\": {\n \"customOptions\": [],\n \"databases\": {\n \"test_small\": {}\n },\n \"parallelJobs\": 4,\n \"source\": {\n \"connection\": {\n \"dbname\": \"test_small\",\n \"host\": \"dev1.postgres.ai\",\n \"port\": 6666,\n \"username\": \"john\"\n }\n }\n }\n },\n \"logicalRestore\": {\n \"options\": {\n \"customOptions\": [\n \"--no-tablespaces\",\n \"--no-privileges\",\n \"--no-owner\",\n \"--exit-on-error\"\n ],\n \"parallelJobs\": 4\n }\n }\n }\n }\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/config", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Set config", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/admin/config - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/admin/config - Content-Type is application/json\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");", + "});", + "", + "// Validate if response has JSON Body ", + "pm.test(\"[POST]::/admin/config - Response has JSON Body\", function () {", + " pm.response.to.have.jsonBody();", + "});", + "", + "// Response Validation", + "const schema = {\"type\":\"object\"}", + "", + "// Validate if response matches JSON schema ", + "pm.test(\"[POST]::/admin/config - Schema is valid\", function() {", + " pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + 
"options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/config", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config" + ] + }, + "description": "Set specific configurations for the DBLab instance using this endpoint. The returned configuration parameters are limited to those that can be modified via the API (unless the API-based reconfiguration has been disabled by an administrator). The result will be provided in JSON format." + }, + "response": [ + { + "name": "Successfully saved configuration parameters", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/config", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/config", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"BAD_REQUEST\",\n \"message\": \"configuration management via UI/API disabled by admin\"\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/config", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Get full config (YAML)", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[GET]::/admin/config.yaml - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[GET]::/admin/config.yaml - Content-Type is application/yaml\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/yaml\");", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": 
"application/yaml" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/config.yaml", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config.yaml" + ] + }, + "description": "Retrieve the DBLab configuration in YAML format. All sensitive values are masked. This method allows seeing the entire configuration file and can be helpful for reviewing configuration and setting up workflows to automate DBLab provisioning and configuration." + }, + "response": [ + { + "name": "Returned configuration (YAML)", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/yaml" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/config.yaml", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config.yaml" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "application/yaml" + } + ], + "cookie": [], + "body": "" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/config.yaml", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "config.yaml" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Test source database", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/admin/test-db-source - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"host\": \"veniam\",\n \"port\": \"tempor\",\n \"dbname\": \"et tempor in\",\n \"username\": \"minim ir\",\n \"password\": \"nisi ut incididunt in mollit\",\n \"db_list\": [\n \"veniam exercitation dolore\",\n \"do nisi in occaecat\"\n ]\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/test-db-source", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "test-db-source" + ] + } + }, + "response": [ + { + "name": "Successful operation", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"host\": \"adipisicing dolor\",\n \"port\": \"elit\",\n \"dbname\": \"cupidatat in veniam laborum dolore\",\n \"username\": \"sint\",\n \"password\": \"cillum nisi consectetur\",\n \"db_list\": [\n \"ad quis\",\n \"aliqua nisi\"\n ]\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/test-db-source", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "test-db-source" + ] + } + }, + "status": "OK", + 
"code": 200, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"host\": \"adipisicing dolor\",\n \"port\": \"elit\",\n \"dbname\": \"cupidatat in veniam laborum dolore\",\n \"username\": \"sint\",\n \"password\": \"cillum nisi consectetur\",\n \"db_list\": [\n \"ad quis\",\n \"aliqua nisi\"\n ]\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/test-db-source", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "test-db-source" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"BAD_REQUEST\",\n \"message\": \"configuration management via UI/API disabled by admin\"\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"host\": \"adipisicing dolor\",\n \"port\": \"elit\",\n \"dbname\": \"cupidatat in veniam laborum dolore\",\n \"username\": \"sint\",\n \"password\": \"cillum nisi consectetur\",\n \"db_list\": [\n \"ad quis\",\n \"aliqua nisi\"\n ]\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/admin/test-db-source", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "test-db-source" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + }, + { + "name": "Test source database", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "// Validate status 2xx ", + "pm.test(\"[POST]::/admin/ws-auth - Status code is 2xx\", function () {", + " pm.response.to.be.success;", + "});", + "", + "// Validate if response header has matching content-type", + "pm.test(\"[POST]::/admin/ws-auth - Content-Type is */*\", function () {", + " pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"*/*\");", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "{{verificationToken}}", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/ws-auth", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "ws-auth" + ] + } + }, + "response": [ + { + "name": "Successful operation", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "*/*" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/ws-auth", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "ws-auth" + ] + } + }, + "status": "OK", + "code": 200, + 
"_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "*/*" + } + ], + "cookie": [], + "body": "{\n \"token\": \"velit ut minim\"\n}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/ws-auth", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "ws-auth" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"BAD_REQUEST\",\n \"message\": \"configuration management via UI/API disabled by admin\"\n}" + }, + { + "name": "Unauthorized access", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/admin/ws-auth", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "admin", + "ws-auth" + ] + } + }, + "status": "Unauthorized", + "code": 401, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"UNAUTHORIZED\",\n \"message\": \"Check your verification token.\"\n}" + } + ] + } + ] + }, + { + "name": "Observation", + "item": [ + { + "name": "Start observing", + "event": [ + { + "listen": "test", + "script": { + "type": "text/javascript", + "exec": [ + "// Validate status 2xx \npm.test(\"[POST]::/observation/start - Status code is 2xx\", function () {\n pm.response.to.be.success;\n});\n", + "// Validate if response header has matching content-type\npm.test(\"[POST]::/observation/start - Content-Type is application/json\", function () {\n pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");\n});\n", + "// Validate if response has JSON Body \npm.test(\"[POST]::/observation/start - Response has JSON Body\", function () {\n pm.response.to.have.jsonBody();\n});\n", + "// Response Validation\nconst schema = 
{\"type\":\"object\",\"properties\":{\"session_id\":{\"type\":\"integer\",\"format\":\"int64\"},\"started_at\":{\"type\":\"string\",\"format\":\"date-time\"},\"finished_at\":{\"type\":\"string\",\"format\":\"date-time\"},\"config\":{\"type\":\"object\",\"properties\":{\"observation_interval\":{\"type\":\"integer\",\"format\":\"int64\"},\"max_lock_duration\":{\"type\":\"integer\",\"format\":\"int64\"},\"max_duration\":{\"type\":\"integer\",\"format\":\"int64\"}}},\"tags\":{\"type\":\"object\",\"properties\":{}},\"artifacts\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"result\":{\"type\":\"object\",\"properties\":{\"status\":{\"type\":\"string\"},\"intervals\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"started_at\":{\"type\":\"string\",\"format\":\"date-time\"},\"duration\":{\"type\":\"integer\",\"format\":\"int64\"},\"warning\":{\"type\":\"string\"}}}},\"summary\":{\"type\":\"object\",\"properties\":{\"total_duration\":{\"type\":\"integer\",\"format\":\"float64\"},\"total_intervals\":{\"type\":\"integer\",\"format\":\"int\"},\"warning_intervals\":{\"type\":\"integer\",\"format\":\"int\"},\"checklist\":{\"type\":\"object\",\"properties\":{\"overall_success\":{\"type\":\"boolean\"},\"session_duration_acceptable\":{\"type\":\"boolean\"},\"no_long_dangerous_locks\":{\"type\":\"boolean\"}}}}}}}}}\n\n// Validate if response matches JSON schema \npm.test(\"[POST]::/observation/start - Schema is valid\", function() {\n pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});\n});\n" + ] + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"clone_id\": \"ut sit irure\",\n \"config\": {\n \"observation_interval\": 33950905,\n \"max_lock_duration\": 82462220,\n \"max_duration\": 54143470\n },\n \"tags\": {},\n \"db_name\": \"magna esse dolore\"\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/observation/start", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "start" + ] + }, + "description": "[EXPERIMENTAL] Start an observation session for the specified clone. Observation sessions help detect dangerous (long-lasting, exclusive) locks in CI/CD pipelines. One of common scenarios is using observation sessions to test schema changes (DB migrations)." 
+ }, + "response": [ + { + "name": "Successful operation", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"clone_id\": \"ut sit irure\",\n \"config\": {\n \"observation_interval\": 33950905,\n \"max_lock_duration\": 82462220,\n \"max_duration\": 54143470\n },\n \"tags\": {},\n \"db_name\": \"magna esse dolore\"\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/observation/start", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "start" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"session_id\": -41566390,\n \"started_at\": \"1991-02-14T03:01:06.417Z\",\n \"finished_at\": \"2018-05-30T06:18:09.119Z\",\n \"config\": {\n \"observation_interval\": 76803835,\n \"max_lock_duration\": -6633155,\n \"max_duration\": -968293\n },\n \"tags\": {},\n \"artifacts\": [\n \"aliqua do\",\n \"consectetur amet tempor eiusmod\"\n ],\n \"result\": {\n \"status\": \"qui adipisicing velit aute\",\n \"intervals\": [\n {\n \"started_at\": \"2008-06-20T07:35:49.463Z\",\n \"duration\": 34650553,\n \"warning\": \"velit nulla ex\"\n },\n {\n \"started_at\": \"1994-03-12T02:59:52.189Z\",\n \"duration\": 10020998,\n \"warning\": \"ipsum laborum\"\n }\n ],\n \"summary\": {\n \"total_duration\": -51894451,\n \"total_intervals\": -93757197,\n \"warning_intervals\": 95087393,\n \"checklist\": {\n \"overall_success\": false,\n \"session_duration_acceptable\": true,\n \"no_long_dangerous_locks\": false\n }\n }\n }\n}" + }, + { + "name": "Not found", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"clone_id\": \"ut sit irure\",\n \"config\": {\n \"observation_interval\": 33950905,\n \"max_lock_duration\": 82462220,\n \"max_duration\": 54143470\n },\n \"tags\": {},\n \"db_name\": \"magna esse dolore\"\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/observation/start", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "start" + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"NOT_FOUND\",\n \"message\": \"Requested object does not exist. 
Specify your request.\"\n}" + } + ] + }, + { + "name": "Stop observing", + "event": [ + { + "listen": "test", + "script": { + "type": "text/javascript", + "exec": [ + "// Validate status 2xx \npm.test(\"[POST]::/observation/stop - Status code is 2xx\", function () {\n pm.response.to.be.success;\n});\n", + "// Validate if response header has matching content-type\npm.test(\"[POST]::/observation/stop - Content-Type is application/json\", function () {\n pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");\n});\n", + "// Validate if response has JSON Body \npm.test(\"[POST]::/observation/stop - Response has JSON Body\", function () {\n pm.response.to.have.jsonBody();\n});\n", + "// Response Validation\nconst schema = {\"type\":\"object\",\"properties\":{\"session_id\":{\"type\":\"integer\",\"format\":\"int64\"},\"started_at\":{\"type\":\"string\",\"format\":\"date-time\"},\"finished_at\":{\"type\":\"string\",\"format\":\"date-time\"},\"config\":{\"type\":\"object\",\"properties\":{\"observation_interval\":{\"type\":\"integer\",\"format\":\"int64\"},\"max_lock_duration\":{\"type\":\"integer\",\"format\":\"int64\"},\"max_duration\":{\"type\":\"integer\",\"format\":\"int64\"}}},\"tags\":{\"type\":\"object\",\"properties\":{}},\"artifacts\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"result\":{\"type\":\"object\",\"properties\":{\"status\":{\"type\":\"string\"},\"intervals\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"started_at\":{\"type\":\"string\",\"format\":\"date-time\"},\"duration\":{\"type\":\"integer\",\"format\":\"int64\"},\"warning\":{\"type\":\"string\"}}}},\"summary\":{\"type\":\"object\",\"properties\":{\"total_duration\":{\"type\":\"integer\",\"format\":\"float64\"},\"total_intervals\":{\"type\":\"integer\",\"format\":\"int\"},\"warning_intervals\":{\"type\":\"integer\",\"format\":\"int\"},\"checklist\":{\"type\":\"object\",\"properties\":{\"overall_success\":{\"type\":\"boolean\"},\"session_duration_acceptable\":{\"type\":\"boolean\"},\"no_long_dangerous_locks\":{\"type\":\"boolean\"}}}}}}}}}\n\n// Validate if response matches JSON schema \npm.test(\"[POST]::/observation/stop - Schema is valid\", function() {\n pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});\n});\n" + ] + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"clone_id\": \"proident cillum nostrud officia\",\n \"overall_error\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/observation/stop", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "stop" + ] + }, + "description": "[EXPERIMENTAL] Stop the previously started observation session." 
+ }, + "response": [ + { + "name": "Successful operation", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"clone_id\": \"proident cillum nostrud officia\",\n \"overall_error\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/observation/stop", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "stop" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"session_id\": 9614128,\n \"started_at\": \"1993-11-12T01:24:57.933Z\",\n \"finished_at\": \"1953-01-01T04:06:59.652Z\",\n \"config\": {\n \"observation_interval\": -46635741,\n \"max_lock_duration\": -53938384,\n \"max_duration\": 85779944\n },\n \"tags\": {},\n \"artifacts\": [\n \"deseru\",\n \"in ullamco veniam\"\n ],\n \"result\": {\n \"status\": \"ut ea l\",\n \"intervals\": [\n {\n \"started_at\": \"1943-07-24T05:03:49.697Z\",\n \"duration\": -45788381,\n \"warning\": \"Ut qui occaecat\"\n },\n {\n \"started_at\": \"1973-02-08T19:49:36.906Z\",\n \"duration\": 78310177,\n \"warning\": \"dolore amet mollit velit\"\n }\n ],\n \"summary\": {\n \"total_duration\": 89098265,\n \"total_intervals\": -25796081,\n \"warning_intervals\": -74609996,\n \"checklist\": {\n \"overall_success\": false,\n \"session_duration_acceptable\": true,\n \"no_long_dangerous_locks\": false\n }\n }\n }\n}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "POST", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"clone_id\": \"proident cillum nostrud officia\",\n \"overall_error\": false\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/observation/stop", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "stop" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\",\n \"detail\": \"occaecat\",\n \"hint\": \"anim\"\n}" + } + ] + }, + { + "name": "Get observation summary", + "event": [ + { + "listen": "test", + "script": { + "type": "text/javascript", + "exec": [ + "// Validate status 2xx \npm.test(\"[GET]::/observation/summary/:clone_id/:session_id - Status code is 2xx\", function () {\n pm.response.to.be.success;\n});\n", + "// Validate if response header has matching content-type\npm.test(\"[GET]::/observation/summary/:clone_id/:session_id - Content-Type is application/json\", function () {\n pm.expect(pm.response.headers.get(\"Content-Type\")).to.include(\"application/json\");\n});\n", + "// Validate if response has JSON Body \npm.test(\"[GET]::/observation/summary/:clone_id/:session_id - Response has JSON Body\", function () {\n pm.response.to.have.jsonBody();\n});\n", + "// Response Validation\nconst schema = 
{\"type\":\"object\",\"properties\":{\"session_id\":{\"type\":\"integer\",\"format\":\"int64\"},\"clone_id\":{\"type\":\"string\"},\"duration\":{\"type\":\"object\",\"properties\":{}},\"db_size\":{\"type\":\"object\",\"properties\":{}},\"locks\":{\"type\":\"object\",\"properties\":{}},\"log_errors\":{\"type\":\"object\",\"properties\":{}},\"artifact_types\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}}}\n\n// Validate if response matches JSON schema \npm.test(\"[GET]::/observation/summary/:clone_id/:session_id - Schema is valid\", function() {\n pm.response.to.have.jsonSchema(schema,{unknownFormats: [\"int32\", \"int64\", \"float\", \"double\"]});\n});\n" + ] + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/observation/summary/:clone_id/:session_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "summary", + ":clone_id", + ":session_id" + ], + "variable": [ + { + "key": "clone_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + }, + { + "key": "session_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Session ID" + } + ] + }, + "description": "[EXPERIMENTAL] Collect the observation summary info." + }, + "response": [ + { + "name": "Successful operation", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/observation/summary/:clone_id/:session_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "summary", + ":clone_id", + ":session_id" + ], + "variable": [ + { + "key": "clone_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + }, + { + "key": "session_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Session ID" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"session_id\": 55155718,\n \"clone_id\": \"cupidatat laborum consequat Lorem officia\",\n \"duration\": {},\n \"db_size\": {},\n \"locks\": {},\n \"log_errors\": {},\n \"artifact_types\": [\n \"laboris anim Ut enim\",\n \"ullamco in esse nostrud Exc\"\n ]\n}" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/observation/summary/:clone_id/:session_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "summary", + ":clone_id", + ":session_id" + ], + "variable": [ + { + "key": "clone_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + }, + { + "key": "session_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Session ID" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\",\n \"detail\": \"occaecat\",\n \"hint\": 
\"anim\"\n}" + } + ] + }, + { + "name": "Download an observation artifact", + "event": [ + { + "listen": "test", + "script": { + "type": "text/javascript", + "exec": [ + "// Validate status 2xx \npm.test(\"[GET]::/observation/download/:artifact_type/:clone_id/:session_id - Status code is 2xx\", function () {\n pm.response.to.be.success;\n});\n" + ] + } + } + ], + "request": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/observation/download/:artifact_type/:clone_id/:session_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "download", + ":artifact_type", + ":clone_id", + ":session_id" + ], + "variable": [ + { + "key": "artifact_type", + "value": "Ut magna qui deserunt", + "description": "(Required) Type of the requested artifact" + }, + { + "key": "clone_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + }, + { + "key": "session_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Session ID" + } + ] + }, + "description": "[EXPERIMENTAL] Download an artifact for the specified clone and observation session." + }, + "response": [ + { + "name": "Successful operation", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + } + ], + "url": { + "raw": "{{baseUrl}}/observation/download/:artifact_type/:clone_id/:session_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "download", + ":artifact_type", + ":clone_id", + ":session_id" + ], + "variable": [ + { + "key": "artifact_type", + "value": "Ut magna qui deserunt", + "description": "(Required) Type of the requested artifact" + }, + { + "key": "clone_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + }, + { + "key": "session_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Session ID" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + }, + { + "name": "Bad request", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "Verification-Token", + "value": "Ut magna qui deserunt", + "description": "(Required) " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/observation/download/:artifact_type/:clone_id/:session_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "observation", + "download", + ":artifact_type", + ":clone_id", + ":session_id" + ], + "variable": [ + { + "key": "artifact_type", + "value": "Ut magna qui deserunt", + "description": "(Required) Type of the requested artifact" + }, + { + "key": "clone_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Clone ID" + }, + { + "key": "session_id", + "value": "Ut magna qui deserunt", + "description": "(Required) Session ID" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"code\": \"incididunt minim nulla\",\n \"message\": \"qui fugiat\",\n \"detail\": \"occaecat\",\n \"hint\": \"anim\"\n}" + } + ] + } + ] + } + ], + "variable": [ + { + "key": "baseUrl", + "value": 
"https://branching.aws.postgres.ai:446/api", + "type": "string" + } + ] +} \ No newline at end of file diff --git a/engine/api/postman/portman-cli.json b/engine/api/postman/portman-cli.json new file mode 100644 index 00000000..89b27ed2 --- /dev/null +++ b/engine/api/postman/portman-cli.json @@ -0,0 +1,10 @@ +{ + "baseUrL": "http://branching.aws.postgres.ai:446/api", + "verificationToken": "demo-token", + "local": "engine/api/swagger-spec/dblab_openapi.yaml", + "output": "engine/api/postman/output.json", + "envFile": "engine/api/postman/portman.env", + "includeTests": true, + "syncPostman": true, + "runNewman": false +} diff --git a/engine/api/swagger-spec/dblab_openapi.yaml b/engine/api/swagger-spec/dblab_openapi.yaml new file mode 100644 index 00000000..2a9c16cc --- /dev/null +++ b/engine/api/swagger-spec/dblab_openapi.yaml @@ -0,0 +1,1919 @@ +# OpenAPI spec for DBLab API +# Useful links: +# - validate and test: https://editor.swagger.io/ +# - official reference location for this API: https://dblab.readme.io/ +# - GitHub (give us a ⭐️): https://github.com/postgres-ai/database-lab-engine + +openapi: 3.0.1 +info: + title: DBLab API + description: This page provides the OpenAPI specification for the Database Lab (DBLab) + API, previously recognized as the DLE API (Database Lab Engine API). + contact: + name: DBLab API Support + url: https://postgres.ai/contact + email: api@postgres.ai + license: + name: Apache 2.0 + url: https://github.com/postgres-ai/database-lab-engine/blob/dle-4-0/LICENSE + version: 4.0.0 +externalDocs: + description: DBLab Docs + url: https://gitlab.com/postgres-ai/docs/tree/master/docs/database-lab + +servers: + - url: "https://demo.dblab.dev/api" + description: "DBLab 4.0 demo server (with DB branching support); token: 'demo-token'" + x-examples: + Verification-Token: "demo-token" + - url: "https://demo.aws.postgres.ai:446/api" + description: "DBLab 3.x demo server; token: 'demo-token'" + x-examples: + Verification-Token: "demo-token" + - url: "{scheme}://{host}:{port}/{basePath}" + description: "Any DBLab accessed locally / through SSH port forwarding" + variables: + scheme: + enum: + - "https" + - "http" + default: "http" + description: "'http' for local connections and SSH port forwarding; + 'https' for everything else." + host: + default: "localhost" + description: "where DBLab server is installed. Use 'localhost' to work locally + or when SSH port forwarding is used." + port: + default: "2346" + description: "Port to access DBLab UI or API. Originally, '2345' is used for + direct work with API and '2346' – with UI. However, with UI, API is also available, + at ':2346/api'." + basePath: + default: "api" + description: "basePath value to access API. Use empty when working with API port + (2345 by default), or '/api' when working with UI port ('2346' by default)." + x-examples: + Verification-Token: "custom_example_token" + +tags: +- name: DBLab + description: "DBLab API Reference – database branching, instant cloning, and more. + DBLab CLI and UI rely on DBLab API." + externalDocs: + description: "DBLab Docs - tutorials, howtos, references." 
+ url: https://postgres.ai/docs/reference-guides/database-lab-engine-api-reference + +paths: + /status: + get: + tags: + - Instance + summary: DBLab instance status and detailed information + description: "Retrieves detailed information about the DBLab instance: status, version, + clones, snapshots, etc." + operationId: status + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: Returned detailed information about the DBLab instance + content: + application/json: + schema: + $ref: '#/components/schemas/Instance' + example: + status: + code: OK + message: Instance is ready + engine: + version: v4.0.0-alpha.5-20230516-0224 + edition: standard + billingActive: true + instanceID: chhfqfcnvrvc73d0lij0 + startedAt: '2023-05-16T03:50:19Z' + telemetry: true + disableConfigModification: false + pools: + - name: dblab_pool/dataset_1 + mode: zfs + dataStateAt: '' + status: empty + cloneList: [] + fileSystem: + mode: zfs + size: 30685528064 + free: 30685282816 + used: 245248 + dataSize: 12288 + usedBySnapshots: 0 + usedByClones: 219648 + compressRatio: 1 + - name: dblab_pool/dataset_2 + mode: zfs + dataStateAt: '' + status: empty + cloneList: [] + fileSystem: + mode: zfs + size: 30685528064 + free: 30685282816 + used: 245248 + dataSize: 12288 + usedBySnapshots: 0 + usedByClones: 219648 + compressRatio: 1 + - name: dblab_pool/dataset_3 + mode: zfs + dataStateAt: '' + status: empty + cloneList: [] + fileSystem: + mode: zfs + size: 30685528064 + free: 30685282816 + used: 245248 + dataSize: 12288 + usedBySnapshots: 0 + usedByClones: 219648 + compressRatio: 1 + cloning: + expectedCloningTime: 0 + numClones: 0 + clones: [] + retrieving: + mode: logical + status: pending + lastRefresh: + nextRefresh: + alerts: {} + activity: + provisioner: + dockerImage: postgresai/extended-postgres:15 + containerConfig: + shm-size: 1gb + synchronization: + status: + code: Not available + message: '' + lastReplayedLsn: '' + lastReplayedLsnAt: '' + replicationLag: 0 + replicationUptime: 0 + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + /snapshots: + get: + tags: + - Snapshots + summary: List all snapshots + description: Return a list of all available snapshots. + operationId: snapshots + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + - name: branch + in: query + required: false + schema: + type: string + responses: + 200: + description: Returned a list of snapshots + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Snapshot' + example: + - id: dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711 + createdAt: '2023-05-09T21:27:11Z' + dataStateAt: '2023-05-09T21:27:11Z' + physicalSize: 0 + logicalSize: 11518021632 + pool: dblab_pool/dataset_2 + numClones: 1 + - id: dblab_pool/dataset_2/nik-test-branch/20230307171959@20230307171959 + createdAt: '2023-03-07T17:19:59Z' + dataStateAt: '2023-03-07T17:19:59Z' + physicalSize: 151552 + logicalSize: 11518015488 + pool: dblab_pool/dataset_2 + numClones: 1 + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." 
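For orientation, here is a minimal client sketch (not part of this changeset) that exercises the `/snapshots` endpoint specified above. The host, port, and token are assumptions taken from the spec's defaults (`localhost:2346`, `demo-token`); adjust them for your instance:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumed: DBLab UI port forwarded to localhost:2346, demo token.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:2346/api/snapshots", nil)
	if err != nil {
		panic(err)
	}
	// All endpoints except /healthz authenticate via this header.
	req.Header.Set("Verification-Token", "demo-token")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The 200 response is a JSON array of Snapshot objects.
	var snapshots []map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&snapshots); err != nil {
		panic(err)
	}

	for _, s := range snapshots {
		fmt.Printf("snapshot %v (pool: %v, clones: %v)\n", s["id"], s["pool"], s["numClones"])
	}
}
```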
+ /full-refresh: + post: + tags: + - Instance + summary: Trigger full data refresh + description: "Initiates a full data refresh." + operationId: refresh + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: Full refresh has been initiated + content: + application/json: + schema: + $ref: '#/components/schemas/FullRefresh' + example: + status: OK + message: Full refresh started + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + /snapshot: + post: + tags: + - Snapshots + summary: Create a snapshot + description: "Create a new snapshot from the current state of the selected pool. + This snapshot can later be used to create clones or new branches." + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + description: "Optional parameters for snapshot creation. + If no pool name is provided, the first available pool is used." + content: + '*/*': + schema: + type: object + properties: + poolName: + type: string + description: Name of the pool to create snapshot in. + required: false + responses: + 200: + description: OK + content: + '*/*': + schema: + $ref: '#/components/schemas/Snapshot' + 400: + description: Bad request + content: + '*/*': + schema: + $ref: '#/components/schemas/Error' + x-codegen-request-body-name: body + /snapshot/{id}: + delete: + tags: + - Snapshots + summary: Delete a snapshot + description: "Permanently delete the specified snapshot. + If the snapshot has dependent clones or datasets, `force=true` can be provided as a query parameter." + parameters: + - name: id + in: path + required: true + description: The ID of the snapshot to delete. + schema: + type: string + pattern: '.*' + - name: force + in: query + required: false + description: Force deletion even if dependent clones or datasets exist. + schema: + type: boolean + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: OK + content: + '*/*': + schema: + $ref: '#/components/schemas/ResponseStatus' + 400: + description: Bad request + content: + '*/*': + schema: + $ref: '#/components/schemas/Error' + /clones: + get: + tags: + - Clones + summary: List all clones + description: Return a list of all available clones (database endpoints). + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: Returned a list of all available clones + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Clone' + example: + - id: test-clone-2 + snapshot: + id: dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711 + createdAt: '2023-05-09T21:27:11Z' + dataStateAt: '2023-05-09T21:27:11Z' + physicalSize: 120832 + logicalSize: 11518021632 + pool: dblab_pool/dataset_2 + numClones: 3 + branch: '' + protected: false + deleteAt: + createdAt: '2023-05-16T06:12:52Z' + status: + code: OK + message: Clone is ready to accept Postgres connections. 
+ db: + connStr: host=branching.aws.postgres.ai port=6005 user=tester dbname=postgres + host: branching.aws.postgres.ai + port: '6005' + username: tester + password: '' + dbName: postgres + metadata: + cloneDiffSize: 484352 + logicalSize: 11518029312 + cloningTime: 1.5250661829999999 + maxIdleMinutes: 120 + - id: test-clone + snapshot: + id: dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711 + createdAt: '2023-05-09T21:27:11Z' + dataStateAt: '2023-05-09T21:27:11Z' + physicalSize: 120832 + logicalSize: 11518021632 + pool: dblab_pool/dataset_2 + numClones: 3 + branch: '' + protected: false + deleteAt: + createdAt: '2023-05-16T06:12:30Z' + status: + code: OK + message: Clone is ready to accept Postgres connections. + db: + connStr: host=branching.aws.postgres.ai port=6004 user=tester dbname=postgres + host: branching.aws.postgres.ai + port: '6004' + username: tester + password: '' + dbName: postgres + metadata: + cloneDiffSize: 486400 + logicalSize: 11518030336 + cloningTime: 1.57552338 + maxIdleMinutes: 120 + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + /clone: + post: + tags: + - Clones + summary: Create a clone + operationId: createClone + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + description: Clone object + content: + application/json: + schema: + $ref: '#/components/schemas/CreateClone' + required: true + responses: + 201: + description: Created a new clone + content: + application/json: + schema: + $ref: '#/components/schemas/Clone' + example: + id: test-clone-2 + snapshot: + id: dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711 + createdAt: '2023-05-09T21:27:11Z' + dataStateAt: '2023-05-09T21:27:11Z' + physicalSize: 120832 + logicalSize: 11518021632 + pool: dblab_pool/dataset_2 + numClones: 3 + branch: '' + protected: false + deleteAt: + createdAt: '2023-05-16T06:12:52Z' + status: + code: CREATING + message: Clone is being created. + db: + connStr: '' + host: '' + port: '' + username: tester + password: '' + dbName: postgres + metadata: + cloneDiffSize: 0 + logicalSize: 0 + cloningTime: 0 + maxIdleMinutes: 0 + 400: + description: Returned an error caused by invalid request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "BAD_REQUEST" + message: "clone with such ID already exists" + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + x-codegen-request-body-name: body + /clone/{id}: + get: + tags: + - Clones + summary: Retrieve a clone + description: Retrieves the information for the specified clone. 
+ operationId: getClone + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + - name: id + in: path + description: Clone ID + required: true + schema: + type: string + responses: + 200: + description: Returned detailed information for the specified clone + content: + application/json: + schema: + $ref: '#/components/schemas/Clone' + example: + id: test-clone + snapshot: + id: dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711 + createdAt: '2023-05-09T21:27:11Z' + dataStateAt: '2023-05-09T21:27:11Z' + physicalSize: 120832 + logicalSize: 11518021632 + pool: dblab_pool/dataset_2 + numClones: 3 + branch: '' + protected: false + deleteAt: + createdAt: '2023-05-16T06:12:30Z' + status: + code: OK + message: Clone is ready to accept Postgres connections. + db: + connStr: host=branching.aws.postgres.ai port=6004 user=tester dbname=postgres + host: branching.aws.postgres.ai + port: '6004' + username: tester + password: '' + dbName: postgres + metadata: + cloneDiffSize: 486400 + logicalSize: 11518030336 + cloningTime: 1.57552338 + maxIdleMinutes: 120 + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + 404: + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: NOT_FOUND + message: Requested object does not exist. Specify your request. + delete: + tags: + - Clones + summary: Delete a clone + description: Permanently delete the specified clone. It cannot be undone. + operationId: deleteClone + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + - name: id + in: path + description: Clone ID + required: true + schema: + type: string + responses: + 200: + description: Successfully deleted the specified clone + content: + application/json: + example: + "OK" + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + 404: + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: NOT_FOUND + message: Requested object does not exist. Specify your request. + patch: + tags: + - Clones + summary: Update a clone + description: "Updates the specified clone by setting the values of the parameters passed. + Currently, only one parameter is supported: 'protected'." + operationId: updateClone + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + - name: id + in: path + description: Clone ID + required: true + schema: + type: string + requestBody: + description: Clone object + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateClone' + required: true + responses: + 200: + description: Successfully updated the specified clone + content: + application/json: + schema: + $ref: '#/components/schemas/Clone' + example: + id: test-clone-2 + snapshot: + id: dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711 + createdAt: '2023-05-09T21:27:11Z' + dataStateAt: '2023-05-09T21:27:11Z' + physicalSize: 120832 + logicalSize: 11518021632 + pool: dblab_pool/dataset_2 + numClones: 2 + branch: '' + protected: true + deleteAt: + createdAt: '2023-05-16T06:12:52Z' + status: + code: OK + message: Clone is ready to accept Postgres connections.
+ db: + connStr: host=branching.aws.postgres.ai port=6005 user=tester dbname=postgres + host: branching.aws.postgres.ai + port: '6005' + username: tester + password: '' + dbName: postgres + metadata: + cloneDiffSize: 561664 + logicalSize: 11518030336 + cloningTime: 1.5250661829999999 + maxIdleMinutes: 120 + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + #404: # TODO: fix it in engine (currently returns 500) + # description: Not found + # content: + # application/json: + # schema: + # $ref: '#/components/schemas/Error' + # example: + # code: NOT_FOUND + # message: Requested object does not exist. Specify your request. + x-codegen-request-body-name: body + /clone/{id}/reset: + post: + tags: + - Clones + summary: Reset a clone + description: "Reset the specified clone to a previously stored state. + This can be done by specifying a particular snapshot ID or using the 'latest' flag. + All changes made after the snapshot are discarded during the reset, unless those + changes were preserved in a snapshot. All database connections will be reset, + requiring users and applications to reconnect. The duration of the reset operation + is comparable to the creation of a new clone. However, unlike creating a new clone, + the reset operation retains the database credentials and does not change the port. + Consequently, users and applications can continue to use the same database credentials + post-reset, though reconnection will be necessary. Please note that any unsaved changes + will be irretrievably lost during this operation, so ensure necessary data is backed up + in a snapshot prior to resetting the clone." + operationId: resetClone + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + - name: id + in: path + description: Clone ID + required: true + schema: + type: string + requestBody: + description: Reset object + content: + application/json: + schema: + $ref: '#/components/schemas/ResetClone' + required: false + responses: + 200: + description: Successfully reset the state of the specified clone + content: + application/json: + example: + "OK" + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + #404: # TODO: fix it in engine (currently returns 500) + # description: Not found + # content: + # application/json: + # schema: + # $ref: '#/components/schemas/Error' + x-codegen-request-body-name: body + /branches: + get: + tags: + - Branches + summary: List all branches + description: Return a list of all available branches (named pointers to snapshots). 
+ parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: Returned a list of all available branches + content: + '*/*': + schema: + type: array + items: + $ref: '#/components/schemas/Branch' + example: + - name: my-1 + parent: main + dataStateAt: '20230224202652' + snapshotID: dblab_pool/dataset_2/main/20230224202652@20230224202652 + - name: nik-test-branch + parent: "-" + dataStateAt: '20230509212711' + snapshotID: dblab_pool/dataset_2/nik-test-branch/20230509212711@20230509212711 + - name: main + parent: "-" + dataStateAt: '20230224202652' + snapshotID: dblab_pool/dataset_2/main/20230224202652@20230224202652 + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + /branch/snapshot/{id}: + get: + tags: + - Snapshots + summary: Retrieve a snapshot + description: Retrieves the information for the specified snapshot. + parameters: + - name: id + in: path + description: ID of the branch snapshot + required: true + schema: + type: string + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: OK + content: + '*/*': + schema: + $ref: '#/components/schemas/SnapshotDetails' + 400: + description: Bad request + content: + '*/*': + schema: + $ref: '#/components/schemas/Error' + /branch: + post: + tags: + - Branches + summary: Create a branch + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + content: + '*/*': + schema: + type: object + properties: + branchName: + type: string + description: The name of the new branch. + baseBranch: + type: string + description: "The name of the parent branch used to create the new branch. + Must not be specified if 'snapshotID' is specified." + snapshotID: + type: string + description: "The ID of the snapshot used to create a new branch. + Must not be specified if 'baseBranch' is specified." + required: true + responses: + 200: + description: OK + content: + '*/*': + schema: + type: object + properties: + name: + type: string + 400: + description: Bad request + content: + '*/*': + schema: + $ref: '#/components/schemas/Error' + x-codegen-request-body-name: body + /branch/snapshot: + post: + tags: + - Snapshots + summary: Create a snapshot + description: "Create a new snapshot using the specified clone. After a snapshot + has been created, the original clone can be deleted in order to free up compute resources, if necessary. + The snapshot created by this endpoint can be used later to create one or more new clones." + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + description: "Parameters necessary for snapshot creation: 'cloneID' – the + ID of the clone, 'message' – description of the snapshot" + content: + '*/*': + schema: + type: object + properties: + cloneID: + type: string + message: + type: string + required: true + responses: + 200: + description: OK + content: + '*/*': + schema: + type: object + properties: + snapshotID: + type: string + 400: + description: Bad request + content: + '*/*': + schema: + $ref: '#/components/schemas/Error' + x-codegen-request-body-name: body + /branch/{branchName}: + delete: + tags: + - Branches + summary: Delete a branch + description: "Permanently delete the specified branch. It cannot be undone."
+ parameters: + - name: branchName + in: path + required: true + schema: + type: string + description: "The name of the branch to be deleted." + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: OK + content: + '*/*': + schema: + $ref: '#/components/schemas/ResponseStatus' + 400: + description: Bad request + content: + '*/*': + schema: + $ref: '#/components/schemas/Error' + x-codegen-request-body-name: body + /branch/{branchName}/log: + get: + tags: + - Branches + summary: Retrieve a branch log + description: Retrieve a log of the specified branch (history of snapshots). + parameters: + - name: branchName + in: path + required: true + schema: + type: string + description: The name of the branch. + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: OK + content: + '*/*': + schema: + type: array + items: + $ref: '#/components/schemas/SnapshotDetails' + x-codegen-request-body-name: body + /instance/retrieval: + get: + tags: + - Instance + summary: Data refresh status + description: 'Report the status of the data refresh subsystem (also known as + "data retrieval"): timestamps of the previous and next refresh runs, status, messages.' + operationId: instanceRetrieval + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: Reported the status of the data retrieval subsystem + content: + application/json: + schema: + $ref: '#/components/schemas/Retrieving' + example: + mode: logical + status: pending + lastRefresh: + nextRefresh: + alerts: {} + activity: + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + /healthz: + get: + tags: + - Instance + summary: Service health check + description: "Check the overall health and availability of the API system. + This endpoint does not require the 'Verification-Token' header." + operationId: healthz + responses: + 200: + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Engine' + example: + version: "v4.0.0-alpha.5-20230516-0224" + edition: "standard" + instanceID: "chhfqfcnvrvc73d0lij0" + /admin/config: + get: + tags: + - Admin + summary: Get config + description: "Retrieve the DBLab configuration. All sensitive values are masked. + Only a limited set of configuration parameters is returned – only those that can be + changed via API (unless reconfiguration via API is disabled by admin). The result + is provided in JSON format."
+ operationId: getConfig + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: Returned configuration + content: + application/json: + schema: + $ref: '#/components/schemas/Config' + example: + databaseConfigs: + configs: + shared_buffers: 1GB + shared_preload_libraries: pg_stat_statements, pg_stat_kcache, auto_explain, logerrors + databaseContainer: + dockerImage: registry.gitlab.com/postgres-ai/se-images/supabase:15 + global: + debug: true + retrieval: + refresh: + timetable: 0 1 * * 0 + spec: + logicalDump: + options: + customOptions: [] + databases: + test_small: {} + parallelJobs: 4 + source: + connection: + dbname: test_small + host: dev1.postgres.ai + port: 6666 + username: john + logicalRestore: + options: + customOptions: + - "--no-tablespaces" + - "--no-privileges" + - "--no-owner" + - "--exit-on-error" + parallelJobs: 4 + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + post: + tags: + - Admin + summary: Set config + description: "Set specific configurations for the DBLab instance using this endpoint. + The returned configuration parameters are limited to those that can be modified + via the API (unless the API-based reconfiguration has been disabled by an administrator). + The result will be provided in JSON format." + operationId: setConfig + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + description: Set configuration object + content: + application/json: + schema: + $ref: '#/components/schemas/Config' + required: true + responses: + 200: + description: Successfully saved configuration parameters + content: + application/json: + schema: + $ref: '#/components/schemas/Config' + 400: + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: BAD_REQUEST + message: configuration management via UI/API disabled by admin + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + x-codegen-request-body-name: body + /admin/config.yaml: + get: + tags: + - Admin + summary: Get full config (YAML) + description: "Retrieve the DBLab configuration in YAML format. All sensitive values are masked. + This method allows seeing the entire configuration file and can be helpful for + reviewing configuration and setting up workflows to automate DBLab provisioning + and configuration." + operationId: getConfigYaml + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: "Returned configuration (YAML)" + content: + application/yaml: + schema: + $ref: '#/components/schemas/Config' + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token."
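As a usage sketch for the config endpoints above (illustrative only; host, port, and token are assumed, not part of this changeset), fetching the masked YAML configuration could look like this:

```go
package main

import (
	"io"
	"net/http"
	"os"
)

func main() {
	// Assumed host/port/token; adjust for your instance.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:2346/api/admin/config.yaml", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Verification-Token", "demo-token")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Sensitive values arrive masked; print the YAML as-is.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		panic(err)
	}
}
```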
+ /admin/test-db-source: + post: + tags: + - Admin + summary: Test source database + operationId: testDBConnection1 + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + description: Connection DB object + content: + application/json: + schema: + $ref: '#/components/schemas/Connection' + required: true + responses: + 200: + description: Successful operation + content: {} + 400: + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: BAD_REQUEST + message: configuration management via UI/API disabled by admin + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + x-codegen-request-body-name: body + /admin/ws-auth: + post: + tags: + - Admin + summary: Get WebSocket token + operationId: testDBConnection2 + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + responses: + 200: + description: Successful operation + content: + '*/*': + schema: + $ref: '#/components/schemas/WSToken' + 400: + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: BAD_REQUEST + message: configuration management via UI/API disabled by admin + 401: + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: "UNAUTHORIZED" + message: "Check your verification token." + /observation/start: + post: + tags: + - Observation + summary: Start observing + description: "[EXPERIMENTAL] Start an observation session for the specified clone. + Observation sessions help detect dangerous (long-lasting, exclusive) locks in CI/CD pipelines. + One common scenario is using observation sessions to test schema changes (DB migrations)." + operationId: startObservation + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + description: Start observation object + content: + application/json: + schema: + $ref: '#/components/schemas/StartObservationRequest' + required: true + responses: + 200: + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/ObservationSession' + 404: + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: NOT_FOUND + message: Requested object does not exist. Specify your request. + x-codegen-request-body-name: body + /observation/stop: + post: + tags: + - Observation + summary: Stop observing + description: "[EXPERIMENTAL] Stop the previously started observation session."
+ operationId: stopObservation + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + requestBody: + description: Stop observation object + content: + application/json: + schema: + $ref: '#/components/schemas/StopObservationRequest' + required: true + responses: + 200: + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/ObservationSession' + 400: + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + x-codegen-request-body-name: body + /observation/summary/{clone_id}/{session_id}: + get: + tags: + - Observation + summary: Get observation summary + description: "[EXPERIMENTAL] Collect the observation summary info." + operationId: summaryObservation + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + - name: clone_id + in: path + description: Clone ID + required: true + schema: + type: string + - name: session_id + in: path + description: Session ID + required: true + schema: + type: string + responses: + 200: + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/ObservationSummaryArtifact' + 400: + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /observation/download/{artifact_type}/{clone_id}/{session_id}: + get: + tags: + - Observation + summary: Download an observation artifact + description: "[EXPERIMENTAL] Download an artifact for the specified clone and observation session." + operationId: downloadObservationArtifact + parameters: + - name: Verification-Token + in: header + required: true + schema: + type: string + - name: artifact_type + in: path + description: Type of the requested artifact + required: true + schema: + type: string + - name: clone_id + in: path + description: Clone ID + required: true + schema: + type: string + - name: session_id + in: path + description: Session ID + required: true + schema: + type: string + responses: + 200: + description: Successful operation + content: {} + 400: + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + +components: + schemas: + Instance: + type: object + properties: + status: + $ref: '#/components/schemas/Status' + engine: + $ref: '#/components/schemas/Engine' + pools: + type: array + items: + $ref: '#/components/schemas/PoolEntry' + cloning: + $ref: '#/components/schemas/Cloning' + retrieving: + $ref: '#/components/schemas/Retrieving' + provisioner: + $ref: '#/components/schemas/Provisioner' + synchronization: + $ref: '#/components/schemas/Synchronization' + Status: + required: + - code + - message + type: object + properties: + code: + type: string + description: Status code + message: + type: string + description: Status description + Engine: + type: object + properties: + version: + type: string + edition: + type: string + billingActive: + type: string + instanceID: + type: string + startedAt: + type: string + format: date-time + telemetry: + type: boolean + disableConfigModification: + type: boolean + PoolEntry: + type: object + properties: + name: + type: string + mode: + type: string + dataStateAt: + type: string + format: date-time + status: + type: string + cloneList: + type: array + items: + type: string + fileSystem: + $ref: '#/components/schemas/FileSystem' + FileSystem: + type: object + properties: + mode: + type: string + free: + type: integer + format: int64 + 
size: + type: integer + format: int64 + used: + type: integer + format: int64 + dataSize: + type: integer + format: int64 + usedBySnapshots: + type: integer + format: int64 + usedByClones: + type: integer + format: int64 + compressRatio: + type: integer + format: float64 + Cloning: + type: object + properties: + expectedCloningTime: + type: integer + format: float64 + numClones: + type: integer + format: int64 + clones: + type: array + items: + $ref: '#/components/schemas/Clone' + Retrieving: + type: object + properties: + mode: + type: string + status: + type: string + lastRefresh: + type: string + format: date-time + nextRefresh: + type: string + format: date-time + alerts: + type: array + items: + type: string + activity: + $ref: '#/components/schemas/Activity' + Activity: + type: object + properties: + source: + type: array + items: + $ref: '#/components/schemas/PGActivityEvent' + target: + type: array + items: + $ref: '#/components/schemas/PGActivityEvent' + PGActivityEvent: + type: object + properties: + user: + type: string + query: + type: string + duration: + type: number + waitEventType: + type: string + waitEvent: + type: string + Provisioner: + type: object + properties: + dockerImage: + type: string + containerConfig: + type: object + properties: {} + Synchronization: + type: object + properties: + status: + $ref: '#/components/schemas/Status' + startedAt: + type: string + format: date-time + lastReplayedLsn: + type: string + lastReplayedLsnAt: + type: string + format: date-time + replicationLag: + type: string + replicationUptime: + type: integer + Snapshot: + type: object + properties: + id: + type: string + createdAt: + type: string + format: date-time + dataStateAt: + type: string + format: date-time + physicalSize: + type: integer + format: int64 + logicalSize: + type: integer + format: int64 + pool: + type: string + numClones: + type: integer + format: int + Database: + type: object + properties: + connStr: + type: string + host: + type: string + port: + type: string + username: + type: string + password: + type: string + Clone: + type: object + properties: + id: + type: string + name: + type: string + snapshot: + $ref: '#/components/schemas/Snapshot' + protected: + type: boolean + default: false + deleteAt: + type: string + format: date-time + createdAt: + type: string + format: date-time + status: + $ref: '#/components/schemas/Status' + db: + $ref: '#/components/schemas/Database' + metadata: + $ref: '#/components/schemas/CloneMetadata' + CloneMetadata: + type: object + properties: + cloneDiffSize: + type: integer + format: int64 + logicalSize: + type: integer + format: int64 + cloningTime: + type: integer + format: float64 + maxIdleMinutes: + type: integer + format: int64 + CreateClone: + type: object + properties: + id: + type: string + snapshot: + type: object + properties: + id: + type: string + branch: + type: string + protected: + type: boolean + default: + db: + type: object + properties: + username: + type: string + password: + type: string + restricted: + type: boolean + default: + db_name: + type: string + ResetClone: + type: object + properties: + snapshotID: + type: string + latest: + type: boolean + default: false + description: "Define what snapshot needs to be used when resetting the clone. + 'snapshotID' allows specifying the exact snapshot, while 'latest' allows using + the latest snapshot among all available snapshots. The latter method can be + helpful when the exact snapshot ID is not known."
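A sketch of how the `ResetClone` schema above is used against the `/clone/{id}/reset` endpoint defined earlier (the clone ID, host, and token below are placeholders, not part of this changeset):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Per the ResetClone schema: set "latest" instead of "snapshotID"
	// to reset to the most recent snapshot.
	body := strings.NewReader(`{"latest": true}`)

	// Assumed clone ID "test-clone" and local instance; both illustrative.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:2346/api/clone/test-clone/reset", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Verification-Token", "demo-token")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("reset status:", resp.Status) // expect 200 OK
}
```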
+ UpdateClone: + type: object + properties: + protected: + type: boolean + default: false + StartObservationRequest: + type: object + properties: + clone_id: + type: string + config: + $ref: '#/components/schemas/ObservationConfig' + tags: + type: object + properties: {} + db_name: + type: string + ObservationConfig: + type: object + properties: + observation_interval: + type: integer + format: int64 + max_lock_duration: + type: integer + format: int64 + max_duration: + type: integer + format: int64 + ObservationSession: + type: object + properties: + session_id: + type: integer + format: int64 + started_at: + type: string + format: date-time + finished_at: + type: string + format: date-time + config: + $ref: '#/components/schemas/ObservationConfig' + tags: + type: object + properties: {} + artifacts: + type: array + items: + type: string + result: + $ref: '#/components/schemas/ObservationResult' + ObservationResult: + type: object + properties: + status: + type: string + intervals: + type: array + items: + $ref: '#/components/schemas/ObservationInterval' + summary: + $ref: '#/components/schemas/ObservationSummary' + ObservationInterval: + type: object + properties: + started_at: + type: string + format: date-time + duration: + type: integer + format: int64 + warning: + type: string + ObservationSummary: + type: object + properties: + total_duration: + type: integer + format: float64 + total_intervals: + type: integer + format: int + warning_intervals: + type: integer + format: int + checklist: + $ref: '#/components/schemas/ObservationChecklist' + ObservationChecklist: + type: object + properties: + overall_success: + type: boolean + session_duration_acceptable: + type: boolean + no_long_dangerous_locks: + type: boolean + StopObservationRequest: + type: object + properties: + clone_id: + type: string + overall_error: + type: boolean + SummaryObservationRequest: + type: object + properties: + clone_id: + type: string + session_id: + type: string + ObservationSummaryArtifact: + type: object + properties: + session_id: + type: integer + format: int64 + clone_id: + type: string + duration: + type: object + properties: {} + db_size: + type: object + properties: {} + locks: + type: object + properties: {} + log_errors: + type: object + properties: {} + artifact_types: + type: array + items: + type: string + Error: + type: object + properties: + code: + type: string + message: + type: string + detail: + type: string + hint: + type: string + ResponseStatus: + type: object + properties: + status: + type: string + message: + type: string + Config: + type: object + Connection: + type: object + properties: + host: + type: string + port: + type: string + dbname: + type: string + username: + type: string + password: + type: string + db_list: + type: array + items: + type: string + WSToken: + type: object + properties: + token: + type: string + description: WebSocket token + Branch: + type: object + properties: + name: + type: string + parent: + type: string + dataStateAt: + type: string + format: date-time + snapshotID: + type: string + SnapshotDetails: + type: object + properties: + id: + type: string + parent: + type: string + child: + type: string + branch: + type: array + items: + type: string + root: + type: string + dataStateAt: + type: string + format: date-time + message: + type: string + FullRefresh: + type: object + properties: + status: + type: string + example: OK + message: + type: string + example: Full refresh started diff --git a/engine/api/swagger-spec/dblab_server_swagger.yaml 
b/engine/api/swagger-spec/dblab_server_swagger.yaml index 5242ed8a..8d44307a 100644 --- a/engine/api/swagger-spec/dblab_server_swagger.yaml +++ b/engine/api/swagger-spec/dblab_server_swagger.yaml @@ -22,7 +22,7 @@ externalDocs: servers: - url: "https://demo.dblab.dev/api" - description: "DBLab 3.x demo server; token: 'demo-token'" + description: "DBLab demo server; token: 'demo-token'" x-examples: Verification-Token: "demo-token" - url: "{scheme}://{host}:{port}/{basePath}" diff --git a/engine/api/swagger-ui/swagger-initializer.js b/engine/api/swagger-ui/swagger-initializer.js index 03966101..c5e40fbe 100644 --- a/engine/api/swagger-ui/swagger-initializer.js +++ b/engine/api/swagger-ui/swagger-initializer.js @@ -3,7 +3,7 @@ window.onload = function() { // the following lines will be replaced by docker/configurator, when it runs in a docker-container window.ui = SwaggerUIBundle({ - url: "api/swagger-spec/dblab_server_swagger.yaml", + url: "api/swagger-spec/dblab_openapi.yaml", dom_id: '#swagger-ui', deepLinking: true, presets: [ diff --git a/engine/cmd/cli/commands/branch/actions.go b/engine/cmd/cli/commands/branch/actions.go new file mode 100644 index 00000000..6aa71232 --- /dev/null +++ b/engine/cmd/cli/commands/branch/actions.go @@ -0,0 +1,342 @@ +/* +2022 © Postgres.ai +*/ + +// Package branch provides commands to manage DLE branches. +package branch + +import ( + "errors" + "fmt" + "os" + "strings" + "text/template" + "time" + + "github.com/urfave/cli/v2" + + "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands" + "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands/config" + "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" + "gitlab.com/postgres-ai/database-lab/v3/pkg/models" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util" +) + +const ( + defaultBranch = "main" + + snapshotTemplate = `{{range .}}snapshot {{.ID}} {{.Branch | formatBranch}} +DataStateAt: {{.DataStateAt | formatDSA }}{{if and (ne .Message "-") (ne .Message "")}} + {{.Message}}{{end}} + +{{end}}` +) + +// Create a new template and parse the snapshot log format into it. +var logTemplate = template.Must(template.New("branchLog").Funcs( + template.FuncMap{ + "formatDSA": func(dsa string) string { + p, err := time.Parse(util.DataStateAtFormat, dsa) + if err != nil { + return "" + } + return p.Format(time.RFC1123Z) + }, + "formatBranch": func(branches []string) string { + if len(branches) == 0 { + return "" + } + + return "(HEAD -> " + strings.Join(branches, ", ") + ")" + }, + }).Parse(snapshotTemplate)) + +func switchLocalContext(branchName string) error { + dirname, err := config.GetDirname() + if err != nil { + return err + } + + filename := config.BuildFileName(dirname) + + cfg, err := config.Load(filename) + if err != nil && !os.IsNotExist(err) { + return err + } + + if len(cfg.Environments) == 0 { + return errors.New("no environments found. Use `dblab init` to create a new environment before branching") + } + + currentEnv := cfg.Environments[cfg.CurrentEnvironment] + currentEnv.Branching.CurrentBranch = branchName + + cfg.Environments[cfg.CurrentEnvironment] = currentEnv + + if err := config.SaveConfig(filename, cfg); err != nil { + return commands.ToActionError(err) + } + + return err +} + +func list(cliCtx *cli.Context) error { + dblabClient, err := commands.ClientByCLIContext(cliCtx) + if err != nil { + return err + } + + // Create a new branch. + if branchName := cliCtx.Args().First(); branchName != "" { + return create(cliCtx) + } + + // Delete branch.
+	if branchName := cliCtx.String("delete"); branchName != "" {
+		return deleteBranch(cliCtx)
+	}
+
+	// List branches.
+	branches, err := dblabClient.ListBranches(cliCtx.Context)
+	if err != nil {
+		return err
+	}
+
+	if len(branches) == 0 {
+		_, err = fmt.Fprintln(cliCtx.App.Writer, "No branches found")
+		return err
+	}
+
+	formatted := formatBranchList(cliCtx, branches)
+
+	_, err = fmt.Fprint(cliCtx.App.Writer, formatted)
+
+	return err
+}
+
+func formatBranchList(cliCtx *cli.Context, branches []string) string {
+	baseBranch := getBaseBranch(cliCtx)
+
+	s := strings.Builder{}
+
+	for _, branch := range branches {
+		var prefixStar = "  "
+
+		if baseBranch == branch {
+			prefixStar = "* "
+			branch = "\033[1;32m" + branch + "\033[0m"
+		}
+
+		s.WriteString(prefixStar + branch + "\n")
+	}
+
+	return s.String()
+}
+
+func switchBranch(cliCtx *cli.Context) error {
+	branchName := cliCtx.Args().First()
+
+	if branchName == "" {
+		return errors.New("branch name must not be empty")
+	}
+
+	if err := isBranchExist(cliCtx, branchName); err != nil {
+		return fmt.Errorf("cannot confirm if branch exists: %w", err)
+	}
+
+	if err := switchLocalContext(branchName); err != nil {
+		return commands.ToActionError(err)
+	}
+
+	_, err := fmt.Fprintf(cliCtx.App.Writer, "Switched to branch '%s'\n", branchName)
+
+	return err
+}
+
+func isBranchExist(cliCtx *cli.Context, branchName string) error {
+	dblabClient, err := commands.ClientByCLIContext(cliCtx)
+	if err != nil {
+		return err
+	}
+
+	branches, err := dblabClient.ListBranches(cliCtx.Context)
+	if err != nil {
+		return err
+	}
+
+	for _, branch := range branches {
+		if branch == branchName {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("invalid reference: %s", branchName)
+}
+
+func create(cliCtx *cli.Context) error {
+	dblabClient, err := commands.ClientByCLIContext(cliCtx)
+	if err != nil {
+		return err
+	}
+
+	branchName := cliCtx.Args().First()
+
+	baseBranch := cliCtx.String("parent-branch")
+	snapshotID := cliCtx.String("snapshot-id")
+
+	if baseBranch != "" && snapshotID != "" {
+		return commands.NewActionError("--parent-branch and --snapshot-id cannot be used together; specify only one")
+	}
+
+	if baseBranch == "" {
+		baseBranch = getBaseBranch(cliCtx)
+	}
+
+	branchRequest := types.BranchCreateRequest{
+		BranchName: branchName,
+		BaseBranch: baseBranch,
+		SnapshotID: snapshotID,
+	}
+
+	branch, err := dblabClient.CreateBranch(cliCtx.Context, branchRequest)
+	if err != nil {
+		return err
+	}
+
+	if err := switchLocalContext(branchName); err != nil {
+		return commands.ToActionError(err)
+	}
+
+	_, err = fmt.Fprintf(cliCtx.App.Writer, "Switched to new branch '%s'\n", branch.Name)
+
+	return err
+}
+
+func getBaseBranch(cliCtx *cli.Context) string {
+	baseBranch := cliCtx.String(commands.CurrentBranch)
+
+	if baseBranch == "" {
+		baseBranch = defaultBranch
+	}
+
+	return baseBranch
+}
+
+func deleteBranch(cliCtx *cli.Context) error {
+	dblabClient, err := commands.ClientByCLIContext(cliCtx)
+	if err != nil {
+		return err
+	}
+
+	branchName := cliCtx.String("delete")
+
+	branching, err := getBranchingFromEnv()
+	if err != nil {
+		return err
+	}
+
+	if branching.CurrentBranch == branchName {
+		return fmt.Errorf("cannot delete branch %q because it is the current one", branchName)
+	}
+
+	if err = dblabClient.DeleteBranch(cliCtx.Context, types.BranchDeleteRequest{
+		BranchName: branchName,
+	}); err != nil {
+		return err
+	}
+
+	if err := switchLocalContext(defaultBranch); err != nil {
+		return commands.ToActionError(err)
+	}
+
+	_, err = fmt.Fprintf(cliCtx.App.Writer, "Deleted branch '%s'\n", branchName)
"Deleted branch '%s'\n", branchName) + + return err +} + +func commit(cliCtx *cli.Context) error { + dblabClient, err := commands.ClientByCLIContext(cliCtx) + if err != nil { + return err + } + + cloneID := cliCtx.String("clone-id") + message := cliCtx.String("message") + + snapshotRequest := types.SnapshotCloneCreateRequest{ + CloneID: cloneID, + Message: message, + } + + snapshot, err := dblabClient.CreateSnapshotForBranch(cliCtx.Context, snapshotRequest) + if err != nil { + return err + } + + _, err = fmt.Fprintf(cliCtx.App.Writer, "Created new snapshot '%s'\n", snapshot.SnapshotID) + + return err +} + +func history(cliCtx *cli.Context) error { + dblabClient, err := commands.ClientByCLIContext(cliCtx) + if err != nil { + return err + } + + branchName := cliCtx.Args().First() + + if branchName == "" { + branchName = getBaseBranch(cliCtx) + } + + logRequest := types.LogRequest{BranchName: branchName} + + snapshots, err := dblabClient.BranchLog(cliCtx.Context, logRequest) + if err != nil { + return err + } + + formattedLog, err := formatSnapshotLog(snapshots) + if err != nil { + return err + } + + _, err = fmt.Fprint(cliCtx.App.Writer, formattedLog) + + return err +} + +func getBranchingFromEnv() (config.Branching, error) { + branching := config.Branching{} + + dirname, err := config.GetDirname() + if err != nil { + return branching, err + } + + filename := config.BuildFileName(dirname) + + cfg, err := config.Load(filename) + if err != nil && !os.IsNotExist(err) { + return branching, err + } + + if len(cfg.Environments) == 0 { + return branching, errors.New("no environments found. Use `dblab init` to create a new environment before branching") + } + + branching = cfg.Environments[cfg.CurrentEnvironment].Branching + + return branching, nil +} + +func formatSnapshotLog(snapshots []models.SnapshotDetails) (string, error) { + sb := &strings.Builder{} + + if err := logTemplate.Execute(sb, snapshots); err != nil { + return "", fmt.Errorf("executing template: %w", err) + } + + return sb.String(), nil +} diff --git a/engine/cmd/cli/commands/branch/command_list.go b/engine/cmd/cli/commands/branch/command_list.go new file mode 100644 index 00000000..90087824 --- /dev/null +++ b/engine/cmd/cli/commands/branch/command_list.go @@ -0,0 +1,62 @@ +/* +2020 © Postgres.ai +*/ + +package branch + +import ( + "github.com/urfave/cli/v2" +) + +// List provides commands for getting started. 
+func List() []*cli.Command {
+	return []*cli.Command{
+		{
+			Name:   "branch",
+			Usage:  "list, create, or delete branches",
+			Action: list,
+			Flags: []cli.Flag{
+				&cli.StringFlag{
+					Name:    "delete",
+					Usage:   "delete the specified branch",
+					Aliases: []string{"d"},
+				},
+				&cli.StringFlag{
+					Name:  "parent-branch",
+					Usage: "specify branch name as starting point for new branch; cannot be used together with --snapshot-id",
+				},
+				&cli.StringFlag{
+					Name:  "snapshot-id",
+					Usage: "specify snapshot ID as starting point for new branch; cannot be used together with --parent-branch",
+				},
+			},
+			ArgsUsage: "BRANCH_NAME",
+		},
+		{
+			Name:   "switch",
+			Usage:  "switch to a specified branch",
+			Action: switchBranch,
+		},
+		{
+			Name:   "commit",
+			Usage:  "create a new snapshot containing the current state of data and the given log message describing the changes",
+			Action: commit,
+			Flags: []cli.Flag{
+				&cli.StringFlag{
+					Name:  "clone-id",
+					Usage: "clone ID",
+				},
+				&cli.StringFlag{
+					Name:    "message",
+					Usage:   "use the given message as the commit message",
+					Aliases: []string{"m"},
+				},
+			},
+		},
+		{
+			Name:      "log",
+			Usage:     "show the snapshot logs",
+			Action:    history,
+			ArgsUsage: "BRANCH_NAME",
+		},
+	}
+}
diff --git a/engine/cmd/cli/commands/client.go b/engine/cmd/cli/commands/client.go
index cde42073..d4e45f2d 100644
--- a/engine/cmd/cli/commands/client.go
+++ b/engine/cmd/cli/commands/client.go
@@ -24,6 +24,7 @@ const (
 	FwLocalPortKey  = "forwarding-local-port"
 	IdentityFileKey = "identity-file"
 	TZKey           = "tz"
+	CurrentBranch   = "current-branch"
 )
 
 // ClientByCLIContext creates a new Database Lab API client.
diff --git a/engine/cmd/cli/commands/clone/actions.go b/engine/cmd/cli/commands/clone/actions.go
index 6946470d..3eca7e3f 100644
--- a/engine/cmd/cli/commands/clone/actions.go
+++ b/engine/cmd/cli/commands/clone/actions.go
@@ -105,6 +105,7 @@ func create(cliCtx *cli.Context) error {
 			Restricted: cliCtx.Bool("restricted"),
 			DBName:     cliCtx.String("db-name"),
 		},
+		Branch: cliCtx.String("branch"),
 	}
 
 	if cliCtx.IsSet("snapshot-id") {
@@ -125,6 +126,11 @@ func create(cliCtx *cli.Context) error {
 		return err
 	}
 
+	if clone.Branch != "" {
+		_, err = fmt.Fprintln(cliCtx.App.Writer, buildCloneOutput(clone))
+		return err
+	}
+
 	viewClone, err := convertCloneView(clone)
 	if err != nil {
 		return err
@@ -140,6 +146,37 @@ func create(cliCtx *cli.Context) error {
 	return err
 }
 
+func buildCloneOutput(clone *models.Clone) string {
+	const (
+		outputAlign      = 2
+		id               = "ID"
+		branch           = "Branch"
+		snapshot         = "Snapshot"
+		connectionString = "Connection string"
+		maxNameLen       = len(connectionString)
+	)
+
+	s := strings.Builder{}
+
+	s.WriteString(id + ":" + strings.Repeat(" ", maxNameLen-len(id)+outputAlign))
+	s.WriteString(clone.ID)
+	s.WriteString("\n")
+
+	s.WriteString(branch + ":" + strings.Repeat(" ", maxNameLen-len(branch)+outputAlign))
+	s.WriteString(clone.Branch)
+	s.WriteString("\n")
+
+	s.WriteString(snapshot + ":" + strings.Repeat(" ", maxNameLen-len(snapshot)+outputAlign))
+	s.WriteString(clone.Snapshot.ID)
+	s.WriteString("\n")
+
+	s.WriteString(connectionString + ":" + strings.Repeat(" ", maxNameLen-len(connectionString)+outputAlign))
+	s.WriteString(clone.DB.ConnStr)
+	s.WriteString("\n")
+
+	return s.String()
+}
+
 // update runs a request to update an existing clone.
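//
// A minimal sketch of what the update action amounts to, assuming the API
// client exposes an UpdateClone method and a types.CloneUpdateRequest with a
// Protected field (neither is shown in this diff):
//
//	_, err := dblabClient.UpdateClone(cliCtx.Context, cloneID, types.CloneUpdateRequest{
//		Protected: cliCtx.Bool("protected"),
//	})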
func update(cliCtx *cli.Context) error { dblabClient, err := commands.ClientByCLIContext(cliCtx) diff --git a/engine/cmd/cli/commands/clone/command_list.go b/engine/cmd/cli/commands/clone/command_list.go index 44dc35fd..15cd8953 100644 --- a/engine/cmd/cli/commands/clone/command_list.go +++ b/engine/cmd/cli/commands/clone/command_list.go @@ -19,7 +19,7 @@ const ( func CommandList() []*cli.Command { return []*cli.Command{{ Name: "clone", - Usage: "manages clones", + Usage: "create, update, delete, reset, or retrieve clone", Subcommands: []*cli.Command{ { Name: "list", @@ -64,6 +64,10 @@ func CommandList() []*cli.Command { Name: "snapshot-id", Usage: "snapshot ID (optional)", }, + &cli.StringFlag{ + Name: "branch", + Usage: "branch name (optional)", + }, &cli.BoolFlag{ Name: "protected", Usage: "mark instance as protected from deletion", diff --git a/engine/cmd/cli/commands/config/environment.go b/engine/cmd/cli/commands/config/environment.go index 4e6146e6..0130a604 100644 --- a/engine/cmd/cli/commands/config/environment.go +++ b/engine/cmd/cli/commands/config/environment.go @@ -11,6 +11,9 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands" ) +// DefaultBranch defines the name of data branch. +const DefaultBranch = "main" + // CLIConfig defines a format of CLI configuration. type CLIConfig struct { CurrentEnvironment string `yaml:"current_environment" json:"current_environment"` @@ -26,6 +29,7 @@ type Environment struct { Insecure bool `yaml:"insecure" json:"insecure"` RequestTimeout Duration `yaml:"request_timeout,omitempty" json:"request_timeout,omitempty"` Forwarding Forwarding `yaml:"forwarding" json:"forwarding"` + Branching Branching `yaml:"branching" json:"branching"` } // Forwarding defines configuration for port forwarding. @@ -40,6 +44,11 @@ type Settings struct { TZ string `yaml:"tz" json:"tz"` } +// Branching defines branching context. +type Branching struct { + CurrentBranch string `yaml:"current_branch" json:"current_branch"` +} + // AddEnvironmentToConfig adds a new environment to CLIConfig. func AddEnvironmentToConfig(c *cli.Context, cfg *CLIConfig, environmentID string) error { if environmentID == "" { @@ -60,6 +69,13 @@ func AddEnvironmentToConfig(c *cli.Context, cfg *CLIConfig, environmentID string LocalPort: c.String(commands.FwLocalPortKey), IdentityFile: c.String(commands.IdentityFileKey), }, + Branching: Branching{ + CurrentBranch: c.String(commands.CurrentBranch), + }, + } + + if env.Branching.CurrentBranch == "" { + env.Branching.CurrentBranch = DefaultBranch } if cfg.Environments == nil { @@ -117,6 +133,10 @@ func updateEnvironmentInConfig(c *cli.Context, cfg *CLIConfig, environmentID str newEnvironment.Forwarding.IdentityFile = c.String(commands.IdentityFileKey) } + if c.IsSet(commands.CurrentBranch) { + newEnvironment.Branching.CurrentBranch = c.String(commands.CurrentBranch) + } + if newEnvironment == environment { return errors.New("config unchanged. 
Set different option values to update.") // nolint } diff --git a/engine/cmd/cli/commands/config/file.go b/engine/cmd/cli/commands/config/file.go index 0b04e0cc..67ffbc53 100644 --- a/engine/cmd/cli/commands/config/file.go +++ b/engine/cmd/cli/commands/config/file.go @@ -8,6 +8,7 @@ import ( "os" "os/user" "path" + "path/filepath" "gopkg.in/yaml.v2" ) @@ -16,6 +17,12 @@ const ( dblabDir = ".dblab" configPath = "cli" configFilename = "cli.yml" + envs = "envs" +) + +const ( + branches = "branches" + snapshots = "snapshots" ) // GetDirname returns the CLI config path located in the current user's home directory. @@ -40,19 +47,35 @@ func GetFilename() (string, error) { return BuildFileName(dirname), nil } +// BuildBranchPath builds a path to the branch dir. +func BuildBranchPath(dirname string) string { + return filepath.Join(dirname, envs, branches) +} + +// BuildSnapshotPath builds a path to the snapshot dir. +func BuildSnapshotPath(dirname string) string { + return filepath.Join(dirname, envs, snapshots) +} + // BuildFileName builds a config filename. func BuildFileName(dirname string) string { return path.Join(dirname, configFilename) } +// BuildEnvsDirName builds envs directory name. +func BuildEnvsDirName(dirname string) string { + return path.Join(dirname, envs) +} + // Load loads a CLI config by a provided filename. func Load(filename string) (*CLIConfig, error) { + cfg := &CLIConfig{} + configData, err := os.ReadFile(filename) if err != nil { - return nil, err + return cfg, err } - cfg := &CLIConfig{} if err := yaml.Unmarshal(configData, cfg); err != nil { return nil, err } diff --git a/engine/cmd/cli/commands/global/actions.go b/engine/cmd/cli/commands/global/actions.go index 35fe83a5..1de794fa 100644 --- a/engine/cmd/cli/commands/global/actions.go +++ b/engine/cmd/cli/commands/global/actions.go @@ -10,7 +10,6 @@ import ( "net/url" "os" - "github.com/pkg/errors" "github.com/urfave/cli/v2" "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands" @@ -25,7 +24,7 @@ func initCLI(c *cli.Context) error { } if err := os.MkdirAll(dirname, 0755); err != nil { - return errors.Wrapf(err, "Cannot create config directory %s", dirname) + return fmt.Errorf("cannot create config directory %s: %w", dirname, err) } filename := config.BuildFileName(dirname) diff --git a/engine/cmd/cli/commands/global/command_list.go b/engine/cmd/cli/commands/global/command_list.go index f36fafa7..c665684e 100644 --- a/engine/cmd/cli/commands/global/command_list.go +++ b/engine/cmd/cli/commands/global/command_list.go @@ -58,7 +58,7 @@ func List() []*cli.Command { }, { Name: "port-forward", - Usage: "start port forwarding to the Database Lab instance", + Usage: "start port forwarding to the DBLab instance", Before: commands.CheckForwardingServerURL, Action: forward, }, diff --git a/engine/cmd/cli/commands/instance/actions.go b/engine/cmd/cli/commands/instance/actions.go index ab0689d0..c4bafb65 100644 --- a/engine/cmd/cli/commands/instance/actions.go +++ b/engine/cmd/cli/commands/instance/actions.go @@ -66,3 +66,20 @@ func health(cliCtx *cli.Context) error { return err } + +// refresh runs a request to initiate a full refresh. 
+func refresh(cliCtx *cli.Context) error { + client, err := commands.ClientByCLIContext(cliCtx) + if err != nil { + return err + } + + response, err := client.FullRefresh(cliCtx.Context) + if err != nil { + return err + } + + _, err = fmt.Fprintln(cliCtx.App.Writer, response.Message) + + return err +} diff --git a/engine/cmd/cli/commands/instance/command_list.go b/engine/cmd/cli/commands/instance/command_list.go index 164a46c4..07d9ec8e 100644 --- a/engine/cmd/cli/commands/instance/command_list.go +++ b/engine/cmd/cli/commands/instance/command_list.go @@ -13,7 +13,7 @@ func CommandList() []*cli.Command { return []*cli.Command{ { Name: "instance", - Usage: "displays instance info", + Usage: "display instance info", Subcommands: []*cli.Command{ { Name: "status", @@ -25,6 +25,11 @@ func CommandList() []*cli.Command { Usage: "display instance's version", Action: health, }, + { + Name: "full-refresh", + Usage: "initiate full refresh", + Action: refresh, + }, }, }, } diff --git a/engine/cmd/cli/commands/snapshot/actions.go b/engine/cmd/cli/commands/snapshot/actions.go index 0ac175a5..1f4c7dd0 100644 --- a/engine/cmd/cli/commands/snapshot/actions.go +++ b/engine/cmd/cli/commands/snapshot/actions.go @@ -7,11 +7,14 @@ package snapshot import ( "encoding/json" + "errors" "fmt" "github.com/urfave/cli/v2" "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands" + "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi" + "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" ) @@ -44,3 +47,83 @@ func list(cliCtx *cli.Context) error { return err } + +// create runs a request to create a new snapshot. +func create(cliCtx *cli.Context) error { + dblabClient, err := commands.ClientByCLIContext(cliCtx) + if err != nil { + return err + } + + cloneID := cliCtx.String("clone-id") + + var commandResponse []byte + + if cloneID != "" { + commandResponse, err = createFromClone(cliCtx, dblabClient) + } else { + commandResponse, err = createOnPool(cliCtx, dblabClient) + } + + if err != nil { + return err + } + + _, err = fmt.Fprintln(cliCtx.App.Writer, string(commandResponse)) + + return err +} + +// createOnPool runs a request to create a new snapshot. +func createOnPool(cliCtx *cli.Context, client *dblabapi.Client) ([]byte, error) { + snapshotRequest := types.SnapshotCreateRequest{ + PoolName: cliCtx.String("pool"), + } + + snapshot, err := client.CreateSnapshot(cliCtx.Context, snapshotRequest) + if err != nil { + return nil, err + } + + return json.MarshalIndent(snapshot, "", " ") +} + +// createFromClone runs a request to create a new snapshot from clone. +func createFromClone(cliCtx *cli.Context, client *dblabapi.Client) ([]byte, error) { + cloneID := cliCtx.String("clone-id") + message := cliCtx.String("message") + + snapshotRequest := types.SnapshotCloneCreateRequest{ + CloneID: cloneID, + Message: message, + } + + snapshot, err := client.CreateSnapshotFromClone(cliCtx.Context, snapshotRequest) + if err != nil { + return nil, err + } + + return json.MarshalIndent(snapshot, "", " ") +} + +// deleteSnapshot runs a request to delete existing snapshot. 
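+//
+// A sketch of the equivalent client call (request type as used below; the
+// snapshot ID value is a placeholder):
+//
+//	err := dblabClient.DeleteSnapshot(ctx, types.SnapshotDestroyRequest{
+//		SnapshotID: "snapshot-id",
+//	})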
+func deleteSnapshot(cliCtx *cli.Context) error { + dblabClient, err := commands.ClientByCLIContext(cliCtx) + if err != nil { + return err + } + + snapshotID := cliCtx.Args().First() + + snapshotRequest := types.SnapshotDestroyRequest{ + SnapshotID: snapshotID, + } + + if err := dblabClient.DeleteSnapshot(cliCtx.Context, snapshotRequest); err != nil { + return errors.Unwrap(err) + } + + _, err = fmt.Fprintf(cliCtx.App.Writer, "Deleted snapshot '%s'\n", snapshotID) + + return err +} diff --git a/engine/cmd/cli/commands/snapshot/command_list.go b/engine/cmd/cli/commands/snapshot/command_list.go index 3fd6e3cb..bda2b865 100644 --- a/engine/cmd/cli/commands/snapshot/command_list.go +++ b/engine/cmd/cli/commands/snapshot/command_list.go @@ -6,6 +6,8 @@ package snapshot import ( "github.com/urfave/cli/v2" + + "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands" ) // CommandList returns available commands for a snapshot management. @@ -13,14 +15,48 @@ func CommandList() []*cli.Command { return []*cli.Command{ { Name: "snapshot", - Usage: "manage snapshots", + Usage: "create, retrieve, or delete snapshot", Subcommands: []*cli.Command{ { Name: "list", Usage: "list all existing snapshots", Action: list, }, + { + Name: "create", + Usage: "create a snapshot", + Action: create, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "pool", + Usage: "pool name", + }, + &cli.StringFlag{ + Name: "clone-id", + Usage: "create a snapshot from existing clone", + }, + &cli.StringFlag{ + Name: "message", + Usage: "optional message for new snapshot created from existing clone", + }, + }, + }, + { + Name: "delete", + Usage: "delete existing snapshot", + Action: deleteSnapshot, + ArgsUsage: "SNAPSHOT_ID", + Before: checkSnapshotIDBefore, + }, }, }, } } + +func checkSnapshotIDBefore(c *cli.Context) error { + if c.NArg() == 0 { + return commands.NewActionError("SNAPSHOT_ID argument is required") + } + + return nil +} diff --git a/engine/cmd/cli/main.go b/engine/cmd/cli/main.go index 5bd6c4ed..41ca8789 100644 --- a/engine/cmd/cli/main.go +++ b/engine/cmd/cli/main.go @@ -10,6 +10,7 @@ import ( "github.com/urfave/cli/v2" "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands" + "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands/branch" "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands/clone" "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands/config" "gitlab.com/postgres-ai/database-lab/v3/cmd/cli/commands/global" @@ -31,6 +32,9 @@ func main() { // Config commands. global.List(), + // Branching. + branch.List(), + // Database Lab API. 
clone.CommandList(), instance.CommandList(), @@ -81,6 +85,11 @@ func main() { Usage: "run in debug mode", EnvVars: []string{"DBLAB_CLI_DEBUG"}, }, + &cli.StringFlag{ + Name: "current-branch", + Usage: "current branch", + EnvVars: []string{"DBLAB_CLI_CURRENT_BRANCH"}, + }, }, EnableBashCompletion: true, } @@ -158,6 +167,16 @@ func loadEnvironmentParams(c *cli.Context) error { return err } } + + currentBranch := config.DefaultBranch + + if env.Branching.CurrentBranch != "" { + currentBranch = env.Branching.CurrentBranch + } + + if err := c.Set(commands.CurrentBranch, currentBranch); err != nil { + return err + } } return nil diff --git a/engine/cmd/database-lab/main.go b/engine/cmd/database-lab/main.go index bd90ef52..edce91b7 100644 --- a/engine/cmd/database-lab/main.go +++ b/engine/cmd/database-lab/main.go @@ -37,6 +37,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/srv" "gitlab.com/postgres-ai/database-lab/v3/internal/srv/ws" "gitlab.com/postgres-ai/database-lab/v3/internal/telemetry" + "gitlab.com/postgres-ai/database-lab/v3/internal/webhooks" "gitlab.com/postgres-ai/database-lab/v3/pkg/config" "gitlab.com/postgres-ai/database-lab/v3/pkg/config/global" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" @@ -57,7 +58,7 @@ func main() { } logFilter := log.GetFilter() - logFilter.ReloadLogRegExp([]string{cfg.Server.VerificationToken, cfg.Platform.AccessToken, cfg.Platform.OrgKey}) + logFilter.ReloadLogRegExp(maskedSecrets(cfg)) config.ApplyGlobals(cfg) @@ -112,6 +113,11 @@ func main() { tm := telemetry.New(platformSvc, engProps.InstanceID) + webhookChan := make(chan webhooks.EventTyper, 1) + whs := webhooks.NewService(&cfg.Webhooks, webhookChan) + + go whs.Run(ctx) + pm := pool.NewPoolManager(&cfg.PoolManager, runner) if err = pm.ReloadPools(); err != nil { log.Err(err.Error()) @@ -147,7 +153,7 @@ func main() { shutdownDatabaseLabEngine(context.Background(), docker, &cfg.Global.Database, engProps.InstanceID, pm.First()) } - cloningSvc := cloning.NewBase(&cfg.Cloning, provisioner, tm, observingChan) + cloningSvc := cloning.NewBase(&cfg.Cloning, &cfg.Global, provisioner, tm, observingChan, webhookChan) if err = cloningSvc.Run(ctx); err != nil { log.Err(err) emergencyShutdown() @@ -178,11 +184,12 @@ func main() { server, logCleaner, logFilter, + whs, ) } server := srv.NewServer(&cfg.Server, &cfg.Global, &engProps, docker, cloningSvc, provisioner, retrievalSvc, platformSvc, - billingSvc, obs, pm, tm, tokenHolder, logFilter, embeddedUI, reloadConfigFn) + billingSvc, obs, pm, tm, tokenHolder, logFilter, embeddedUI, reloadConfigFn, webhookChan) server.InitHandlers() @@ -195,7 +202,7 @@ func main() { if cfg.EmbeddedUI.Enabled { go func() { if err := embeddedUI.Run(ctx); err != nil { - log.Err("Failed to start embedded UI container:", err.Error()) + log.Err("failed to start embedded UI container:", err.Error()) return } }() @@ -230,19 +237,19 @@ func main() { go setReloadListener(ctx, engProps, provisioner, billingSvc, retrievalSvc, pm, cloningSvc, platformSvc, embeddedUI, server, - logCleaner, logFilter) + logCleaner, logFilter, whs) go billingSvc.CollectUsage(ctx, systemMetrics) if err := retrievalSvc.Run(ctx); err != nil { - log.Err("Failed to run the data retrieval service:", err) + log.Err("failed to run data retrieval service:", err) log.Msg(contactSupport) } defer retrievalSvc.Stop() if err := logCleaner.ScheduleLogCleanupJob(cfg.Diagnostic); err != nil { - log.Err("Failed to schedule a cleanup job of the diagnostic logs collector", err) + log.Err("failed to schedule cleanup job 
of diagnostic logs collector", err) } <-shutdownCh @@ -312,13 +319,14 @@ func getEngineProperties(ctx context.Context, docker *client.Client, cfg *config func reloadConfig(ctx context.Context, engProp global.EngineProps, provisionSvc *provision.Provisioner, billingSvc *billing.Billing, retrievalSvc *retrieval.Retrieval, pm *pool.Manager, cloningSvc *cloning.Base, platformSvc *platform.Service, - embeddedUI *embeddedui.UIManager, server *srv.Server, cleaner *diagnostic.Cleaner, filtering *log.Filtering) error { + embeddedUI *embeddedui.UIManager, server *srv.Server, cleaner *diagnostic.Cleaner, filtering *log.Filtering, + whs *webhooks.Service) error { cfg, err := config.LoadConfiguration() if err != nil { return err } - filtering.ReloadLogRegExp([]string{cfg.Server.VerificationToken, cfg.Platform.AccessToken, cfg.Platform.OrgKey}) + filtering.ReloadLogRegExp(maskedSecrets(cfg)) config.ApplyGlobals(cfg) if err := provision.IsValidConfig(cfg.Provision); err != nil { @@ -354,17 +362,19 @@ func reloadConfig(ctx context.Context, engProp global.EngineProps, provisionSvc provisionSvc.Reload(cfg.Provision, dbCfg) retrievalSvc.Reload(ctx, newRetrievalConfig) - cloningSvc.Reload(cfg.Cloning) + cloningSvc.Reload(cfg.Cloning, cfg.Global) platformSvc.Reload(newPlatformSvc) billingSvc.Reload(newPlatformSvc.Client) server.Reload(cfg.Server) + whs.Reload(&cfg.Webhooks) return nil } func setReloadListener(ctx context.Context, engProp global.EngineProps, provisionSvc *provision.Provisioner, billingSvc *billing.Billing, retrievalSvc *retrieval.Retrieval, pm *pool.Manager, cloningSvc *cloning.Base, platformSvc *platform.Service, - embeddedUI *embeddedui.UIManager, server *srv.Server, cleaner *diagnostic.Cleaner, logFilter *log.Filtering) { + embeddedUI *embeddedui.UIManager, server *srv.Server, cleaner *diagnostic.Cleaner, logFilter *log.Filtering, + whs *webhooks.Service) { reloadCh := make(chan os.Signal, 1) signal.Notify(reloadCh, syscall.SIGHUP) @@ -376,8 +386,8 @@ func setReloadListener(ctx context.Context, engProp global.EngineProps, provisio pm, cloningSvc, platformSvc, embeddedUI, server, - cleaner, logFilter); err != nil { - log.Err("Failed to reload configuration:", err) + cleaner, logFilter, whs); err != nil { + log.Err("failed to reload configuration:", err) continue } @@ -397,11 +407,11 @@ func shutdownDatabaseLabEngine(ctx context.Context, docker *client.Client, dbCfg log.Msg("Stopping auxiliary containers") if err := cont.StopControlContainers(ctx, docker, dbCfg, instanceID, fsm); err != nil { - log.Err("Failed to stop control containers", err) + log.Err("failed to stop control containers", err) } if err := cont.CleanUpSatelliteContainers(ctx, docker, instanceID); err != nil { - log.Err("Failed to stop satellite containers", err) + log.Err("failed to stop satellite containers", err) } log.Msg("Auxiliary containers have been stopped") @@ -412,3 +422,19 @@ func removeObservingClones(obsCh chan string, obs *observer.Observer) { obs.RemoveObservingClone(cloneID) } } + +func maskedSecrets(cfg *config.Config) []string { + maskedSecrets := []string{ + cfg.Server.VerificationToken, + cfg.Platform.AccessToken, + cfg.Platform.OrgKey, + } + + for _, webhookCfg := range cfg.Webhooks.Hooks { + if webhookCfg.Secret != "" { + maskedSecrets = append(maskedSecrets, webhookCfg.Secret) + } + } + + return maskedSecrets +} diff --git a/engine/cmd/runci/main.go b/engine/cmd/runci/main.go index 60af0beb..47905644 100644 --- a/engine/cmd/runci/main.go +++ b/engine/cmd/runci/main.go @@ -32,7 +32,7 @@ func main() { cfg, 
err := runci.LoadConfiguration() if err != nil { - log.Errf("Failed to load config: %v", err) + log.Errf("failed to load config: %v", err) return } @@ -40,7 +40,7 @@ func main() { log.Dbg("Config loaded: ", cfg) if cfg.App.VerificationToken == "" { - log.Err("DB Migration Checker is insecure since the Verification Token is empty") + log.Err("migration checker is insecure since verification token is empty") return } diff --git a/engine/configs/config.example.logical_generic.yml b/engine/configs/config.example.logical_generic.yml index e08d1c28..63cca1e5 100644 --- a/engine/configs/config.example.logical_generic.yml +++ b/engine/configs/config.example.logical_generic.yml @@ -186,9 +186,6 @@ retrieval: # Timetable is to be defined in crontab format: https://en.wikipedia.org/wiki/Cron#Overview timetable: "0 0 * * 1" - # Skip data refresh while the retrieval starts. - skipStartRefresh: false - # The jobs section must not contain physical and logical restore jobs simultaneously. jobs: - logicalDump @@ -246,9 +243,6 @@ retrieval: # If your source database has 4 vCPUs or less, and you don't want to saturate them, use 2 or 1. parallelJobs: 4 - # Ignore errors that occurred during logical data dump. Do not ignore by default. - ignoreErrors: false - # Options for direct restore to Database Lab Engine instance. # Uncomment this if you prefer restoring from the dump on the fly. In this case, # you do not need to use "logicalRestore" job. Keep in mind that unlike "logicalRestore", @@ -257,6 +251,9 @@ retrieval: # immediateRestore: # # Enable immediate restore. # enabled: true + # # Restore data even if the Postgres directory (`global.dataDir`) is not empty. + # # Note the existing data will be overwritten. + # forceInit: false # # Option to adjust PostgreSQL configuration for a logical dump job. # # It's useful if a dumped database contains non-standard extensions. # <<: *db_configs @@ -284,8 +281,10 @@ retrieval: # If your machine with DLE has 4 vCPUs or less, and you don't want to saturate them, use 2 or 1. parallelJobs: 4 - # Ignore errors that occurred during logical data restore. Do not ignore by default. - ignoreErrors: false + + # Restore data even if the Postgres directory (`global.dataDir`) is not empty. + # Note the existing data will be overwritten. + forceInit: false # Option to adjust PostgreSQL configuration for a logical restore job # It's useful if a restored database contains non-standard extensions. @@ -410,3 +409,11 @@ platform: # "select \\d+": "***" # "[a-z0-9._%+\\-]+(@[a-z0-9.\\-]+\\.[a-z]{2,4})": "***$1" # +# Webhooks configuration. +#webhooks: +# hooks: +# - url: "" +# secret: "" # (optional) Sent with the request in the `DBLab-Webhook-Token` HTTP header. +# trigger: +# - clone_create +# - clone_reset \ No newline at end of file diff --git a/engine/configs/config.example.logical_rds_iam.yml b/engine/configs/config.example.logical_rds_iam.yml index 507779e4..7cb4e446 100644 --- a/engine/configs/config.example.logical_rds_iam.yml +++ b/engine/configs/config.example.logical_rds_iam.yml @@ -185,9 +185,6 @@ retrieval: # Timetable is to be defined in crontab format: https://en.wikipedia.org/wiki/Cron#Overview timetable: "0 0 * * 1" - # Skip data refresh while the retrieval starts. - skipStartRefresh: false - # The jobs section must not contain physical and logical restore jobs simultaneously. jobs: - logicalDump @@ -242,9 +239,6 @@ retrieval: # If your source database has 4 vCPUs or less, and you don't want to saturate them, use 2 or 1. 
parallelJobs: 4 - # Ignore errors that occurred during logical data dump. Do not ignore by default. - ignoreErrors: false - # Options for direct restore to Database Lab Engine instance. # Uncomment this if you prefer restoring from the dump on the fly. In this case, # you do not need to use "logicalRestore" job. Keep in mind that unlike "logicalRestore", @@ -253,6 +247,9 @@ retrieval: # immediateRestore: # # Enable immediate restore. # enabled: true + # # Restore data even if the Postgres directory (`global.dataDir`) is not empty. + # # Note the existing data will be overwritten. + # forceInit: false # # Option to adjust PostgreSQL configuration for a logical dump job. # # It's useful if a dumped database contains non-standard extensions. # <<: *db_configs @@ -279,8 +276,9 @@ retrieval: # If your machine with DLE has 4 vCPUs or less, and you don't want to saturate them, use 2 or 1. parallelJobs: 4 - # Ignore errors that occurred during logical data restore. Do not ignore by default. - ignoreErrors: false + # Restore data even if the Postgres directory (`global.dataDir`) is not empty. + # Note the existing data will be overwritten. + forceInit: false # Option to adjust PostgreSQL configuration for a logical restore job # It's useful if a restored database contains non-standard extensions. @@ -410,3 +408,11 @@ platform: # "select \\d+": "***" # "[a-z0-9._%+\\-]+(@[a-z0-9.\\-]+\\.[a-z]{2,4})": "***$1" # +# Webhooks configuration. +#webhooks: +# hooks: +# - url: "" +# secret: "" # (optional) Sent with the request in the `DBLab-Webhook-Token` HTTP header. +# trigger: +# - clone_create +# - clone_reset \ No newline at end of file diff --git a/engine/configs/config.example.physical_generic.yml b/engine/configs/config.example.physical_generic.yml index ebf29392..bd0399f0 100644 --- a/engine/configs/config.example.physical_generic.yml +++ b/engine/configs/config.example.physical_generic.yml @@ -357,3 +357,11 @@ platform: # "select \\d+": "***" # "[a-z0-9._%+\\-]+(@[a-z0-9.\\-]+\\.[a-z]{2,4})": "***$1" # +# Webhooks configuration. +#webhooks: +# hooks: +# - url: "" +# secret: "" # (optional) Sent with the request in the `DBLab-Webhook-Token` HTTP header. +# trigger: +# - clone_create +# - clone_reset \ No newline at end of file diff --git a/engine/configs/config.example.physical_pgbackrest.yml b/engine/configs/config.example.physical_pgbackrest.yml index 447272e9..6f07a141 100644 --- a/engine/configs/config.example.physical_pgbackrest.yml +++ b/engine/configs/config.example.physical_pgbackrest.yml @@ -375,3 +375,11 @@ platform: # "regexp": "replace" # "select \\d+": "***" # "[a-z0-9._%+\\-]+(@[a-z0-9.\\-]+\\.[a-z]{2,4})": "***$1" +# Webhooks configuration. +#webhooks: +# hooks: +# - url: "" +# secret: "" # (optional) Sent with the request in the `DBLab-Webhook-Token` HTTP header. +# trigger: +# - clone_create +# - clone_reset \ No newline at end of file diff --git a/engine/configs/config.example.physical_walg.yml b/engine/configs/config.example.physical_walg.yml index 17a4167c..c9683f07 100644 --- a/engine/configs/config.example.physical_walg.yml +++ b/engine/configs/config.example.physical_walg.yml @@ -348,3 +348,11 @@ platform: # "regexp": "replace" # "select \\d+": "***" # "[a-z0-9._%+\\-]+(@[a-z0-9.\\-]+\\.[a-z]{2,4})": "***$1" +# Webhooks configuration. +#webhooks: +# hooks: +# - url: "" +# secret: "" # (optional) Sent with the request in the `DBLab-Webhook-Token` HTTP header. 
+# trigger: +# - clone_create +# - clone_reset \ No newline at end of file diff --git a/engine/internal/cloning/base.go b/engine/internal/cloning/base.go index 0a69a9aa..e5edb759 100644 --- a/engine/internal/cloning/base.go +++ b/engine/internal/cloning/base.go @@ -7,6 +7,7 @@ package cloning import ( "context" "database/sql" + stderrors "errors" "fmt" "sort" "strconv" @@ -23,7 +24,9 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/provision" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/resources" "gitlab.com/postgres-ai/database-lab/v3/internal/telemetry" + "gitlab.com/postgres-ai/database-lab/v3/internal/webhooks" "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" + "gitlab.com/postgres-ai/database-lab/v3/pkg/config/global" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" "gitlab.com/postgres-ai/database-lab/v3/pkg/util" @@ -32,8 +35,6 @@ import ( const ( idleCheckDuration = 5 * time.Minute - - defaultDatabaseName = "postgres" ) // Config contains a cloning configuration. @@ -45,22 +46,27 @@ type Config struct { // Base provides cloning service. type Base struct { config *Config + global *global.Config cloneMutex sync.RWMutex clones map[string]*CloneWrapper snapshotBox SnapshotBox provision *provision.Provisioner tm *telemetry.Agent observingCh chan string + webhookCh chan webhooks.EventTyper } // NewBase instances a new Base service. -func NewBase(cfg *Config, provision *provision.Provisioner, tm *telemetry.Agent, observingCh chan string) *Base { +func NewBase(cfg *Config, global *global.Config, provision *provision.Provisioner, tm *telemetry.Agent, + observingCh chan string, whCh chan webhooks.EventTyper) *Base { return &Base{ config: cfg, + global: global, clones: make(map[string]*CloneWrapper), provision: provision, tm: tm, observingCh: observingCh, + webhookCh: whCh, snapshotBox: SnapshotBox{ items: make(map[string]*models.Snapshot), }, @@ -68,8 +74,9 @@ func NewBase(cfg *Config, provision *provision.Provisioner, tm *telemetry.Agent, } // Reload reloads base cloning configuration. -func (c *Base) Reload(cfg Config) { +func (c *Base) Reload(cfg Config, global global.Config) { *c.config = cfg + *c.global = global } // Run initializes and runs cloning component. @@ -79,11 +86,11 @@ func (c *Base) Run(ctx context.Context) error { } if _, err := c.GetSnapshots(); err != nil { - log.Err("No available snapshots: ", err) + log.Err("no available snapshots:", err) } if err := c.RestoreClonesState(); err != nil { - log.Err("Failed to load stored sessions:", err) + log.Err("failed to load stored sessions:", err) } c.restartCloneContainers(ctx) @@ -109,7 +116,7 @@ func (c *Base) cleanupInvalidClones() error { c.cloneMutex.Lock() for _, clone := range c.clones { - keepClones[util.GetCloneName(clone.Session.Port)] = struct{}{} + keepClones[clone.Clone.ID] = struct{}{} } c.cloneMutex.Unlock() @@ -123,6 +130,16 @@ func (c *Base) cleanupInvalidClones() error { return nil } +// GetLatestSnapshot returns the latest snapshot. +func (c *Base) GetLatestSnapshot() (*models.Snapshot, error) { + snapshot, err := c.getLatestSnapshot() + if err != nil { + return nil, fmt.Errorf("failed to find the latest snapshot: %w", err) + } + + return snapshot, err +} + // CreateClone creates a new clone. 
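//
// The clone lifecycle methods below now also publish webhook events through
// webhookCh. A sketch of the event sent once a clone session starts, exactly
// as populated further down in this file (the wire format is defined by the
// webhooks package, which is not shown in this diff):
//
//	webhooks.CloneEvent{
//		BasicEvent:    webhooks.BasicEvent{EventType: webhooks.CloneCreatedEvent, EntityID: cloneID},
//		Host:          c.config.AccessHost,
//		Port:          session.Port,
//		Username:      clone.DB.Username,
//		DBName:        clone.DB.DBName,
//		ContainerName: cloneID,
//	}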
func (c *Base) CreateClone(cloneRequest *types.CloneCreateRequest) (*models.Clone, error) { cloneRequest.ID = strings.TrimSpace(cloneRequest.ID) @@ -154,9 +171,14 @@ func (c *Base) CreateClone(cloneRequest *types.CloneCreateRequest) (*models.Clon } } + if cloneRequest.Branch == "" { + cloneRequest.Branch = snapshot.Branch + } + clone := &models.Clone{ ID: cloneRequest.ID, Snapshot: snapshot, + Branch: cloneRequest.Branch, Protected: cloneRequest.Protected, CreatedAt: models.NewLocalTime(createdAt), Status: models.Status{ @@ -167,6 +189,7 @@ func (c *Base) CreateClone(cloneRequest *types.CloneCreateRequest) (*models.Clon Username: cloneRequest.DB.Username, DBName: cloneRequest.DB.DBName, }, + Revision: cloneRequest.Revision, } w := NewCloneWrapper(clone, createdAt) @@ -181,19 +204,19 @@ func (c *Base) CreateClone(cloneRequest *types.CloneCreateRequest) (*models.Clon AvailableDB: cloneRequest.DB.DBName, } - c.incrementCloneNumber(clone.Snapshot.ID) + c.IncrementCloneNumber(clone.Snapshot.ID) go func() { - session, err := c.provision.StartSession(clone.Snapshot.ID, ephemeralUser, cloneRequest.ExtraConf) + session, err := c.provision.StartSession(clone, ephemeralUser, cloneRequest.ExtraConf) if err != nil { // TODO(anatoly): Empty room case. - log.Errf("Failed to start session: %v.", err) + log.Errf("failed to start session: %v", err) if updateErr := c.UpdateCloneStatus(cloneID, models.Status{ Code: models.StatusFatal, Message: errors.Cause(err).Error(), }); updateErr != nil { - log.Errf("Failed to update clone status: %v", updateErr) + log.Errf("failed to update clone status: %v", updateErr) } return @@ -201,6 +224,18 @@ func (c *Base) CreateClone(cloneRequest *types.CloneCreateRequest) (*models.Clon c.fillCloneSession(cloneID, session) c.SaveClonesState() + + c.webhookCh <- webhooks.CloneEvent{ + BasicEvent: webhooks.BasicEvent{ + EventType: webhooks.CloneCreatedEvent, + EntityID: cloneID, + }, + Host: c.config.AccessHost, + Port: session.Port, + Username: clone.DB.Username, + DBName: clone.DB.DBName, + ContainerName: cloneID, + } }() return clone, nil @@ -212,7 +247,7 @@ func (c *Base) fillCloneSession(cloneID string, session *resources.Session) { w, ok := c.clones[cloneID] if !ok { - log.Errf("Clone %q not found", cloneID) + log.Errf("clone %q not found", cloneID) return } @@ -225,15 +260,14 @@ func (c *Base) fillCloneSession(cloneID string, session *resources.Session) { Message: models.CloneMessageOK, } - dbName := clone.DB.DBName - if dbName == "" { - dbName = defaultDatabaseName + if dbName := clone.DB.DBName; dbName == "" { + clone.DB.DBName = c.global.Database.Name() } clone.DB.Port = strconv.FormatUint(uint64(session.Port), 10) clone.DB.Host = c.config.AccessHost clone.DB.ConnStr = fmt.Sprintf("host=%s port=%s user=%s dbname=%s", - clone.DB.Host, clone.DB.Port, clone.DB.Username, dbName) + clone.DB.Host, clone.DB.Port, clone.DB.Username, clone.DB.DBName) clone.Metadata = models.CloneMetadata{ CloningTime: w.TimeStartedAt.Sub(w.TimeCreatedAt).Seconds(), @@ -271,10 +305,30 @@ func (c *Base) DestroyClone(cloneID string) error { return models.New(models.ErrCodeNotFound, "clone not found") } + if err := c.destroyPreChecks(cloneID, w); err != nil { + if stderrors.Is(err, errNoSession) { + return nil + } + + return err + } + + go c.destroyClone(cloneID, w) + + return nil +} + +var errNoSession = errors.New("no clone session") + +func (c *Base) destroyPreChecks(cloneID string, w *CloneWrapper) error { if w.Clone.Protected && w.Clone.Status.Code != models.StatusFatal { return 
models.New(models.ErrCodeBadRequest, "clone is protected")
 	}
 
+	if c.hasDependentSnapshots(w) {
+		log.Warn("clone has dependent snapshots", cloneID)
+	}
+
 	if err := c.UpdateCloneStatus(cloneID, models.Status{
 		Code:    models.StatusDeleting,
 		Message: models.CloneMessageDeleting,
@@ -289,34 +343,65 @@ func (c *Base) DestroyClone(cloneID string) error {
 			c.decrementCloneNumber(w.Clone.Snapshot.ID)
 		}
 
-		return nil
+	return errNoSession
 	}
 
-	go func() {
-		if err := c.provision.StopSession(w.Session); err != nil {
-			log.Errf("Failed to delete a clone: %v.", err)
+	return nil
+}
 
-			if updateErr := c.UpdateCloneStatus(cloneID, models.Status{
-				Code:    models.StatusFatal,
-				Message: errors.Cause(err).Error(),
-			}); updateErr != nil {
-				log.Errf("Failed to update clone status: %v", updateErr)
-			}
+// DestroyCloneSync destroys clone synchronously, reusing the same pre-checks
+// and teardown as DestroyClone.
+func (c *Base) DestroyCloneSync(cloneID string) error {
+	w, ok := c.findWrapper(cloneID)
+	if !ok {
+		return models.New(models.ErrCodeNotFound, "clone not found")
+	}
 
-			return
+	if err := c.destroyPreChecks(cloneID, w); err != nil {
+		if stderrors.Is(err, errNoSession) {
+			return nil
 		}
 
-		c.deleteClone(cloneID)
+		return err
+	}
 
-		if w.Clone.Snapshot != nil {
-			c.decrementCloneNumber(w.Clone.Snapshot.ID)
+	c.destroyClone(cloneID, w)
+
+	return nil
+}
+
+func (c *Base) destroyClone(cloneID string, w *CloneWrapper) {
+	if err := c.provision.StopSession(w.Session, w.Clone); err != nil {
+		log.Errf("failed to delete clone: %v", err)
+
+		if updateErr := c.UpdateCloneStatus(cloneID, models.Status{
+			Code:    models.StatusFatal,
+			Message: errors.Cause(err).Error(),
+		}); updateErr != nil {
+			log.Errf("failed to update clone status: %v", updateErr)
 		}
 
-		c.observingCh <- cloneID
-		c.SaveClonesState()
-	}()
+		return
+	}
 
-	return nil
+	c.deleteClone(cloneID)
+
+	if w.Clone.Snapshot != nil {
+		c.decrementCloneNumber(w.Clone.Snapshot.ID)
+	}
+
+	c.observingCh <- cloneID
+
+	c.SaveClonesState()
+
+	c.webhookCh <- webhooks.CloneEvent{
+		BasicEvent: webhooks.BasicEvent{
+			EventType: webhooks.CloneDeleteEvent,
+			EntityID:  cloneID,
+		},
+		Host:          c.config.AccessHost,
+		Port:          w.Session.Port,
+		Username:      w.Clone.DB.Username,
+		DBName:        w.Clone.DB.DBName,
+		ContainerName: cloneID,
+	}
 }
 
 // GetClone returns clone by ID.
@@ -337,10 +422,10 @@ func (c *Base) refreshCloneMetadata(w *CloneWrapper) {
 		return
 	}
 
-	sessionState, err := c.provision.GetSessionState(w.Session)
+	sessionState, err := c.provision.GetSessionState(w.Session, w.Clone.Branch, w.Clone.ID)
 	if err != nil {
 		// Session not ready yet.
-		log.Err(fmt.Errorf("failed to get a session state: %w", err))
+		log.Err(fmt.Errorf("failed to get session state: %w", err))
 		return
 	}
@@ -384,6 +469,21 @@ func (c *Base) UpdateCloneStatus(cloneID string, status models.Status) error {
 	return nil
 }
 
+// UpdateCloneSnapshot updates clone snapshot.
+func (c *Base) UpdateCloneSnapshot(cloneID string, snapshot *models.Snapshot) error {
+	c.cloneMutex.Lock()
+	defer c.cloneMutex.Unlock()
+
+	w, ok := c.clones[cloneID]
+	if !ok {
+		return errors.Errorf("clone %q not found", cloneID)
+	}
+
+	w.Clone.Snapshot = snapshot
+
+	return nil
+}
+
 // ResetClone resets clone to chosen snapshot.
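//
// When the clone has dependent snapshots, the reset is not blocked; instead
// the clone's revision is bumped, as in this fragment from the method body
// below:
//
//	if c.hasDependentSnapshots(w) {
//		w.Clone.Revision++
//		w.Clone.HasDependent = true
//	}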
func (c *Base) ResetClone(cloneID string, resetOptions types.ResetCloneRequest) error { w, ok := c.findWrapper(cloneID) @@ -418,6 +518,18 @@ func (c *Base) ResetClone(cloneID string, resetOptions types.ResetCloneRequest) return errors.Wrap(err, "failed to update clone status") } + if c.hasDependentSnapshots(w) { + log.Warn("clone has dependent snapshots", cloneID) + c.cloneMutex.Lock() + w.Clone.Revision++ + w.Clone.HasDependent = true + c.cloneMutex.Unlock() + } else { + c.cloneMutex.Lock() + w.Clone.HasDependent = false + c.cloneMutex.Unlock() + } + go func() { var originalSnapshotID string @@ -425,9 +537,9 @@ func (c *Base) ResetClone(cloneID string, resetOptions types.ResetCloneRequest) originalSnapshotID = w.Clone.Snapshot.ID } - snapshot, err := c.provision.ResetSession(w.Session, snapshotID) + snapshot, err := c.provision.ResetSession(w.Session, w.Clone, snapshotID) if err != nil { - log.Errf("Failed to reset clone: %v", err) + log.Errf("failed to reset clone: %v", err) if updateErr := c.UpdateCloneStatus(cloneID, models.Status{ Code: models.StatusFatal, @@ -443,7 +555,7 @@ func (c *Base) ResetClone(cloneID string, resetOptions types.ResetCloneRequest) w.Clone.Snapshot = snapshot c.cloneMutex.Unlock() c.decrementCloneNumber(originalSnapshotID) - c.incrementCloneNumber(snapshot.ID) + c.IncrementCloneNumber(snapshot.ID) if err := c.UpdateCloneStatus(cloneID, models.Status{ Code: models.StatusOK, @@ -454,6 +566,18 @@ func (c *Base) ResetClone(cloneID string, resetOptions types.ResetCloneRequest) c.SaveClonesState() + c.webhookCh <- webhooks.CloneEvent{ + BasicEvent: webhooks.BasicEvent{ + EventType: webhooks.CloneResetEvent, + EntityID: cloneID, + }, + Host: c.config.AccessHost, + Port: w.Session.Port, + Username: w.Clone.DB.Username, + DBName: w.Clone.DB.DBName, + ContainerName: cloneID, + } + c.tm.SendEvent(context.Background(), telemetry.CloneResetEvent, telemetry.CloneCreated{ ID: util.HashID(w.Clone.ID), CloningTime: w.Clone.Metadata.CloningTime, @@ -486,6 +610,16 @@ func (c *Base) GetSnapshots() ([]models.Snapshot, error) { return c.getSnapshotList(), nil } +// GetSnapshotByID returns snapshot by ID. +func (c *Base) GetSnapshotByID(snapshotID string) (*models.Snapshot, error) { + return c.getSnapshotByID(snapshotID) +} + +// ReloadSnapshots reloads snapshot list. +func (c *Base) ReloadSnapshots() error { + return c.fetchSnapshots() +} + // GetClones returns the list of clones descend ordered by creation time. 
func (c *Base) GetClones() []*models.Clone { clones := make([]*models.Clone, 0, c.lenClones()) @@ -495,7 +629,7 @@ func (c *Base) GetClones() []*models.Clone { if cloneWrapper.Clone.Snapshot != nil { snapshot, err := c.getSnapshotByID(cloneWrapper.Clone.Snapshot.ID) if err != nil { - log.Err("Snapshot not found: ", cloneWrapper.Clone.Snapshot.ID) + log.Err("snapshot not found: ", cloneWrapper.Clone.Snapshot.ID) } if snapshot != nil { @@ -595,7 +729,7 @@ func (c *Base) destroyIdleClones(ctx context.Context) { default: isIdleClone, err := c.isIdleClone(cloneWrapper) if err != nil { - log.Errf("Failed to check the idleness of clone %s: %v.", cloneWrapper.Clone.ID, err) + log.Errf("failed to check idleness of clone %s: %v", cloneWrapper.Clone.ID, err) continue } @@ -603,7 +737,7 @@ func (c *Base) destroyIdleClones(ctx context.Context) { log.Msg(fmt.Sprintf("Idle clone %q is going to be removed.", cloneWrapper.Clone.ID)) if err = c.DestroyClone(cloneWrapper.Clone.ID); err != nil { - log.Errf("Failed to destroy clone: %v.", err) + log.Errf("failed to destroy clone: %v", err) continue } } @@ -618,7 +752,8 @@ func (c *Base) isIdleClone(wrapper *CloneWrapper) (bool, error) { idleDuration := time.Duration(c.config.MaxIdleMinutes) * time.Minute minimumTime := currentTime.Add(-idleDuration) - if wrapper.Clone.Protected || wrapper.Clone.Status.Code == models.StatusExporting || wrapper.TimeStartedAt.After(minimumTime) { + if wrapper.Clone.Protected || wrapper.Clone.Status.Code == models.StatusExporting || wrapper.TimeStartedAt.After(minimumTime) || + c.hasDependentSnapshots(wrapper) { return false, nil } @@ -632,10 +767,11 @@ func (c *Base) isIdleClone(wrapper *CloneWrapper) (bool, error) { return false, errors.New("failed to get clone session") } - if _, err := c.provision.LastSessionActivity(session, minimumTime); err != nil { + if _, err := c.provision.LastSessionActivity(session, wrapper.Clone.Branch, wrapper.Clone.ID, wrapper.Clone.Revision, + minimumTime); err != nil { if err == pglog.ErrNotFound { - log.Dbg(fmt.Sprintf("Not found recent activity for the session: %q. Clone name: %q", - session.ID, util.GetCloneName(session.Port))) + log.Dbg(fmt.Sprintf("Not found recent activity for session: %q. Clone name: %q", + session.ID, wrapper.Clone.ID)) return hasNotQueryActivity(session) } @@ -660,7 +796,7 @@ func hasNotQueryActivity(session *resources.Session) (bool, error) { defer func() { if err := db.Close(); err != nil { - log.Err("Cannot close database connection.") + log.Err("cannot close database connection") } }() diff --git a/engine/internal/cloning/snapshots.go b/engine/internal/cloning/snapshots.go index 6e353182..43044308 100644 --- a/engine/internal/cloning/snapshots.go +++ b/engine/internal/cloning/snapshots.go @@ -6,12 +6,14 @@ package cloning import ( "sort" + "strings" "sync" "github.com/pkg/errors" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" ) // SnapshotBox contains instance snapshots. 
@@ -30,13 +32,13 @@ func (c *Base) fetchSnapshots() error { var latestSnapshot *models.Snapshot snapshots := make(map[string]*models.Snapshot, len(entries)) - cloneCounter := c.cloneCounter() + cloneCounters := c.counterClones() for _, entry := range entries { - numClones := 0 + cloneList := []string{} - if num, ok := cloneCounter[entry.ID]; ok { - numClones = num + if foundList, ok := cloneCounters[entry.ID]; ok { + cloneList = foundList } currentSnapshot := &models.Snapshot{ @@ -46,7 +48,10 @@ func (c *Base) fetchSnapshots() error { PhysicalSize: entry.Used, LogicalSize: entry.LogicalReferenced, Pool: entry.Pool, - NumClones: numClones, + Branch: entry.Branch, + NumClones: len(cloneList), + Clones: cloneList, + Message: entry.Message, } snapshots[entry.ID] = currentSnapshot @@ -60,20 +65,21 @@ func (c *Base) fetchSnapshots() error { return nil } -func (c *Base) cloneCounter() map[string]int { - cloneCounter := make(map[string]int) +func (c *Base) counterClones() map[string][]string { + clones := make(map[string][]string, 0) c.cloneMutex.RLock() for cloneName := range c.clones { if c.clones[cloneName] != nil && c.clones[cloneName].Clone.Snapshot != nil { - cloneCounter[c.clones[cloneName].Clone.Snapshot.ID]++ + snapshotID := c.clones[cloneName].Clone.Snapshot.ID + clones[snapshotID] = append(clones[snapshotID], cloneName) } } c.cloneMutex.RUnlock() - return cloneCounter + return clones } func (c *Base) resetSnapshots(snapshotMap map[string]*models.Snapshot, latestSnapshot *models.Snapshot) { @@ -128,13 +134,14 @@ func (c *Base) getSnapshotByID(snapshotID string) (*models.Snapshot, error) { return snapshot, nil } -func (c *Base) incrementCloneNumber(snapshotID string) { +// IncrementCloneNumber increases clone counter by 1. +func (c *Base) IncrementCloneNumber(snapshotID string) { c.snapshotBox.snapshotMutex.Lock() defer c.snapshotBox.snapshotMutex.Unlock() snapshot, ok := c.snapshotBox.items[snapshotID] if !ok { - log.Err("Snapshot not found:", snapshotID) + log.Err("snapshot not found:", snapshotID) return } @@ -147,18 +154,32 @@ func (c *Base) decrementCloneNumber(snapshotID string) { snapshot, ok := c.snapshotBox.items[snapshotID] if !ok { - log.Err("Snapshot not found:", snapshotID) + log.Err("snapshot not found:", snapshotID) return } if snapshot.NumClones == 0 { - log.Err("The number of clones for the snapshot is negative. Snapshot ID:", snapshotID) + log.Err("number of clones for snapshot is negative. Snapshot ID:", snapshotID) return } snapshot.NumClones-- } +// GetCloneNumber counts snapshot clones. 
+func (c *Base) GetCloneNumber(snapshotID string) int { + c.snapshotBox.snapshotMutex.Lock() + defer c.snapshotBox.snapshotMutex.Unlock() + + snapshot, ok := c.snapshotBox.items[snapshotID] + if !ok { + log.Err("snapshot not found:", snapshotID) + return 0 + } + + return snapshot.NumClones +} + func (c *Base) getSnapshotList() []models.Snapshot { c.snapshotBox.snapshotMutex.RLock() defer c.snapshotBox.snapshotMutex.RUnlock() @@ -181,3 +202,18 @@ func (c *Base) getSnapshotList() []models.Snapshot { return snapshots } + +func (c *Base) hasDependentSnapshots(w *CloneWrapper) bool { + c.snapshotBox.snapshotMutex.RLock() + defer c.snapshotBox.snapshotMutex.RUnlock() + + poolName := branching.CloneName(w.Clone.Snapshot.Pool, w.Clone.Branch, w.Clone.ID, w.Clone.Revision) + + for name := range c.snapshotBox.items { + if strings.HasPrefix(name, poolName) { + return true + } + } + + return false +} diff --git a/engine/internal/cloning/snapshots_test.go b/engine/internal/cloning/snapshots_test.go index 7e4ac8c0..2034d023 100644 --- a/engine/internal/cloning/snapshots_test.go +++ b/engine/internal/cloning/snapshots_test.go @@ -110,7 +110,7 @@ func TestCloneCounter(t *testing.T) { require.Nil(t, err) require.Equal(t, 0, snapshot.NumClones) - c.incrementCloneNumber("testSnapshotID") + c.IncrementCloneNumber("testSnapshotID") snapshot, err = c.getSnapshotByID("testSnapshotID") require.Nil(t, err) require.Equal(t, 1, snapshot.NumClones) @@ -158,11 +158,13 @@ func TestInitialCloneCounter(t *testing.T) { c.clones["test_clone002"] = cloneWrapper02 c.clones["test_clone003"] = cloneWrapper03 - counters := c.cloneCounter() + counters := c.counterClones() - require.Equal(t, 2, len(counters)) - require.Equal(t, 2, counters["testSnapshotID"]) - require.Equal(t, 1, counters["testSnapshotID2"]) + require.Len(t, counters, 2) + require.Len(t, counters["testSnapshotID"], 2) + require.Len(t, counters["testSnapshotID2"], 1) + require.Len(t, counters["testSnapshotID3"], 0) + require.ElementsMatch(t, []string{"test_clone001", "test_clone002"}, counters["testSnapshotID"]) } func TestLatestSnapshots(t *testing.T) { diff --git a/engine/internal/cloning/storage.go b/engine/internal/cloning/storage.go index 558b111d..6244a628 100644 --- a/engine/internal/cloning/storage.go +++ b/engine/internal/cloning/storage.go @@ -55,18 +55,18 @@ func (c *Base) restartCloneContainers(ctx context.Context) { continue } - cloneName := util.GetCloneName(wrapper.Session.Port) + cloneName := wrapper.Clone.ID if c.provision.IsCloneRunning(ctx, cloneName) { continue } if err := c.provision.ReconnectClone(ctx, cloneName); err != nil { - log.Err(fmt.Sprintf("Clone container %s cannot be reconnected to the internal network: %s", cloneName, err)) + log.Err(fmt.Sprintf("clone container %s cannot be reconnected to internal network: %s", cloneName, err)) continue } if err := c.provision.StartCloneContainer(ctx, cloneName); err != nil { - log.Err(fmt.Sprintf("Clone container %s cannot start: %s", cloneName, err)) + log.Err(fmt.Sprintf("clone container %s cannot start: %s", cloneName, err)) continue } @@ -102,11 +102,11 @@ func (c *Base) filterRunningClones(ctx context.Context) { snapshotCache[snapshot.ID] = struct{}{} } - if !c.provision.IsCloneRunning(ctx, util.GetCloneName(wrapper.Session.Port)) { + if !c.provision.IsCloneRunning(ctx, wrapper.Clone.ID) { delete(c.clones, cloneID) } - c.incrementCloneNumber(wrapper.Clone.Snapshot.ID) + c.IncrementCloneNumber(wrapper.Clone.Snapshot.ID) } } @@ -114,11 +114,11 @@ func (c *Base) filterRunningClones(ctx 
context.Context) { func (c *Base) SaveClonesState() { sessionsPath, err := util.GetMetaPath(sessionsFilename) if err != nil { - log.Err("failed to get path of a sessions file", err) + log.Err("failed to get path of sessions file", err) } if err := c.saveClonesState(sessionsPath); err != nil { - log.Err("Failed to save the state of running clones", err) + log.Err("failed to save state of running clones", err) } } diff --git a/engine/internal/cloning/storage_test.go b/engine/internal/cloning/storage_test.go index 4df70a22..70036449 100644 --- a/engine/internal/cloning/storage_test.go +++ b/engine/internal/cloning/storage_test.go @@ -122,7 +122,7 @@ func TestSavingSessionState(t *testing.T) { prov, err := newProvisioner() assert.NoError(t, err) - s := NewBase(nil, prov, &telemetry.Agent{}, nil) + s := NewBase(nil, nil, prov, &telemetry.Agent{}, nil, nil) err = s.saveClonesState(f.Name()) assert.NoError(t, err) @@ -166,7 +166,7 @@ func TestFilter(t *testing.T) { assert.NoError(t, err) defer func() { _ = os.Remove(filepath) }() - s := NewBase(nil, prov, &telemetry.Agent{}, nil) + s := NewBase(nil, nil, prov, &telemetry.Agent{}, nil, nil) s.filterRunningClones(context.Background()) assert.Equal(t, 0, len(s.clones)) diff --git a/engine/internal/diagnostic/logs.go b/engine/internal/diagnostic/logs.go index 25fc552b..5649d14d 100644 --- a/engine/internal/diagnostic/logs.go +++ b/engine/internal/diagnostic/logs.go @@ -87,13 +87,13 @@ func CollectContainerDiagnostics(ctx context.Context, client *client.Client, con err = collectContainerLogs(ctx, client, diagnosticsDir, containerName) if err != nil { - log.Warn("Failed to collect container logs ", containerName, err) + log.Warn("failed to collect container logs ", containerName, err) } err = collectPostgresLogs(ctx, client, diagnosticsDir, containerName, dbDataDir) if err != nil { - log.Warn("Failed to collect Postgres logs ", containerName, err) + log.Warn("failed to collect Postgres logs ", containerName, err) } } @@ -107,7 +107,7 @@ func collectContainersOutput(ctx context.Context, client *client.Client, diagnos for _, containerName := range containerList { err = collectContainerLogs(ctx, client, diagnosticDir, containerName) if err != nil { - log.Warn("Failed to collect container logs ", containerName, err) + log.Warn("failed to collect container logs ", containerName, err) } } @@ -236,7 +236,7 @@ func extractTar(dir string, reader *tar.Reader, header *tar.Header) error { defer func() { if err := f.Close(); err != nil { - log.Err("Failed to close TAR stream", err) + log.Err("failed to close TAR stream", err) } }() @@ -255,14 +255,14 @@ func cleanLogsFunc(logRetentionDays int) func() { log.Dbg("Cleaning old logs", logsDir) if err != nil { - log.Err("Failed to fetch logs dir", err) + log.Err("failed to fetch logs dir", err) return } err = cleanupLogsDir(logsDir, logRetentionDays) if err != nil { - log.Err("Failed to fetch logs dir", err) + log.Err("failed to fetch logs dir", err) return } } @@ -273,7 +273,7 @@ func cleanupLogsDir(logsDir string, logRetentionDays int) error { dirList, err := os.ReadDir(logsDir) if err != nil { - log.Err("Failed list logs directories", err) + log.Err("failed to list logs directories", err) return err } @@ -285,7 +285,7 @@ func cleanupLogsDir(logsDir string, logRetentionDays int) error { dirTime, err := time.Parse(timeFormat, name) if err != nil { - log.Warn("Failed to parse time", name, err) + log.Warn("failed to parse time", name, err) continue } @@ -296,7 +296,7 @@ func cleanupLogsDir(logsDir string, 
logRetentionDays int) error { log.Dbg("Removing old logs directory", name) if err = os.RemoveAll(path.Join(logsDir, name)); err != nil { - log.Err("Directory removal failed", err) + log.Err("directory removal failed", err) } } diff --git a/engine/internal/observer/observer.go b/engine/internal/observer/observer.go index 25bdf0ef..563b5d03 100644 --- a/engine/internal/observer/observer.go +++ b/engine/internal/observer/observer.go @@ -12,7 +12,6 @@ import ( "io" "os" "regexp" - "strconv" "sync" "time" @@ -80,13 +79,8 @@ func NewObserver(dockerClient *client.Client, cfg *Config, pm *pool.Manager) *Ob // GetCloneLog gets clone logs. // TODO (akartasov): Split log to chunks. -func (o *Observer) GetCloneLog(ctx context.Context, port string, obsClone *ObservingClone) ([]byte, error) { - clonePort, err := strconv.Atoi(port) - if err != nil { - return nil, errors.Wrap(err, "failed to parse clone port") - } - - fileSelector := pglog.NewSelector(obsClone.pool.ClonePath(uint(clonePort))) +func (o *Observer) GetCloneLog(ctx context.Context, obsClone *ObservingClone) ([]byte, error) { + fileSelector := pglog.NewSelector(obsClone.pool.ClonePath(obsClone.branch, obsClone.cloneID, obsClone.revision)) fileSelector.SetMinimumTime(obsClone.session.StartedAt) if err := fileSelector.DiscoverLogDir(); err != nil { @@ -127,7 +121,7 @@ func (o *Observer) processCSVLogFile(ctx context.Context, buf io.Writer, filenam defer func() { if err := logFile.Close(); err != nil { - log.Errf("Failed to close a CSV log file: %s", err.Error()) + log.Errf("failed to close CSV log file: %s", err.Error()) } }() @@ -193,11 +187,13 @@ func (o *Observer) maskLogs(entry []string, maskedFieldIndexes []int) { } // AddObservingClone adds a new observing session to storage. -func (o *Observer) AddObservingClone(cloneID string, port uint, session *ObservingClone) { +func (o *Observer) AddObservingClone(cloneID, branch string, revision int, port uint, session *ObservingClone) { o.sessionMu.Lock() defer o.sessionMu.Unlock() session.pool = o.pm.First().Pool() session.cloneID = cloneID + session.branch = branch + session.revision = revision session.port = port o.storage[cloneID] = session diff --git a/engine/internal/observer/observing_clone.go b/engine/internal/observer/observing_clone.go index dc85387e..a46cfd6c 100644 --- a/engine/internal/observer/observing_clone.go +++ b/engine/internal/observer/observing_clone.go @@ -43,6 +43,8 @@ var maskedFields = map[string]struct{}{ type ObservingClone struct { pool *resources.Pool cloneID string + branch string + revision int port uint superUserDB *pgx.Conn @@ -217,7 +219,7 @@ func (c *ObservingClone) RunSession() error { defer func() { if err := c.db.Close(ctx); err != nil { - log.Err("Failed to close a database connection after observation for SessionID: ", c.session.SessionID) + log.Err("failed to close database connection after observation for SessionID: ", c.session.SessionID) } }() @@ -254,7 +256,7 @@ func (c *ObservingClone) RunSession() error { log.Dbg("Stop observation for SessionID: ", c.session.SessionID) if err := c.storeArtifacts(); err != nil { - log.Err("Failed to store artifacts: ", err) + log.Err("failed to store artifacts: ", err) } c.done <- struct{}{} @@ -479,7 +481,7 @@ func (c *ObservingClone) currentArtifactsSessionPath() string { } func (c *ObservingClone) artifactsSessionPath(sessionID uint64) string { - return path.Join(c.pool.ObserverDir(c.port), c.cloneID, strconv.FormatUint(sessionID, 10)) + return path.Join(c.pool.ObserverDir(c.branch, c.cloneID, c.revision), 
c.cloneID, strconv.FormatUint(sessionID, 10)) } // CheckPerformanceRequirements checks monitoring data and returns an error if any of performance requires was not satisfied. diff --git a/engine/internal/observer/sql.go b/engine/internal/observer/sql.go index 8db4d99c..88fc4623 100644 --- a/engine/internal/observer/sql.go +++ b/engine/internal/observer/sql.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "path" - "strconv" "strings" "github.com/jackc/pgx/v4" @@ -17,16 +16,11 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/engine/postgres/tools/defaults" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" - "gitlab.com/postgres-ai/database-lab/v3/pkg/util" ) // InitConnection creates a new connection to the clone database. func InitConnection(clone *models.Clone, socketDir string) (*pgx.Conn, error) { - host, err := unixSocketDir(socketDir, clone.DB.Port) - if err != nil { - return nil, errors.Wrap(err, "failed to parse clone port") - } - + host := unixSocketDir(socketDir, clone.ID) connectionStr := buildConnectionString(clone, host) conn, err := pgx.Connect(context.Background(), connectionStr) @@ -73,13 +67,8 @@ func runQuery(ctx context.Context, db *pgx.Conn, query string, args ...interface return result.String(), nil } -func unixSocketDir(socketDir, portStr string) (string, error) { - port, err := strconv.ParseUint(portStr, 10, 64) - if err != nil { - return "", err - } - - return path.Join(socketDir, util.GetCloneName(uint(port))), nil +func unixSocketDir(socketDir, cloneID string) string { + return path.Join(socketDir, cloneID) } func buildConnectionString(clone *models.Clone, socketDir string) string { diff --git a/engine/internal/provision/databases/postgres/postgres.go b/engine/internal/provision/databases/postgres/postgres.go index 74df3d87..5ca5bb94 100644 --- a/engine/internal/provision/databases/postgres/postgres.go +++ b/engine/internal/provision/databases/postgres/postgres.go @@ -99,7 +99,7 @@ func Start(r runners.Runner, c *resources.AppConfig) error { _, err = pgctlPromote(r, c) if err != nil { - if runnerError := Stop(r, c.Pool, c.CloneName); runnerError != nil { + if runnerError := Stop(r, c.Pool, c.CloneName, strconv.FormatUint(uint64(c.Port), 10)); runnerError != nil { log.Err(runnerError) } @@ -107,7 +107,7 @@ func Start(r runners.Runner, c *resources.AppConfig) error { } } } else { - log.Err("Currently cannot connect to Postgres: ", out, err) + log.Err("currently cannot connect to Postgres: ", out, err) } cnt++ @@ -115,7 +115,7 @@ func Start(r runners.Runner, c *resources.AppConfig) error { if cnt > waitPostgresTimeout { collectDiagnostics(c) - if runnerErr := Stop(r, c.Pool, c.CloneName); runnerErr != nil { + if runnerErr := Stop(r, c.Pool, c.CloneName, strconv.FormatUint(uint64(c.Port), 10)); runnerErr != nil { log.Err(runnerErr) } @@ -138,7 +138,7 @@ func collectDiagnostics(c *resources.AppConfig) { } // Stop stops Postgres instance. 
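// A minimal usage sketch for the updated signature below, assuming a runner
// r, a pool p, and a clone value are in scope (mode_local.go calls it this
// way further down in this change):
//
//	if err := postgres.Stop(r, p, clone.ID, clone.DB.Port); err != nil {
//		log.Err(err)
//	}
//
// Passing the port lets Stop remove only this clone's socket files rather
// than wiping the entire socket directory.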
-func Stop(r runners.Runner, p *resources.Pool, name string) error { +func Stop(r runners.Runner, p *resources.Pool, name, port string) error { log.Dbg("Stopping Postgres container...") if _, err := docker.RemoveContainer(r, name); err != nil { @@ -151,8 +151,8 @@ func Stop(r runners.Runner, p *resources.Pool, name string) error { log.Msg("docker container was not found, ignore", err) } - if _, err := r.Run("rm -rf " + p.SocketCloneDir(name) + "/*"); err != nil { - return errors.Wrap(err, "failed to clean unix socket directory") + if _, err := r.Run("rm -rf " + p.SocketCloneDir(name) + "/.*" + port); err != nil { + return errors.Wrap(err, "failed to clean Unix socket directory") } return nil @@ -186,6 +186,33 @@ func getPgConnStr(host, dbname, username string, port uint) string { return sb.String() } +// runExistsSQL executes simple SQL commands which returns one bool value. +func runExistsSQL(command, connStr string) (bool, error) { + db, err := sql.Open("postgres", connStr) + + if err != nil { + return false, fmt.Errorf("cannot connect to database: %w", err) + } + + var result bool + + row := db.QueryRow(command) + err = row.Scan(&result) + + defer func() { + err := db.Close() + if err != nil { + log.Err("cannot close database connection") + } + }() + + if err != nil && err == sql.ErrNoRows { + return false, nil + } + + return result, err +} + // runSimpleSQL executes simple SQL commands which returns one string value. func runSimpleSQL(command, connStr string) (string, error) { db, err := sql.Open("postgres", connStr) @@ -201,7 +228,7 @@ func runSimpleSQL(command, connStr string) (string, error) { defer func() { err := db.Close() if err != nil { - log.Err("Cannot close database connection.") + log.Err("cannot close database connection") } }() diff --git a/engine/internal/provision/databases/postgres/postgres_mgmt.go b/engine/internal/provision/databases/postgres/postgres_mgmt.go index a9562a95..718354cb 100644 --- a/engine/internal/provision/databases/postgres/postgres_mgmt.go +++ b/engine/internal/provision/databases/postgres/postgres_mgmt.go @@ -82,10 +82,18 @@ func CreateUser(c *resources.AppConfig, user resources.EphemeralUser) error { dbName = user.AvailableDB } + // check user + pgConnStr := getPgConnStr(c.Host, dbName, c.DB.Username, c.Port) + + userExists, err := runExistsSQL(userExistsQuery(user.Name), pgConnStr) + if err != nil { + return fmt.Errorf("failed to check if user exists: %w", err) + } + if user.Restricted { - // create restricted user - query = restrictedUserQuery(user.Name, user.Password) - out, err := runSimpleSQL(query, getPgConnStr(c.Host, dbName, c.DB.Username, c.Port)) + // Create or alter restricted user. + query = restrictedUserQuery(user.Name, user.Password, userExists) + out, err := runSimpleSQL(query, pgConnStr) if err != nil { return fmt.Errorf("failed to create restricted user: %w", err) @@ -93,8 +101,18 @@ func CreateUser(c *resources.AppConfig, user resources.EphemeralUser) error { log.Dbg("Restricted user has been created: ", out) - // set restricted user as owner for database objects - databaseList, err := runSQLSelectQuery(selectAllDatabases, getPgConnStr(c.Host, dbName, c.DB.Username, c.Port)) + // Change user ownership. + query = restrictedUserOwnershipQuery(user.Name, user.Password) + out, err = runSimpleSQL(query, pgConnStr) + + if err != nil { + return fmt.Errorf("failed to create restricted user: %w", err) + } + + log.Dbg("Database ownership has been changed: ", out) + + // Set restricted user as owner for database objects. 
+ databaseList, err := runSQLSelectQuery(selectAllDatabases, pgConnStr) if err != nil { return fmt.Errorf("failed list all databases: %w", err) @@ -111,26 +129,47 @@ func CreateUser(c *resources.AppConfig, user resources.EphemeralUser) error { log.Dbg("Objects restriction applied", database, out) } } else { - query = superuserQuery(user.Name, user.Password) + query = superuserQuery(user.Name, user.Password, userExists) - out, err := runSimpleSQL(query, getPgConnStr(c.Host, dbName, c.DB.Username, c.Port)) + out, err := runSimpleSQL(query, pgConnStr) if err != nil { return fmt.Errorf("failed to create superuser: %w", err) } - log.Dbg("Super user has been created: ", out) + log.Dbg("Superuser has been created: ", out) + + return nil } return nil } -func superuserQuery(username, password string) string { - return fmt.Sprintf(`create user %s with password %s login superuser;`, pq.QuoteIdentifier(username), pq.QuoteLiteral(password)) +func superuserQuery(username, password string, exists bool) string { + if exists { + return fmt.Sprintf(`alter role %s with password %s login superuser;`, + pq.QuoteIdentifier(username), pq.QuoteLiteral(password)) + } + + return fmt.Sprintf(`create user %s with password %s login superuser;`, + pq.QuoteIdentifier(username), pq.QuoteLiteral(password)) +} + +func restrictedUserQuery(username, password string, exists bool) string { + if exists { + return fmt.Sprintf(`alter role %s with password %s login;`, + pq.QuoteIdentifier(username), pq.QuoteLiteral(password)) + } + + return fmt.Sprintf(`create user %s with password %s login;`, + pq.QuoteIdentifier(username), pq.QuoteLiteral(password)) +} + +func userExistsQuery(username string) string { + return fmt.Sprintf(`select exists (select from pg_roles where rolname = %s)`, pq.QuoteLiteral(username)) } const restrictionUserCreationTemplate = ` --- create a new user -create user @username with password @password login; +-- change owner do $$ declare new_owner text; @@ -307,7 +346,7 @@ end $$; ` -func restrictedUserQuery(username, password string) string { +func restrictedUserOwnershipQuery(username, password string) string { repl := strings.NewReplacer( "@usernameStr", pq.QuoteLiteral(username), "@username", pq.QuoteIdentifier(username), diff --git a/engine/internal/provision/databases/postgres/postgres_mgmt_test.go b/engine/internal/provision/databases/postgres/postgres_mgmt_test.go index e510484f..332e582d 100644 --- a/engine/internal/provision/databases/postgres/postgres_mgmt_test.go +++ b/engine/internal/provision/databases/postgres/postgres_mgmt_test.go @@ -11,45 +11,89 @@ import ( ) func TestSuperuserQuery(t *testing.T) { + const ( + user = "user1" + userTest = "user.test\"" + pwd = "pwd" + pwdQuote = "pwd\\'--" + ) + + t.Run("username and password must be quoted", func(t *testing.T) { + assert.Equal(t, `create user "user1" with password 'pwd' login superuser;`, superuserQuery(user, pwd, false)) + }) + + t.Run("username and password must be quoted", func(t *testing.T) { + assert.Equal(t, `alter role "user1" with password 'pwd' login superuser;`, superuserQuery(user, pwd, true)) + }) + + t.Run("special chars must be quoted", func(t *testing.T) { + + assert.Equal(t, `create user "user.test""" with password E'pwd\\''--' login superuser;`, + superuserQuery(userTest, pwdQuote, false)) + }) + + t.Run("special chars must be quoted", func(t *testing.T) { + assert.Equal(t, `alter role "user.test""" with password E'pwd\\''--' login superuser;`, + superuserQuery(userTest, pwdQuote, true)) + }) +} + +func 
TestRestrictedUserQuery(t *testing.T) { t.Run("username and password must be quoted", func(t *testing.T) { user := "user1" pwd := "pwd" - assert.Equal(t, `create user "user1" with password 'pwd' login superuser;`, superuserQuery(user, pwd)) + query := restrictedUserQuery(user, pwd, false) + + assert.Contains(t, query, `create user "user1" with password 'pwd' login;`) + }) + + t.Run("username and password must be quoted", func(t *testing.T) { + user := "user1" + pwd := "pwd" + query := restrictedUserQuery(user, pwd, true) + + assert.Contains(t, query, `alter role "user1" with password 'pwd' login;`) + }) + + t.Run("special chars must be quoted", func(t *testing.T) { + user := "user.test\"" + pwd := "pwd\\'--" + query := restrictedUserQuery(user, pwd, false) + + assert.Contains(t, query, `create user "user.test""" with password E'pwd\\''--' login;`) }) t.Run("special chars must be quoted", func(t *testing.T) { user := "user.test\"" pwd := "pwd\\'--" - assert.Equal(t, `create user "user.test""" with password E'pwd\\''--' login superuser;`, superuserQuery(user, pwd)) + query := restrictedUserQuery(user, pwd, true) + + assert.Contains(t, query, `alter role "user.test""" with password E'pwd\\''--' login;`) }) } -func TestRestrictedUserQuery(t *testing.T) { +func TestRestrictedUserOwnershipQuery(t *testing.T) { t.Run("username and password must be quoted", func(t *testing.T) { user := "user1" pwd := "pwd" - query := restrictedUserQuery(user, pwd) + query := restrictedUserOwnershipQuery(user, pwd) - assert.Contains(t, query, `create user "user1" with password 'pwd' login;`) assert.Contains(t, query, `new_owner := 'user1'`) - }) t.Run("special chars must be quoted", func(t *testing.T) { user := "user.test\"" pwd := "pwd\\'--" - query := restrictedUserQuery(user, pwd) + query := restrictedUserOwnershipQuery(user, pwd) - assert.Contains(t, query, `create user "user.test""" with password E'pwd\\''--' login;`) assert.Contains(t, query, `new_owner := 'user.test"'`) }) t.Run("change owner of all databases", func(t *testing.T) { user := "user.test" pwd := "pwd" - query := restrictedUserQuery(user, pwd) + query := restrictedUserOwnershipQuery(user, pwd) assert.Contains(t, query, `select datname from pg_catalog.pg_database where not datistemplat`) }) - } diff --git a/engine/internal/provision/databases/postgres/postgres_test.go b/engine/internal/provision/databases/postgres/postgres_test.go index 5484ae0d..b82c8cbd 100644 --- a/engine/internal/provision/databases/postgres/postgres_test.go +++ b/engine/internal/provision/databases/postgres/postgres_test.go @@ -67,7 +67,7 @@ func TestRemoveContainers(t *testing.T) { })). 
Return("", nil) - err := Stop(runner, p, "test_clone") + err := Stop(runner, p, "test_clone", "6200") assert.Equal(t, tc.err, errors.Cause(err)) } diff --git a/engine/internal/provision/docker/docker_test.go b/engine/internal/provision/docker/docker_test.go index ef7287e5..edf43e39 100644 --- a/engine/internal/provision/docker/docker_test.go +++ b/engine/internal/provision/docker/docker_test.go @@ -40,11 +40,12 @@ func TestVolumesBuilding(t *testing.T) { { appConfig: &resources.AppConfig{ CloneName: "dblab_clone_6000", + Branch: "main", + Revision: 0, Pool: &resources.Pool{ Name: "dblab_pool", PoolDirName: "dblab_pool", MountDir: "/var/lib/dblab/", - CloneSubDir: "clones", DataSubDir: "data", SocketSubDir: "sockets", }, @@ -61,7 +62,7 @@ func TestVolumesBuilding(t *testing.T) { }, expectedVolumes: []string{ "--volume /var/lib/dblab/dblab_pool/sockets/dblab_clone_6000:/var/lib/dblab/dblab_pool/sockets/dblab_clone_6000:rshared", - "--volume /var/lib/dblab/dblab_pool/clones/dblab_clone_6000:/var/lib/dblab/dblab_pool/clones/dblab_clone_6000:rshared", + "--volume /var/lib/dblab/dblab_pool/branch/main/dblab_clone_6000/r0:/var/lib/dblab/dblab_pool/branch/main/dblab_clone_6000/r0:rshared", }, }, } @@ -80,7 +81,9 @@ func TestDefaultVolumes(t *testing.T) { pool.SocketSubDir = "socket" appConfig := &resources.AppConfig{ - Pool: pool, + Pool: pool, + Branch: "main", + Revision: 0, } unixSocketCloneDir, volumes := createDefaultVolumes(appConfig) @@ -91,7 +94,7 @@ func TestDefaultVolumes(t *testing.T) { assert.Equal(t, 2, len(volumes)) assert.ElementsMatch(t, []string{ - "--volume /tmp/test/default:/tmp/test/default", + "--volume /tmp/test/default/branch/main/r0:/tmp/test/default/branch/main/r0", "--volume /tmp/test/default/socket:/tmp/test/default/socket"}, volumes) } diff --git a/engine/internal/provision/mode_local.go b/engine/internal/provision/mode_local.go index 9bbb9243..7bc89cab 100644 --- a/engine/internal/provision/mode_local.go +++ b/engine/internal/provision/mode_local.go @@ -34,7 +34,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/engine/postgres/tools/fs" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" - "gitlab.com/postgres-ai/database-lab/v3/pkg/util" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" "gitlab.com/postgres-ai/database-lab/v3/pkg/util/networks" "gitlab.com/postgres-ai/database-lab/v3/pkg/util/pglog" ) @@ -151,9 +151,9 @@ func (p *Provisioner) ContainerOptions() models.ContainerOptions { } // StartSession starts a new session. 
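// A minimal caller sketch for the clone-based API below, assuming a
// provisioner p, a clone with snapshot and branch populated, an ephemeral
// user u, and an extraConf map:
//
//	session, err := p.StartSession(clone, u, extraConf)
//	if err != nil {
//		return fmt.Errorf("failed to start session: %w", err)
//	}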
-func (p *Provisioner) StartSession(snapshotID string, user resources.EphemeralUser, +func (p *Provisioner) StartSession(clone *models.Clone, user resources.EphemeralUser, extraConfig map[string]string) (*resources.Session, error) { - snapshot, err := p.getSnapshot(snapshotID) + snapshot, err := p.getSnapshot(clone.Snapshot.ID) if err != nil { return nil, errors.Wrap(err, "failed to get snapshots") } @@ -163,7 +163,7 @@ func (p *Provisioner) StartSession(snapshotID string, user resources.EphemeralUs return nil, errors.New("failed to get a free port") } - name := util.GetCloneName(port) + name := clone.ID fsm, err := p.pm.GetFSManager(snapshot.Pool) if err != nil { @@ -174,7 +174,7 @@ func (p *Provisioner) StartSession(snapshotID string, user resources.EphemeralUs defer func() { if err != nil { - p.revertSession(fsm, name) + p.revertSession(fsm, clone.Branch, name, strconv.FormatUint(uint64(port), 10), clone.Revision) if portErr := p.FreePort(port); portErr != nil { log.Err(portErr) @@ -182,11 +182,11 @@ func (p *Provisioner) StartSession(snapshotID string, user resources.EphemeralUs } }() - if err = fsm.CreateClone(name, snapshot.ID); err != nil { + if err = fsm.CreateClone(clone.Branch, name, snapshot.ID, clone.Revision); err != nil { return nil, errors.Wrap(err, "failed to create clone") } - appConfig := p.getAppConfig(fsm.Pool(), name, port) + appConfig := p.getAppConfig(fsm.Pool(), clone.Branch, name, clone.Revision, port) appConfig.SetExtraConf(extraConfig) if err := fs.CleanupLogsDir(appConfig.DataDir()); err != nil { @@ -217,20 +217,16 @@ func (p *Provisioner) StartSession(snapshotID string, user resources.EphemeralUs } // StopSession stops an existing session. -func (p *Provisioner) StopSession(session *resources.Session) error { +func (p *Provisioner) StopSession(session *resources.Session, clone *models.Clone) error { fsm, err := p.pm.GetFSManager(session.Pool) if err != nil { return errors.Wrap(err, "failed to find a filesystem manager of this session") } - name := util.GetCloneName(session.Port) - - if err := postgres.Stop(p.runner, fsm.Pool(), name); err != nil { - return errors.Wrap(err, "failed to stop a container") - } + name := clone.ID - if err := fsm.DestroyClone(name); err != nil { - return errors.Wrap(err, "failed to destroy a clone") + if err := postgres.Stop(p.runner, fsm.Pool(), name, clone.DB.Port); err != nil { + return errors.Wrap(err, "failed to stop container") } if err := p.FreePort(session.Port); err != nil { @@ -241,13 +237,13 @@ func (p *Provisioner) StopSession(session *resources.Session) error { } // ResetSession resets an existing session. 
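// A matching sketch for reset, under the same assumptions; the clone now
// carries the branch, revision, and port details that were previously
// derived from the session port:
//
//	snapshot, err := p.ResetSession(session, clone, snapshotID)
//	if err != nil {
//		return fmt.Errorf("failed to reset session: %w", err)
//	}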
-func (p *Provisioner) ResetSession(session *resources.Session, snapshotID string) (*models.Snapshot, error) { +func (p *Provisioner) ResetSession(session *resources.Session, clone *models.Clone, snapshotID string) (*models.Snapshot, error) { fsm, err := p.pm.GetFSManager(session.Pool) if err != nil { return nil, errors.Wrap(err, "failed to find filesystem manager of this session") } - name := util.GetCloneName(session.Port) + name := clone.ID snapshot, err := p.getSnapshot(snapshotID) if err != nil { @@ -270,23 +266,25 @@ func (p *Provisioner) ResetSession(session *resources.Session, snapshotID string defer func() { if err != nil { - p.revertSession(newFSManager, name) + p.revertSession(newFSManager, clone.Branch, name, clone.DB.Port, clone.Revision) } }() - if err = postgres.Stop(p.runner, fsm.Pool(), name); err != nil { + if err = postgres.Stop(p.runner, fsm.Pool(), name, clone.DB.Port); err != nil { return nil, errors.Wrap(err, "failed to stop container") } - if err = fsm.DestroyClone(name); err != nil { - return nil, errors.Wrap(err, "failed to destroy clone") + if clone.Revision == branching.DefaultRevision || !clone.HasDependent { + if err = fsm.DestroyClone(clone.Branch, name, clone.Revision); err != nil { + return nil, errors.Wrap(err, "failed to destroy clone") + } } - if err = newFSManager.CreateClone(name, snapshot.ID); err != nil { + if err = newFSManager.CreateClone(clone.Branch, name, snapshot.ID, clone.Revision); err != nil { return nil, errors.Wrap(err, "failed to create clone") } - appConfig := p.getAppConfig(newFSManager.Pool(), name, session.Port) + appConfig := p.getAppConfig(newFSManager.Pool(), clone.Branch, name, clone.Revision, session.Port) appConfig.SetExtraConf(session.ExtraConfig) if err := fs.CleanupLogsDir(appConfig.DataDir()); err != nil { @@ -328,13 +326,13 @@ func (p *Provisioner) GetSnapshots() ([]resources.Snapshot, error) { } // GetSessionState describes the state of the session. -func (p *Provisioner) GetSessionState(s *resources.Session) (*resources.SessionState, error) { +func (p *Provisioner) GetSessionState(s *resources.Session, branch, cloneID string) (*resources.SessionState, error) { fsm, err := p.pm.GetFSManager(s.Pool) if err != nil { - return nil, errors.Wrap(err, "failed to find a filesystem manager of this session") + return nil, errors.Wrap(err, "failed to find filesystem manager of this session") } - return fsm.GetSessionState(util.GetCloneName(s.Port)) + return fsm.GetSessionState(branch, cloneID) } // GetPoolEntryList provides an ordered list of available pools. @@ -389,15 +387,15 @@ func buildPoolEntry(fsm pool.FSManager) (models.PoolEntry, error) { } // Other methods. 
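// The session-state lookup above follows the same pattern; a sketch,
// assuming the caller tracks the clone's branch and ID:
//
//	state, err := p.GetSessionState(session, clone.Branch, clone.ID)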
-func (p *Provisioner) revertSession(fsm pool.FSManager, name string) { - log.Dbg(`Reverting start of a session...`) +func (p *Provisioner) revertSession(fsm pool.FSManager, branch, name, port string, revision int) { + log.Dbg(`Reverting start of session...`) - if runnerErr := postgres.Stop(p.runner, fsm.Pool(), name); runnerErr != nil { - log.Err("Stop Postgres:", runnerErr) + if runnerErr := postgres.Stop(p.runner, fsm.Pool(), name, port); runnerErr != nil { + log.Err("stop Postgres:", runnerErr) } - if runnerErr := fsm.DestroyClone(name); runnerErr != nil { - log.Err("Destroy clone:", runnerErr) + if runnerErr := fsm.DestroyClone(branch, name, revision); runnerErr != nil { + log.Err("destroy clone:", runnerErr) } } @@ -590,7 +588,9 @@ func (p *Provisioner) stopPoolSessions(fsm pool.FSManager, exceptClones map[stri log.Dbg("Stopping container:", instance) - if err = postgres.Stop(p.runner, fsPool, instance); err != nil { + port := "" // TODO: check this case to prevent removing active sockets. + + if err = postgres.Stop(p.runner, fsPool, instance, port); err != nil { return errors.Wrap(err, "failed to container") } } @@ -607,7 +607,10 @@ func (p *Provisioner) stopPoolSessions(fsm pool.FSManager, exceptClones map[stri continue } - if err := fsm.DestroyClone(clone); err != nil { + branchName := branching.DefaultBranch // TODO: extract branch from name OR pass as an argument. + revision := branching.DefaultRevision // TODO: the same for the revision. + + if err := fsm.DestroyClone(branchName, clone, revision); err != nil { return err } } @@ -615,11 +618,13 @@ func (p *Provisioner) stopPoolSessions(fsm pool.FSManager, exceptClones map[stri return nil } -func (p *Provisioner) getAppConfig(pool *resources.Pool, name string, port uint) *resources.AppConfig { +func (p *Provisioner) getAppConfig(pool *resources.Pool, branch, name string, rev int, port uint) *resources.AppConfig { provisionHosts := p.getProvisionHosts() appConfig := &resources.AppConfig{ CloneName: name, + Branch: branch, + Revision: rev, DockerImage: p.config.DockerImage, Host: pool.SocketCloneDir(name), Port: port, @@ -655,16 +660,17 @@ func (p *Provisioner) getProvisionHosts() string { } // LastSessionActivity returns the time of the last session activity. 
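// A caller sketch for the new signature below, assuming minimumTime bounds
// how far back the clone's Postgres logs are scanned:
//
//	lastActive, err := p.LastSessionActivity(session, clone.Branch,
//		clone.ID, clone.Revision, minimumTime)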
-func (p *Provisioner) LastSessionActivity(session *resources.Session, minimumTime time.Time) (*time.Time, error) { +func (p *Provisioner) LastSessionActivity(session *resources.Session, branch, cloneID string, revision int, + minimumTime time.Time) (*time.Time, error) { fsm, err := p.pm.GetFSManager(session.Pool) if err != nil { - return nil, errors.Wrap(err, "failed to find a filesystem manager") + return nil, errors.Wrap(err, "failed to find filesystem manager") } ctx, cancel := context.WithCancel(p.ctx) defer cancel() - clonePath := fsm.Pool().ClonePath(session.Port) + clonePath := fsm.Pool().ClonePath(branch, cloneID, revision) fileSelector := pglog.NewSelector(clonePath) if err := fileSelector.DiscoverLogDir(); err != nil { @@ -734,7 +740,7 @@ func (p *Provisioner) scanCSVLogFile(ctx context.Context, filename string, avail defer func() { if err := csvFile.Close(); err != nil { - log.Errf("Failed to close a CSV log file: %s", err.Error()) + log.Errf("failed to close CSV log file: %s", err.Error()) } }() diff --git a/engine/internal/provision/mode_local_test.go b/engine/internal/provision/mode_local_test.go index 02fe78a3..72c70e13 100644 --- a/engine/internal/provision/mode_local_test.go +++ b/engine/internal/provision/mode_local_test.go @@ -14,6 +14,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/provision/pool" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/resources" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" ) @@ -66,11 +67,11 @@ type mockFSManager struct { cloneList []string } -func (m mockFSManager) CreateClone(_, _ string) error { +func (m mockFSManager) CreateClone(_, _, _ string, _ int) error { return nil } -func (m mockFSManager) DestroyClone(_ string) error { +func (m mockFSManager) DestroyClone(_, _ string, _ int) error { return nil } @@ -82,7 +83,7 @@ func (m mockFSManager) CreateSnapshot(_, _ string) (snapshotName string, err err return "", nil } -func (m mockFSManager) DestroySnapshot(_ string) (err error) { +func (m mockFSManager) DestroySnapshot(_ string, _ thinclones.DestroyOptions) (err error) { return nil } @@ -97,7 +98,7 @@ func (m mockFSManager) SnapshotList() []resources.Snapshot { func (m mockFSManager) RefreshSnapshotList() { } -func (m mockFSManager) GetSessionState(_ string) (*resources.SessionState, error) { +func (m mockFSManager) GetSessionState(_, _ string) (*resources.SessionState, error) { return nil, nil } @@ -109,6 +110,110 @@ func (m mockFSManager) Pool() *resources.Pool { return m.pool } +func (m mockFSManager) InitBranching() error { + return nil +} + +func (m mockFSManager) VerifyBranchMetadata() error { + return nil +} + +func (m mockFSManager) CreateDataset(_ string) error { + return nil +} + +func (m mockFSManager) CreateBranch(_, _ string) error { + return nil +} + +func (m mockFSManager) DestroyDataset(_ string) error { + return nil +} + +func (m mockFSManager) Snapshot(_ string) error { + return nil +} + +func (m mockFSManager) Reset(_ string, _ thinclones.ResetOptions) error { + return nil +} + +func (m mockFSManager) ListBranches() (map[string]string, error) { + return nil, nil +} + +func (m mockFSManager) ListAllBranches(_ []string) ([]models.BranchEntity, error) { + return nil, nil +} + +func (m mockFSManager) GetSnapshotProperties(_ string) (thinclones.SnapshotProperties, error) { + return thinclones.SnapshotProperties{}, nil +} + +func (m mockFSManager) AddBranchProp(_, _ string) error { + return nil +} + +func (m 
mockFSManager) DeleteBranchProp(_, _ string) error { + return nil +} + +func (m mockFSManager) SetRelation(_, _ string) error { + return nil +} + +func (m mockFSManager) SetRoot(_, _ string) error { + return nil +} + +func (m mockFSManager) GetRepo() (*models.Repo, error) { + return nil, nil +} + +func (m mockFSManager) GetAllRepo() (*models.Repo, error) { + return nil, nil +} + +func (m mockFSManager) SetDSA(_, _ string) error { + return nil +} + +func (m mockFSManager) SetMessage(_, _ string) error { + return nil +} + +func (m mockFSManager) SetMountpoint(_, _ string) error { + return nil +} + +func (m mockFSManager) Move(_, _, _ string) error { + return nil +} + +func (m mockFSManager) Rename(_, _ string) error { + return nil +} + +func (m mockFSManager) DeleteBranch(_ string) error { + return nil +} + +func (m mockFSManager) DeleteChildProp(_, _ string) error { + return nil +} + +func (m mockFSManager) DeleteRootProp(_, _ string) error { + return nil +} + +func (m mockFSManager) HasDependentEntity(_ string) ([]string, error) { + return nil, nil +} + +func (m mockFSManager) KeepRelation(_ string) error { + return nil +} + func TestBuildPoolEntry(t *testing.T) { testCases := []struct { pool *resources.Pool diff --git a/engine/internal/provision/pool/manager.go b/engine/internal/provision/pool/manager.go index 74c41171..1c63a6a2 100644 --- a/engine/internal/provision/pool/manager.go +++ b/engine/internal/provision/pool/manager.go @@ -13,6 +13,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/provision/resources" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/runners" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones/lvm" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones/zfs" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" @@ -25,30 +26,60 @@ type FSManager interface { Snapshotter StateReporter Pooler + Branching } // Cloner describes methods of clone management. type Cloner interface { - CreateClone(name, snapshotID string) error - DestroyClone(name string) error + CreateClone(branch, name, snapshotID string, revision int) error + DestroyClone(branch, name string, revision int) error ListClonesNames() ([]string, error) } // StateReporter describes methods of state reporting. type StateReporter interface { - GetSessionState(name string) (*resources.SessionState, error) + GetSessionState(branch, name string) (*resources.SessionState, error) GetFilesystemState() (models.FileSystem, error) } // Snapshotter describes methods of snapshot management. type Snapshotter interface { CreateSnapshot(poolSuffix, dataStateAt string) (snapshotName string, err error) - DestroySnapshot(snapshotName string) (err error) + DestroySnapshot(snapshotName string, options thinclones.DestroyOptions) (err error) CleanupSnapshots(retentionLimit int) ([]string, error) SnapshotList() []resources.Snapshot RefreshSnapshotList() } +// Branching describes methods for data branching. 
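// A conventional compile-time conformance sketch for the interface below;
// in this change the ZFS manager implements these methods natively, while
// the LVM manager stubs them out:
//
//	var _ pool.Branching = (*zfs.Manager)(nil)
//	var _ pool.Branching = (*lvm.LVManager)(nil)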
+type Branching interface { + InitBranching() error + VerifyBranchMetadata() error + CreateDataset(datasetName string) error + CreateBranch(branchName, snapshotID string) error + DestroyDataset(branchName string) (err error) + ListBranches() (map[string]string, error) + ListAllBranches(filterPools []string) ([]models.BranchEntity, error) + GetRepo() (*models.Repo, error) + GetAllRepo() (*models.Repo, error) + SetRelation(parent, snapshotName string) error + Snapshot(snapshotName string) error + Move(baseSnap, currentSnap, target string) error + SetMountpoint(path, branch string) error + Rename(oldName, branch string) error + GetSnapshotProperties(snapshotName string) (thinclones.SnapshotProperties, error) + AddBranchProp(branch, snapshotName string) error + DeleteBranchProp(branch, snapshotName string) error + DeleteChildProp(childSnapshot, snapshotName string) error + DeleteRootProp(branch, snapshotName string) error + SetRoot(branch, snapshotName string) error + SetDSA(dsa, snapshotName string) error + SetMessage(message, snapshotName string) error + Reset(snapshotID string, options thinclones.ResetOptions) error + HasDependentEntity(snapshotName string) ([]string, error) + KeepRelation(snapshotName string) error +} + // Pooler describes methods for Pool providing. type Pooler interface { Pool() *resources.Pool diff --git a/engine/internal/provision/pool/pool_manager.go b/engine/internal/provision/pool/pool_manager.go index fb56f80e..fc35da3a 100644 --- a/engine/internal/provision/pool/pool_manager.go +++ b/engine/internal/provision/pool/pool_manager.go @@ -30,6 +30,9 @@ const ( ext4 = "ext4" ) +// ErrNoPools means that there no available pools. +var ErrNoPools = errors.New("no available pools") + // Manager describes a pool manager. type Manager struct { cfg *Config @@ -144,7 +147,7 @@ func (pm *Manager) GetFSManager(name string) (FSManager, error) { pm.mu.Unlock() if !ok { - return nil, errors.New("pool manager not found") + return nil, fmt.Errorf("pool manager not found: %s", name) } return fsm, nil @@ -240,7 +243,7 @@ func (pm *Manager) ReloadPools() error { fsPools, fsManagerList := pm.examineEntries(dirEntries) if len(fsPools) == 0 { - return errors.New("no available pools") + return ErrNoPools } pm.mu.Lock() @@ -294,7 +297,6 @@ func (pm *Manager) examineEntries(entries []os.DirEntry) (map[string]FSManager, Name: entry.Name(), PoolDirName: entry.Name(), MountDir: pm.cfg.MountDir, - CloneSubDir: pm.cfg.CloneSubDir, DataSubDir: pm.cfg.DataSubDir, SocketSubDir: pm.cfg.SocketSubDir, ObserverSubDir: pm.cfg.ObserverSubDir, diff --git a/engine/internal/provision/resources/appconfig.go b/engine/internal/provision/resources/appconfig.go index 94a37c40..f05f5266 100644 --- a/engine/internal/provision/resources/appconfig.go +++ b/engine/internal/provision/resources/appconfig.go @@ -6,11 +6,15 @@ package resources import ( "path" + + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" ) // AppConfig currently stores Postgres configuration (other application in the future too). type AppConfig struct { CloneName string + Branch string + Revision int DockerImage string Pool *Pool Host string @@ -32,13 +36,13 @@ type DB struct { // CloneDir returns the path of the clone directory. func (c *AppConfig) CloneDir() string { // TODO(akartasov): Move to pool. - return path.Join(c.Pool.ClonesDir(), c.CloneName) + return path.Join(c.Pool.ClonesDir(c.Branch), c.CloneName, branching.RevisionSegment(c.Revision)) } // DataDir returns the path of clone data. 
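// With the fixture values used in this change's tests (pool "dblab_pool"
// mounted at /var/lib/dblab, branch "main", clone "dblab_clone_6000",
// revision 0, data subdir "data"), these two helpers resolve to:
//
//	CloneDir: /var/lib/dblab/dblab_pool/branch/main/dblab_clone_6000/r0
//	DataDir:  /var/lib/dblab/dblab_pool/branch/main/dblab_clone_6000/r0/data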
func (c *AppConfig) DataDir() string { // TODO(akartasov): Move to pool. - return path.Join(c.Pool.ClonesDir(), c.CloneName, c.Pool.DataSubDir) + return path.Join(c.Pool.ClonesDir(c.Branch), c.CloneName, branching.RevisionSegment(c.Revision), c.Pool.DataSubDir) } // ExtraConf returns a map with an extra configuration. diff --git a/engine/internal/provision/resources/pool.go b/engine/internal/provision/resources/pool.go index 1fd5b28e..0f4e695e 100644 --- a/engine/internal/provision/resources/pool.go +++ b/engine/internal/provision/resources/pool.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "gitlab.com/postgres-ai/database-lab/v3/pkg/util" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" ) // PoolStatus represents a pool status. @@ -65,18 +65,28 @@ func (p *Pool) SocketDir() string { } // ObserverDir returns a path to the observer directory of the storage pool. -func (p *Pool) ObserverDir(port uint) string { - return path.Join(p.ClonePath(port), p.ObserverSubDir) +func (p *Pool) ObserverDir(branch, name string, revision int) string { + return path.Join(p.ClonePath(branch, name, revision), p.ObserverSubDir) } // ClonesDir returns a path to the clones directory of the storage pool. -func (p *Pool) ClonesDir() string { - return path.Join(p.MountDir, p.PoolDirName, p.CloneSubDir) +func (p *Pool) ClonesDir(branch string) string { + return path.Join(p.MountDir, p.PoolDirName, branching.BranchDir, branch) } -// ClonePath returns a path to the initialized clone directory. -func (p *Pool) ClonePath(port uint) string { - return path.Join(p.MountDir, p.PoolDirName, p.CloneSubDir, util.GetCloneName(port), p.DataSubDir) +// ClonePath returns a path to the data clone directory. +func (p *Pool) ClonePath(branchName, name string, revision int) string { + return path.Join(p.MountDir, p.PoolDirName, branching.BranchDir, branchName, name, branching.RevisionSegment(revision), p.DataSubDir) +} + +// CloneLocation returns a path to the initialized clone directory. +func (p *Pool) CloneLocation(branchName, name string, revision int) string { + return path.Join(p.MountDir, p.PoolDirName, branching.BranchDir, branchName, name, branching.RevisionSegment(revision)) +} + +// CloneRevisionLocation returns a path to the clone revisions. +func (p *Pool) CloneRevisionLocation(branchName, name string) string { + return path.Join(p.MountDir, p.PoolDirName, branching.BranchDir, branchName, name) } // SocketCloneDir returns a path to the socket clone directory. @@ -84,6 +94,21 @@ func (p *Pool) SocketCloneDir(name string) string { return path.Join(p.SocketDir(), name) } +// BranchName returns a full branch name in the data pool. +func (p *Pool) BranchName(poolName, branchName string) string { + return branching.BranchName(poolName, branchName) +} + +// CloneDataset returns a full clone dataset in the data pool. +func (p *Pool) CloneDataset(branchName, cloneName string) string { + return branching.CloneDataset(p.Name, branchName, cloneName) +} + +// CloneName returns a full clone name in the data pool. +func (p *Pool) CloneName(branchName, cloneName string, revision int) string { + return branching.CloneName(p.Name, branchName, cloneName, revision) +} + // Status gets the pool status. 
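// The naming helpers above delegate to util/branching; assuming the dataset
// layout mirrors the path helpers, a full clone name is expected to take a
// form like the following (a sketch, the exact format lives in util/branching):
//
//	p.CloneName("main", "clone123", 0) // "dblab_pool/branch/main/clone123/r0"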
func (p *Pool) Status() PoolStatus { p.mu.RLock() diff --git a/engine/internal/provision/resources/resources.go b/engine/internal/provision/resources/resources.go index 201f9e11..1a5538ee 100644 --- a/engine/internal/provision/resources/resources.go +++ b/engine/internal/provision/resources/resources.go @@ -33,12 +33,14 @@ type EphemeralUser struct { // Snapshot defines snapshot of the data with related meta-information. type Snapshot struct { - ID string - CreatedAt time.Time - DataStateAt time.Time - Used uint64 - LogicalReferenced uint64 - Pool string + ID string `json:"id"` + CreatedAt time.Time `json:"createdAt"` + DataStateAt time.Time `json:"dataStateAt"` + Used uint64 `json:"used"` + LogicalReferenced uint64 `json:"logicalReferenced"` + Pool string `json:"pool"` + Branch string `json:"branch"` + Message string `json:"message"` } // SessionState defines current state of a Session. diff --git a/engine/internal/provision/thinclones/lvm/lvmanager.go b/engine/internal/provision/thinclones/lvm/lvmanager.go index 35da7082..8afc4c74 100644 --- a/engine/internal/provision/thinclones/lvm/lvmanager.go +++ b/engine/internal/provision/thinclones/lvm/lvmanager.go @@ -12,6 +12,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/provision/resources" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/runners" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" ) @@ -53,13 +54,13 @@ func (m *LVManager) UpdateConfig(pool *resources.Pool) { } // CreateClone creates a new volume. -func (m *LVManager) CreateClone(name, _ string) error { - return CreateVolume(m.runner, m.volumeGroup, m.logicalVolume, name, m.pool.ClonesDir()) +func (m *LVManager) CreateClone(branch, name, _ string, _ int) error { + return CreateVolume(m.runner, m.volumeGroup, m.logicalVolume, name, m.pool.ClonesDir(branch)) } // DestroyClone destroys volumes. -func (m *LVManager) DestroyClone(name string) error { - return RemoveVolume(m.runner, m.volumeGroup, m.logicalVolume, name, m.pool.ClonesDir()) +func (m *LVManager) DestroyClone(branch, name string, _ int) error { + return RemoveVolume(m.runner, m.volumeGroup, m.logicalVolume, name, m.pool.ClonesDir(branch)) } // ListClonesNames returns a list of clone names. @@ -98,7 +99,7 @@ func (m *LVManager) CreateSnapshot(_, _ string) (string, error) { } // DestroySnapshot is not supported in LVM mode. -func (m *LVManager) DestroySnapshot(_ string) error { +func (m *LVManager) DestroySnapshot(_ string, _ thinclones.DestroyOptions) error { log.Msg("Destroying a snapshot is not supported in LVM mode. Skip the operation.") return nil @@ -130,7 +131,7 @@ func (m *LVManager) RefreshSnapshotList() { } // GetSessionState is not implemented. -func (m *LVManager) GetSessionState(_ string) (*resources.SessionState, error) { +func (m *LVManager) GetSessionState(_, _ string) (*resources.SessionState, error) { // TODO(anatoly): Implement. return &resources.SessionState{}, nil } @@ -140,3 +141,178 @@ func (m *LVManager) GetFilesystemState() (models.FileSystem, error) { // TODO(anatoly): Implement. return models.FileSystem{Mode: PoolMode}, nil } + +// InitBranching inits data branching. +func (m *LVManager) InitBranching() error { + log.Msg("InitBranching is not supported for LVM. Skip the operation") + + return nil +} + +// VerifyBranchMetadata checks snapshot metadata. 
+func (m *LVManager) VerifyBranchMetadata() error { + log.Msg("VerifyBranchMetadata is not supported for LVM. Skip the operation") + + return nil +} + +// CreateDataset creates a new dataset. +func (m *LVManager) CreateDataset(_ string) error { + log.Msg("CreateDataset is not supported for LVM. Skip the operation") + + return nil +} + +// CreateBranch clones data as a new branch. +func (m *LVManager) CreateBranch(_, _ string) error { + log.Msg("CreateBranch is not supported for LVM. Skip the operation") + + return nil +} + +// DestroyDataset destroys dataset. +func (m *LVManager) DestroyDataset(_ string) error { + log.Msg("DestroyDataset is not supported for LVM; skipping operation") + + return nil +} + +// Snapshot takes a snapshot of the current data state. +func (m *LVManager) Snapshot(_ string) error { + log.Msg("Snapshot is not supported for LVM. Skip the operation") + + return nil +} + +// Reset rollbacks data to ZFS snapshot. +func (m *LVManager) Reset(_ string, _ thinclones.ResetOptions) error { + log.Msg("Reset is not supported for LVM. Skip the operation") + + return nil +} + +// ListBranches lists data pool branches. +func (m *LVManager) ListBranches() (map[string]string, error) { + log.Msg("ListBranches is not supported for LVM. Skip the operation") + + return nil, nil +} + +// ListAllBranches lists all branches. +func (m *LVManager) ListAllBranches(_ []string) ([]models.BranchEntity, error) { + log.Msg("ListAllBranches is not supported for LVM. Skip the operation") + + return nil, nil +} + +// GetSnapshotProperties get custom snapshot properties. +func (m *LVManager) GetSnapshotProperties(_ string) (thinclones.SnapshotProperties, error) { + log.Msg("GetSnapshotProperties is not supported for LVM. Skip the operation") + + return thinclones.SnapshotProperties{}, nil +} + +// AddBranchProp adds branch to snapshot property. +func (m *LVManager) AddBranchProp(_, _ string) error { + log.Msg("AddBranchProp is not supported for LVM. Skip the operation") + + return nil +} + +// DeleteBranchProp deletes branch from snapshot property. +func (m *LVManager) DeleteBranchProp(_, _ string) error { + log.Msg("DeleteBranchProp is not supported for LVM. Skip the operation") + + return nil +} + +// DeleteChildProp deletes child from snapshot property. +func (m *LVManager) DeleteChildProp(_, _ string) error { + log.Msg("DeleteChildProp is not supported for LVM. Skip the operation") + + return nil +} + +// DeleteRootProp deletes root from snapshot property. +func (m *LVManager) DeleteRootProp(_, _ string) error { + log.Msg("DeleteRootProp is not supported for LVM. Skip the operation") + + return nil +} + +// SetRelation sets relation between snapshots. +func (m *LVManager) SetRelation(_, _ string) error { + log.Msg("SetRelation is not supported for LVM. Skip the operation") + + return nil +} + +// SetRoot marks snapshot as a root of branch. +func (m *LVManager) SetRoot(_, _ string) error { + log.Msg("SetRoot is not supported for LVM. Skip the operation") + + return nil +} + +// GetRepo provides data repository details. +func (m *LVManager) GetRepo() (*models.Repo, error) { + log.Msg("GetRepo is not supported for LVM. Skip the operation") + + return nil, nil +} + +// GetAllRepo provides data repository details. +func (m *LVManager) GetAllRepo() (*models.Repo, error) { + log.Msg("GetAllRepo is not supported for LVM. Skip the operation") + + return nil, nil +} + +// SetDSA sets value of DataStateAt to snapshot. 
+func (m *LVManager) SetDSA(_, _ string) error { + log.Msg("SetDSA is not supported for LVM. Skip the operation") + + return nil +} + +// SetMessage sets commit message to snapshot. +func (m *LVManager) SetMessage(_, _ string) error { + log.Msg("SetMessage is not supported for LVM. Skip the operation") + + return nil +} + +// SetMountpoint sets clone mount point. +func (m *LVManager) SetMountpoint(_, _ string) error { + log.Msg("SetMountpoint is not supported for LVM. Skip the operation") + + return nil +} + +// Rename renames clone. +func (m *LVManager) Rename(_, _ string) error { + log.Msg("Rename is not supported for LVM. Skip the operation") + + return nil +} + +// Move moves snapshot diff. +func (m *LVManager) Move(_, _, _ string) error { + log.Msg("Move is not supported for LVM. Skip the operation") + + return nil +} + +// HasDependentEntity checks if snapshot has dependent entities. +func (m *LVManager) HasDependentEntity(_ string) ([]string, error) { + log.Msg("HasDependentEntity is not supported for LVM. Skip the operation") + + return nil, nil +} + +// KeepRelation keeps relation between adjacent snapshots. +func (m *LVManager) KeepRelation(_ string) error { + log.Msg("KeepRelation is not supported for LVM. Skip the operation") + + return nil +} diff --git a/engine/internal/provision/thinclones/manager.go b/engine/internal/provision/thinclones/manager.go index b830fad9..648d8c87 100644 --- a/engine/internal/provision/thinclones/manager.go +++ b/engine/internal/provision/thinclones/manager.go @@ -9,6 +9,12 @@ import ( "fmt" ) +// ResetOptions defines reset options. +type ResetOptions struct { + // -f + // -r +} + // SnapshotExistsError defines an error when snapshot already exists. type SnapshotExistsError struct { name string @@ -23,3 +29,20 @@ func NewSnapshotExistsError(name string) *SnapshotExistsError { func (e *SnapshotExistsError) Error() string { return fmt.Sprintf(`snapshot %s already exists`, e.name) } + +// DestroyOptions provides options for destroy commands. +type DestroyOptions struct { + Force bool +} + +// SnapshotProperties describe custom properties of the dataset. +type SnapshotProperties struct { + Name string + Parent string + Child string + Branch string + Root string + DataStateAt string + Message string + Clones string +} diff --git a/engine/internal/provision/thinclones/zfs/branching.go b/engine/internal/provision/thinclones/zfs/branching.go new file mode 100644 index 00000000..f446edc9 --- /dev/null +++ b/engine/internal/provision/thinclones/zfs/branching.go @@ -0,0 +1,685 @@ +/* +2022 © Postgres.ai +*/ + +package zfs + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "strings" + + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones" + "gitlab.com/postgres-ai/database-lab/v3/pkg/log" + "gitlab.com/postgres-ai/database-lab/v3/pkg/models" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" +) + +const ( + branchProp = "dle:branch" + parentProp = "dle:parent" + childProp = "dle:child" + rootProp = "dle:root" + messageProp = "dle:message" + branchSep = "," + empty = "-" +) + +type cmdCfg struct { + pool string +} + +// InitBranching inits data branching. 
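// Branch metadata is stored as user properties on ZFS snapshots, using the
// constants declared above (comma separates set values, "-" marks empty):
//
//	dle:branch  - branches whose head is this snapshot
//	dle:parent  - neighboring snapshot in the chain
//	dle:child   - neighboring snapshot(s) in the chain
//	dle:root    - branches rooted at this snapshot
//	dle:message - base64-encoded snapshot message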
+func (m *Manager) InitBranching() error { + snapshots := m.SnapshotList() + + numberSnapshots := len(snapshots) + + if numberSnapshots == 0 { + log.Dbg("no snapshots to init data branching") + return nil + } + + latest := snapshots[0] + + if getPoolPrefix(latest.ID) != m.config.Pool.Name { + for _, s := range snapshots { + if s.Pool == m.config.Pool.Name { + latest = s + break + } + } + } + + latestBranchProperty, err := m.getProperty(branchProp, latest.ID) + if err != nil { + return fmt.Errorf("failed to read snapshot property: %w", err) + } + + if latestBranchProperty != "" && latestBranchProperty != "-" { + log.Dbg("data branching is already initialized") + + return nil + } + + if err := m.AddBranchProp(branching.DefaultBranch, latest.ID); err != nil { + return fmt.Errorf("failed to add branch property: %w", err) + } + + leader := latest + + for i := 1; i < numberSnapshots; i++ { + follower := snapshots[i] + + if getPoolPrefix(leader.ID) != getPoolPrefix(follower.ID) { + continue + } + + if err := m.SetRelation(leader.ID, follower.ID); err != nil { + return fmt.Errorf("failed to set snapshot relations: %w", err) + } + + brProperty, err := m.getProperty(branchProp, follower.ID) + if err != nil { + return fmt.Errorf("failed to read branch property: %w", err) + } + + if brProperty == branching.DefaultBranch { + if err := m.DeleteBranchProp(branching.DefaultBranch, follower.ID); err != nil { + return fmt.Errorf("failed to delete default branch property: %w", err) + } + + break + } + + leader = follower + } + + // If not exists pool/branch/main, init main branch dataset. + brName := m.Pool().BranchName(m.Pool().Name, branching.DefaultBranch) + + if err := m.CreateDataset(brName); err != nil { + return fmt.Errorf("failed to init main branch dataset: %w", err) + } + + m.RefreshSnapshotList() + + log.Msg("data branching has been successfully initialized") + + return nil +} + +func getPoolPrefix(pool string) string { + return strings.Split(pool, "@")[0] +} + +// VerifyBranchMetadata verifies data branching metadata. +func (m *Manager) VerifyBranchMetadata() error { + snapshots := m.SnapshotList() + + numberSnapshots := len(snapshots) + + if numberSnapshots == 0 { + log.Dbg("no snapshots to verify data branching") + return nil + } + + latest := snapshots[0] + + brName, err := m.getProperty(branchProp, latest.ID) + if err != nil { + log.Dbg("cannot find branch for snapshot", latest.ID, err.Error()) + } + + for i := numberSnapshots; i > 1; i-- { + if err := m.SetRelation(snapshots[i-1].ID, snapshots[i-2].ID); err != nil { + return fmt.Errorf("failed to set snapshot relations: %w", err) + } + + if brName == "" { + brName, err = m.getProperty(branchProp, snapshots[i-1].ID) + if err != nil { + log.Dbg("cannot find branch for snapshot", snapshots[i-1].ID, err.Error()) + } + } + } + + if brName == "" { + brName = branching.DefaultBranch + } + + if err := m.AddBranchProp(brName, latest.ID); err != nil { + return fmt.Errorf("failed to add branch property: %w", err) + } + + log.Msg("data branching has been verified") + + return nil +} + +// CreateBranch clones data as a new branch. +func (m *Manager) CreateBranch(branchName, snapshotID string) error { + // zfs clone -p pool@snapshot_20221019094237 pool/branch/001-branch + cmd := []string{ + "zfs clone -p", snapshotID, branchName, + } + + out, err := m.runner.Run(strings.Join(cmd, " ")) + if err != nil { + return fmt.Errorf("zfs clone error: %w. Out: %v", err, out) + } + + return nil +} + +// Snapshot takes a snapshot of the current data state. 
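// Usage sketch: snapshotName is a fully qualified ZFS snapshot name, in the
// same "pool@snapshot_20221019094237" form as the CreateBranch example
// above, and the call shells out to "zfs snapshot <name>":
//
//	err := m.Snapshot("dblab_pool@snapshot_20221019094237")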
+func (m *Manager) Snapshot(snapshotName string) error { + cmd := []string{ + "zfs snapshot ", snapshotName, + } + + out, err := m.runner.Run(strings.Join(cmd, " ")) + if err != nil { + return fmt.Errorf("zfs snapshot error: %w. Out: %v", err, out) + } + + return nil +} + +// Move sends and receives snapshot diff. +func (m *Manager) Move(baseSnap, currentSnap, target string) error { + cmd := fmt.Sprintf( + "zfs send -I %s %s | zfs receive -F %s", baseSnap, currentSnap, target, + ) + + out, err := m.runner.Run(cmd) + if err != nil { + return fmt.Errorf("zfs moving snapshot error: %w. Out: %v", err, out) + } + + return nil +} + +// Rename renames clone. +func (m *Manager) Rename(oldName, newName string) error { + cmd := []string{ + "zfs rename -p", oldName, newName, + } + + out, err := m.runner.Run(strings.Join(cmd, " ")) + if err != nil { + return fmt.Errorf("zfs renaming error: %w. Out: %v", err, out) + } + + return nil +} + +// SetMountpoint sets clone mount point. +func (m *Manager) SetMountpoint(path, name string) error { + cmd := []string{ + "zfs set", "mountpoint=" + path, name, + } + + out, err := m.runner.Run(strings.Join(cmd, " ")) + if err != nil { + return fmt.Errorf("zfs mountpoint error: %w. Out: %v", err, out) + } + + return nil +} + +// ListBranches lists data pool branches. +func (m *Manager) ListBranches() (map[string]string, error) { + return m.listBranches() +} + +// ListAllBranches lists all branches. +func (m *Manager) ListAllBranches(poolList []string) ([]models.BranchEntity, error) { + poolFilter := "" + + if len(poolList) > 0 { + poolFilter += "-r " + strings.Join(poolList, " ") + } + + cmd := fmt.Sprintf( + // Get all ZFS snapshots (-t) with options (-o) without output headers (-H). + // Excluding snapshots without "dle:branch" property ("grep -v"). + `zfs list -H -t snapshot -o %s,name %s | grep -v "^-" | cat`, branchProp, poolFilter, + ) + + out, err := m.runner.Run(cmd) + if err != nil { + return nil, fmt.Errorf("failed to list branches: %w. Out: %v", err, out) + } + + branches := make([]models.BranchEntity, 0) + lines := strings.Split(strings.TrimSpace(out), "\n") + + const expectedColumns = 2 + + for _, line := range lines { + fields := strings.Fields(line) + + if len(fields) != expectedColumns { + continue + } + + if !strings.Contains(fields[0], branchSep) { + branches = append(branches, models.BranchEntity{Name: fields[0], SnapshotID: fields[1]}) + continue + } + + for _, branchName := range strings.Split(fields[0], branchSep) { + branches = append(branches, models.BranchEntity{Name: branchName, SnapshotID: fields[1]}) + } + } + + return branches, nil +} + +func (m *Manager) listBranches() (map[string]string, error) { + cmd := fmt.Sprintf( + // Get ZFS snapshots (-t) with options (-o) without output headers (-H) filtered by pool (-r). + // Excluding snapshots without "dle:branch" property ("grep -v"). + `zfs list -H -t snapshot -o %s,name -r %s | grep -v "^-" | cat`, branchProp, m.config.Pool.Name, + ) + + out, err := m.runner.Run(cmd) + if err != nil { + return nil, fmt.Errorf("failed to list branches: %w. 
Out: %v", err, out) + } + + branches := make(map[string]string) + lines := strings.Split(strings.TrimSpace(out), "\n") + + const expectedColumns = 2 + + for _, line := range lines { + fields := strings.Fields(line) + + if len(fields) != expectedColumns { + continue + } + + if !strings.Contains(fields[0], branchSep) { + branches[fields[0]] = fields[1] + continue + } + + for _, branchName := range strings.Split(fields[0], branchSep) { + branches[branchName] = fields[1] + } + } + + return branches, nil +} + +var repoFields = []any{"name", parentProp, childProp, branchProp, rootProp, dataStateAtLabel, messageProp, "clones"} + +// GetRepo provides repository details about snapshots and branches filtered by data pool. +func (m *Manager) GetRepo() (*models.Repo, error) { + return m.getRepo(cmdCfg{pool: m.config.Pool.Name}) +} + +// GetAllRepo provides all repository details about snapshots and branches. +func (m *Manager) GetAllRepo() (*models.Repo, error) { + return m.getRepo(cmdCfg{}) +} + +func (m *Manager) getRepo(cmdCfg cmdCfg) (*models.Repo, error) { + strFields := bytes.TrimRight(bytes.Repeat([]byte(`%s,`), len(repoFields)), ",") + + // Get ZFS snapshots (-t) with options (-o) without output headers (-H) filtered by pool (-r). + format := `zfs list -H -t snapshot -o ` + string(strFields) + args := repoFields + + if cmdCfg.pool != "" { + format += " -r %s" + + args = append(args, cmdCfg.pool) + } + + out, err := m.runner.Run(fmt.Sprintf(format, args...)) + if err != nil { + return nil, fmt.Errorf("failed to list branches: %w. Out: %v", err, out) + } + + lines := strings.Split(strings.TrimSpace(out), "\n") + + repo := models.NewRepo() + + for _, line := range lines { + fields := strings.Fields(line) + + if len(fields) != len(repoFields) { + log.Dbg(fmt.Sprintf("Skip invalid line: %#v\n", line)) + + continue + } + + dataset, _, _ := strings.Cut(fields[0], "@") + + snDetail := models.SnapshotDetails{ + ID: fields[0], + Parent: fields[1], + Child: unwindField(fields[2]), + Branch: unwindField(fields[3]), + Root: unwindField(fields[4]), + DataStateAt: strings.Trim(fields[5], empty), + Message: decodeCommitMessage(fields[6]), + Dataset: dataset, + Clones: unwindField(fields[7]), + } + + repo.Snapshots[fields[0]] = snDetail + + for _, sn := range snDetail.Branch { + if sn == "" { + continue + } + + repo.Branches[sn] = fields[0] + } + } + + return repo, nil +} + +func decodeCommitMessage(field string) string { + if field == "" || field == empty { + return field + } + + decodedString, err := base64.StdEncoding.DecodeString(field) + if err != nil { + log.Dbg(fmt.Sprintf("Unable to decode commit message: %#v\n", field)) + return field + } + + return string(decodedString) +} + +func unwindField(field string) []string { + trimValue := strings.Trim(field, empty) + + if len(trimValue) == 0 { + return nil + } + + if !strings.Contains(field, branchSep) { + return []string{trimValue} + } + + items := make([]string, 0) + for _, item := range strings.Split(field, branchSep) { + items = append(items, strings.Trim(item, empty)) + } + + return items +} + +// GetSnapshotProperties get custom snapshot properties. +func (m *Manager) GetSnapshotProperties(snapshotName string) (thinclones.SnapshotProperties, error) { + strFields := bytes.TrimRight(bytes.Repeat([]byte(`%s,`), len(repoFields)), ",") + + // Get ZFS snapshot (-t) with options (-o) without output headers (-H) filtered by snapshot. 
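// Assembled command sketch: property order follows repoFields, and the
// dataStateAt label is a constant defined elsewhere in this package:
//
//	zfs list -H -t snapshot \
//	  -o name,dle:parent,dle:child,dle:branch,dle:root,<dataStateAt>,dle:message,clones <snapshot>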
+	format := `zfs list -H -t snapshot -o ` + string(strFields) + ` %s`
+
+	args := append(repoFields, snapshotName)
+
+	out, err := m.runner.Run(fmt.Sprintf(format, args...))
+	if err != nil {
+		log.Dbg(out)
+
+		return thinclones.SnapshotProperties{}, err
+	}
+
+	fields := strings.Fields(strings.TrimSpace(out))
+
+	if len(fields) != len(repoFields) {
+		log.Dbg("Retrieved fields values:", fields)
+
+		return thinclones.SnapshotProperties{}, errors.New("some snapshot properties could not be retrieved")
+	}
+
+	properties := thinclones.SnapshotProperties{
+		Name:        strings.Trim(fields[0], empty),
+		Parent:      strings.Trim(fields[1], empty),
+		Child:       strings.Trim(fields[2], empty),
+		Branch:      strings.Trim(fields[3], empty),
+		Root:        strings.Trim(fields[4], empty),
+		DataStateAt: strings.Trim(fields[5], empty),
+		Message:     decodeCommitMessage(fields[6]),
+		Clones:      strings.Trim(fields[7], empty),
+	}
+
+	return properties, nil
+}
+
+// AddBranchProp adds branch to snapshot property.
+func (m *Manager) AddBranchProp(branch, snapshotName string) error {
+	return m.addToSet(branchProp, snapshotName, branch)
+}
+
+// DeleteBranchProp deletes branch from snapshot property.
+func (m *Manager) DeleteBranchProp(branch, snapshotName string) error {
+	return m.deleteFromSet(branchProp, branch, snapshotName)
+}
+
+// SetRelation sets up relation between two snapshots.
+func (m *Manager) SetRelation(parent, snapshotName string) error {
+	if err := m.setParent(parent, snapshotName); err != nil {
+		return err
+	}
+
+	return m.addChild(parent, snapshotName)
+}
+
+// DeleteChildProp deletes child from snapshot property.
+func (m *Manager) DeleteChildProp(childSnapshot, snapshotName string) error {
+	return m.deleteFromSet(childProp, childSnapshot, snapshotName)
+}
+
+// DeleteRootProp deletes root from snapshot property.
+func (m *Manager) DeleteRootProp(branch, snapshotName string) error {
+	return m.deleteFromSet(rootProp, branch, snapshotName)
+}
+
+func (m *Manager) setParent(parent, snapshotName string) error {
+	return m.setProperty(parentProp, parent, snapshotName)
+}
+
+func (m *Manager) addChild(parent, snapshotName string) error {
+	return m.addToSet(childProp, parent, snapshotName)
+}
+
+// SetRoot marks snapshot as a root of branch.
+func (m *Manager) SetRoot(branch, snapshotName string) error {
+	return m.addToSet(rootProp, snapshotName, branch)
+}
+
+// SetDSA sets the DataStateAt value for a snapshot.
+func (m *Manager) SetDSA(dsa, snapshotName string) error {
+	return m.setProperty(dataStateAtLabel, dsa, snapshotName)
+}
+
+// SetMessage uses the given message as the commit message.
+func (m *Manager) SetMessage(message, snapshotName string) error {
+	encodedMessage := base64.StdEncoding.EncodeToString([]byte(message))
+	return m.setProperty(messageProp, encodedMessage, snapshotName)
+}
+
+// HasDependentEntity checks whether the snapshot has dependent entities and returns the list of dependent clones.
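+// Child snapshots are traversed recursively, so clones of descendant snapshots are included in the result.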
+func (m *Manager) HasDependentEntity(snapshotName string) ([]string, error) {
+	root, err := m.getProperty(rootProp, snapshotName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to check root property: %w", err)
+	}
+
+	if root != "" {
+		log.Warn(fmt.Sprintf("snapshot %s has dependent branches: %s", snapshotName, root))
+	}
+
+	child, err := m.getProperty(childProp, snapshotName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to check snapshot child property: %w", err)
+	}
+
+	if child != "" {
+		log.Warn(fmt.Sprintf("snapshot %s has dependent snapshots: %s", snapshotName, child))
+	}
+
+	clones, err := m.checkDependentClones(snapshotName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to check dependent clones: %w", err)
+	}
+
+	dependentClones := strings.Split(clones, ",")
+
+	// Check clones of dependent snapshots.
+	if child != "" {
+		// Check all child snapshots.
+		childList := strings.Split(child, ",")
+
+		for _, childSnapshot := range childList {
+			// TODO: limit the max level of recursion.
+			childClones, err := m.HasDependentEntity(childSnapshot)
+			if err != nil {
+				return nil, fmt.Errorf("failed to check dependent clones of dependent snapshots: %w", err)
+			}
+
+			dependentClones = append(dependentClones, childClones...)
+		}
+	}
+
+	return dependentClones, nil
+}
+
+// KeepRelation keeps relation between adjacent snapshots.
+func (m *Manager) KeepRelation(snapshotName string) error {
+	child, err := m.getProperty(childProp, snapshotName)
+	if err != nil {
+		return fmt.Errorf("failed to check snapshot child property: %w", err)
+	}
+
+	parent, err := m.getProperty(parentProp, snapshotName)
+	if err != nil {
+		return fmt.Errorf("failed to check snapshot parent property: %w", err)
+	}
+
+	if parent != "" {
+		if err := m.DeleteChildProp(snapshotName, parent); err != nil {
+			return fmt.Errorf("failed to delete child: %w", err)
+		}
+
+		if err := m.addChild(parent, child); err != nil {
+			return fmt.Errorf("failed to add child: %w", err)
+		}
+	}
+
+	if child != "" {
+		if err := m.setParent(parent, child); err != nil {
+			return fmt.Errorf("failed to set parent: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (m *Manager) addToSet(property, snapshot, value string) error {
+	original, err := m.getProperty(property, snapshot)
+	if err != nil {
+		return err
+	}
+
+	dirtyList := append(strings.Split(original, branchSep), value)
+	uniqueList := unique(dirtyList)
+
+	return m.setProperty(property, strings.Join(uniqueList, branchSep), snapshot)
+}
+
+// deleteFromSet deletes a specific value from the snapshot property.
+func (m *Manager) deleteFromSet(prop, branch, snapshotName string) error {
+	propertyValue, err := m.getProperty(prop, snapshotName)
+	if err != nil {
+		return err
+	}
+
+	originalList := strings.Split(propertyValue, branchSep)
+	resultList := make([]string, 0, len(originalList)-1)
+
+	for _, item := range originalList {
+		if item != branch {
+			resultList = append(resultList, item)
+		}
+	}
+
+	value := strings.Join(resultList, branchSep)
+
+	if value == "" {
+		value = empty
+	}
+
+	return m.setProperty(prop, value, snapshotName)
+}
+
+func (m *Manager) getProperty(property, snapshotName string) (string, error) {
+	cmd := fmt.Sprintf("zfs get -H -o value %s %s", property, snapshotName)
+
+	out, err := m.runner.Run(cmd)
+	if err != nil {
+		return "", fmt.Errorf("failed to get property: %w. Out: %v", err, out)
+	}
+
+	value := strings.Trim(strings.TrimSpace(out), "-")
+
+	return value, nil
+}
+
+func (m *Manager) setProperty(property, value, snapshotName string) error {
+	if value == "" {
+		value = empty
+	}
+
+	cmd := fmt.Sprintf("zfs set %s=%q %s", property, value, snapshotName)
+
+	out, err := m.runner.Run(cmd)
+	if err != nil {
+		return fmt.Errorf("failed to set property: %w. Out: %v", err, out)
+	}
+
+	return nil
+}
+
+func unique(originalList []string) []string {
+	keys := make(map[string]struct{})
+	branchList := make([]string, 0, len(originalList))
+
+	for _, item := range originalList {
+		if _, ok := keys[item]; !ok {
+			if item == "" || item == "-" {
+				continue
+			}
+
+			keys[item] = struct{}{}
+
+			branchList = append(branchList, item)
+		}
+	}
+
+	return branchList
+}
+
+// Reset rolls back data to a ZFS snapshot.
+func (m *Manager) Reset(snapshotID string, _ thinclones.ResetOptions) error {
+	// zfs rollback pool@snapshot_20221019094237
+	cmd := fmt.Sprintf("zfs rollback %s", snapshotID)
+
+	if out, err := m.runner.Run(cmd, true); err != nil {
+		return fmt.Errorf("failed to roll back snapshot: %w. Out: %v", err, out)
+	}
+
+	return nil
+}
diff --git a/engine/internal/provision/thinclones/zfs/snapshots_filter.go b/engine/internal/provision/thinclones/zfs/snapshots_filter.go
index 05d2e0ca..d1dcaccb 100644
--- a/engine/internal/provision/thinclones/zfs/snapshots_filter.go
+++ b/engine/internal/provision/thinclones/zfs/snapshots_filter.go
@@ -41,6 +41,8 @@ var defaultFields = snapshotFields{
 	"usedbysnapshots",
 	"usedbychildren",
 	dataStateAtLabel,
+	branchProp,
+	messageProp,
 }
 
 var defaultSorting = snapshotSorting{
diff --git a/engine/internal/provision/thinclones/zfs/zfs.go b/engine/internal/provision/thinclones/zfs/zfs.go
index 14c17dde..1b76ac27 100644
--- a/engine/internal/provision/thinclones/zfs/zfs.go
+++ b/engine/internal/provision/thinclones/zfs/zfs.go
@@ -6,6 +6,7 @@ package zfs
 
 import (
+	"encoding/base64"
 	"fmt"
 	"path"
 	"strconv"
@@ -22,6 +23,7 @@ import (
 	"gitlab.com/postgres-ai/database-lab/v3/pkg/log"
 	"gitlab.com/postgres-ai/database-lab/v3/pkg/models"
 	"gitlab.com/postgres-ai/database-lab/v3/pkg/util"
+	"gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching"
 )
 
 const (
@@ -31,6 +33,9 @@ const (
 
 	// PoolMode defines the zfs filesystem name.
 	PoolMode = "zfs"
+
+	// Clone must have 3 segments: branch, name, revision.
+	numCloneSegments = 3
 )
 
 // ListEntry defines entry of ZFS list command.
@@ -116,6 +121,12 @@ type ListEntry struct {
 
 	// Data state timestamp.
 	DataStateAt time.Time
+
+	// Branch to which the snapshot belongs.
+	Branch string
+
+	// Message associated with the snapshot.
+	Message string
 }
 
 type setFunc func(s string) error
@@ -179,23 +190,26 @@ func (m *Manager) UpdateConfig(cfg Config) {
 }
 
 // CreateClone creates a new ZFS clone.
-func (m *Manager) CreateClone(cloneName, snapshotID string) error {
-	exists, err := m.cloneExists(cloneName)
+func (m *Manager) CreateClone(branchName, cloneName, snapshotID string, revision int) error {
+	cloneMountName := m.config.Pool.CloneName(branchName, cloneName, revision)
+
+	log.Dbg(cloneMountName)
+
+	exists, err := m.cloneExists(cloneMountName)
 	if err != nil {
-		return fmt.Errorf("cannot check the clone existence: %w", err)
+		return fmt.Errorf("cannot check existence of clone: %w", err)
 	}
 
-	if exists {
-		return fmt.Errorf("clone %q is already exists. Skip creation", cloneName)
+	if exists && revision == branching.DefaultRevision {
+		return fmt.Errorf("clone %q already exists; skipping", cloneName)
 	}
 
-	clonesMountDir := m.config.Pool.ClonesDir()
+	cloneMountLocation := m.config.Pool.CloneLocation(branchName, cloneName, revision)
+
+	cmd := fmt.Sprintf("zfs clone -p -o mountpoint=%s %s %s && chown -R %s %s",
+		cloneMountLocation, snapshotID, cloneMountName, m.config.OSUsername, cloneMountLocation)
 
-	cmd := "zfs clone " +
-		"-o mountpoint=" + clonesMountDir + "/" + cloneName + " " +
-		snapshotID + " " +
-		m.config.Pool.Name + "/" + cloneName + " && " +
-		"chown -R " + m.config.OSUsername + " " + clonesMountDir + "/" + cloneName
+	log.Dbg(cmd)
 
 	out, err := m.runner.Run(cmd)
 	if err != nil {
@@ -206,14 +220,18 @@ func (m *Manager) CreateClone(cloneName, snapshotID string) error {
 }
 
 // DestroyClone destroys a ZFS clone.
-func (m *Manager) DestroyClone(cloneName string) error {
-	exists, err := m.cloneExists(cloneName)
+func (m *Manager) DestroyClone(branchName, cloneName string, revision int) error {
+	cloneMountName := m.config.Pool.CloneName(branchName, cloneName, revision)
+
+	log.Dbg(cloneMountName)
+
+	exists, err := m.cloneExists(cloneMountName)
 	if err != nil {
 		return errors.Wrap(err, "clone does not exist")
 	}
 
 	if !exists {
-		log.Msg(fmt.Sprintf("clone %q is not exists. Skip deletion", cloneName))
+		log.Msg(fmt.Sprintf("clone %q does not exist; skipping", cloneMountName))
 		return nil
 	}
 
@@ -223,10 +241,14 @@
 	// this function to delete clones used during the preparation
 	// of baseline snapshots, we need to omit `-R`, to avoid
 	// unexpected deletion of users' clones.
-	cmd := fmt.Sprintf("zfs destroy -R %s/%s", m.config.Pool.Name, cloneName)
+	cmd := fmt.Sprintf("zfs destroy %s", cloneMountName)
 
 	if _, err = m.runner.Run(cmd); err != nil {
-		return errors.Wrap(err, "failed to run command")
+		if strings.Contains(cloneName, "clone_pre") {
+			return errors.Wrap(err, "failed to run command")
+		}
+
+		log.Dbg(err)
 	}
 
 	return nil
@@ -254,25 +276,54 @@ func (m *Manager) ListClonesNames() ([]string, error) {
 	}
 
 	cloneNames := []string{}
-	poolPrefix := m.config.Pool.Name + "/"
-	clonePoolPrefix := m.config.Pool.Name + "/" + util.ClonePrefix
+	branchPrefix := m.config.Pool.Name + "/branch/"
 
 	lines := strings.Split(strings.TrimSpace(cmdOutput), "\n")
 
 	for _, line := range lines {
-		if strings.HasPrefix(line, clonePoolPrefix) {
-			cloneNames = append(cloneNames, strings.TrimPrefix(line, poolPrefix))
+		bc, found := strings.CutPrefix(line, branchPrefix)
+		if !found {
+			// It's a pool dataset, not a clone. Skip it.
+			continue
+		}
+
+		segments := strings.Split(bc, "/")
+
+		if len(segments) != numCloneSegments {
+			// It's a branch dataset, not a clone. Skip it.
+			continue
+		}
+
+		cloneName := segments[1]
+
+		// TODO: check revision suffix.
+
+		if cloneName != "" && !strings.Contains(line, "_pre") {
+			cloneNames = append(cloneNames, cloneName)
 		}
 	}
 
 	return util.Unique(cloneNames), nil
}
 
+// CreateDataset creates a new dataset.
+func (m *Manager) CreateDataset(datasetName string) error {
+	datasetCmd := fmt.Sprintf("zfs create -p %s", datasetName)
+
+	cmdOutput, err := m.runner.Run(datasetCmd)
+	if err != nil {
+		log.Dbg(cmdOutput)
+		return fmt.Errorf("failed to create dataset: %w", err)
+	}
+
+	return nil
+}
+
 // CreateSnapshot creates a new snapshot.
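+// An empty poolSuffix targets the pool's root dataset; otherwise the target dataset is resolved via util.GetPoolName.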
func (m *Manager) CreateSnapshot(poolSuffix, dataStateAt string) (string, error) { poolName := m.config.Pool.Name if poolSuffix != "" { - poolName += "/" + poolSuffix + poolName = util.GetPoolName(m.config.Pool.Name, poolSuffix) } originalDSA := dataStateAt @@ -297,7 +348,7 @@ func (m *Manager) CreateSnapshot(poolSuffix, dataStateAt string) (string, error) } } - cmd := fmt.Sprintf("zfs snapshot -r %s", snapshotName) + cmd := fmt.Sprintf("zfs snapshot %s", snapshotName) if _, err := m.runner.Run(cmd, true); err != nil { return "", errors.Wrap(err, "failed to create snapshot") @@ -345,30 +396,113 @@ func getSnapshotName(pool, dataStateAt string) string { return fmt.Sprintf("%s@snapshot_%s", pool, dataStateAt) } -// RollbackSnapshot rollbacks ZFS snapshot. -func RollbackSnapshot(r runners.Runner, _ string, snapshot string) error { - cmd := fmt.Sprintf("zfs rollback -f -r %s", snapshot) +// DestroySnapshot destroys the snapshot. +func (m *Manager) DestroySnapshot(snapshotName string, opts thinclones.DestroyOptions) error { + rel, err := m.detectBranching(snapshotName) + if err != nil { + return fmt.Errorf("failed to inspect snapshot properties: %w", err) + } + + flags := "" + + if opts.Force { + flags = "-R" + } + + cmd := fmt.Sprintf("zfs destroy %s %s", flags, snapshotName) + + if _, err := m.runner.Run(cmd); err != nil { + return fmt.Errorf("failed to run command: %w", err) + } - if _, err := r.Run(cmd, true); err != nil { - return errors.Wrap(err, "failed to rollback a snapshot") + if rel != nil { + if err := m.moveBranchPointer(rel, snapshotName); err != nil { + return err + } } + m.removeSnapshotFromList(snapshotName) + return nil } -// DestroySnapshot destroys the snapshot. -func (m *Manager) DestroySnapshot(snapshotName string) error { - cmd := fmt.Sprintf("zfs destroy -R %s", snapshotName) +// DestroyDataset destroys dataset with all dependent objects. 
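+// It uses "zfs destroy -R", which recursively removes all dependent clones and snapshots.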
+func (m *Manager) DestroyDataset(dataset string) error { + cmd := fmt.Sprintf("zfs destroy -R %s", dataset) if _, err := m.runner.Run(cmd); err != nil { - return errors.Wrap(err, "failed to run command") + return fmt.Errorf("failed to run command: %w", err) } - m.removeSnapshotFromList(snapshotName) + return nil +} + +type snapshotRelation struct { + parent string + branch string +} + +func (m *Manager) detectBranching(snapshotName string) (*snapshotRelation, error) { + cmd := fmt.Sprintf("zfs list -H -o dle:parent,dle:branch %s", snapshotName) + + out, err := m.runner.Run(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to run command") + } + + response := strings.Fields(out) + + const fieldsCounter = 2 + + if len(response) != fieldsCounter || response[0] == "-" || response[1] == "-" { + return nil, nil + } + + return &snapshotRelation{ + parent: response[0], + branch: response[1], + }, nil +} + +func (m *Manager) moveBranchPointer(rel *snapshotRelation, snapshotName string) error { + if rel == nil { + return nil + } + + if err := m.DeleteChildProp(snapshotName, rel.parent); err != nil { + return fmt.Errorf("failed to delete a child property from snapshot %s: %w", rel.parent, err) + } + + parentProperties, err := m.GetSnapshotProperties(rel.parent) + if err != nil { + return fmt.Errorf("failed to get parent snapshot properties: %w", err) + } + + if parentProperties.Root == rel.branch { + if err := m.DeleteRootProp(rel.branch, rel.parent); err != nil { + return fmt.Errorf("failed to delete root property: %w", err) + } + } else { + if err := m.AddBranchProp(rel.branch, rel.parent); err != nil { + return fmt.Errorf("failed to set branch property to snapshot %s: %w", rel.parent, err) + } + } return nil } +func (m *Manager) checkDependentClones(snapshotName string) (string, error) { + clonesCmd := fmt.Sprintf("zfs list -t snapshot -H -o clones %s", snapshotName) + + clonesOutput, err := m.runner.Run(clonesCmd) + if err != nil { + log.Dbg(clonesOutput) + return "", fmt.Errorf("failed to list dependent clones: %w", err) + } + + return strings.Trim(strings.TrimSpace(clonesOutput), "-"), nil +} + // CleanupSnapshots destroys old snapshots considering retention limit and related clones. 
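+// Snapshots that still back existing clones are collected via getBusySnapshotList and excluded from destruction.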
func (m *Manager) CleanupSnapshots(retentionLimit int) ([]string, error) { clonesCmd := fmt.Sprintf("zfs list -S clones -o name,origin -H -r %s", m.config.Pool.Name) @@ -381,12 +515,14 @@ func (m *Manager) CleanupSnapshots(retentionLimit int) ([]string, error) { busySnapshots := m.getBusySnapshotList(clonesOutput) cleanupCmd := fmt.Sprintf( - "zfs list -t snapshot -H -o name -s %s -s creation -r %s | grep -v clone | head -n -%d %s"+ + "zfs list -t snapshot -H -o name -s %s -s creation -r %s | grep -v clone | grep _pre$ | head -n -%d %s"+ "| xargs -n1 --no-run-if-empty zfs destroy -R ", dataStateAtLabel, m.config.Pool.Name, retentionLimit, excludeBusySnapshots(busySnapshots)) out, err := m.runner.Run(cleanupCmd) if err != nil { + log.Dbg(out) + return nil, errors.Wrap(err, "failed to clean up snapshots") } @@ -398,9 +534,10 @@ func (m *Manager) CleanupSnapshots(retentionLimit int) ([]string, error) { } func (m *Manager) getBusySnapshotList(clonesOutput string) []string { - systemClones, userClones := make(map[string]string), make(map[string]struct{}) + systemClones := make(map[string]string) + branchingSnapshotDatasets := []string{} - userClonePrefix := m.config.Pool.Name + "/" + util.ClonePrefix + systemDatasetPrefix := fmt.Sprintf("%s/%s/%s/clone_pre_", m.config.Pool.Name, branching.BranchDir, branching.DefaultBranch) for _, line := range strings.Split(clonesOutput, "\n") { cloneLine := strings.FieldsFunc(line, unicode.IsSpace) @@ -409,25 +546,30 @@ func (m *Manager) getBusySnapshotList(clonesOutput string) []string { continue } - if strings.HasPrefix(cloneLine[0], userClonePrefix) { - origin := cloneLine[1] + // Make dataset-snapshot map for system snapshots. + if strings.HasPrefix(cloneLine[0], systemDatasetPrefix) { + systemClones[cloneLine[0]] = cloneLine[1] + continue + } - if idx := strings.Index(origin, "@"); idx != -1 { - origin = origin[:idx] + // Keep snapshots related to the user-defined datasets. + if strings.HasPrefix(cloneLine[1], systemDatasetPrefix) { + systemDataset, _, found := strings.Cut(cloneLine[1], "@") + if found { + branchingSnapshotDatasets = append(branchingSnapshotDatasets, systemDataset) } - userClones[origin] = struct{}{} - continue } - - systemClones[cloneLine[0]] = cloneLine[1] } - busySnapshots := make([]string, 0, len(userClones)) + busySnapshots := make([]string, 0, len(branchingSnapshotDatasets)) - for userClone := range userClones { - busySnapshots = append(busySnapshots, systemClones[userClone]) + for _, busyDataset := range branchingSnapshotDatasets { + busySnapshot, ok := systemClones[busyDataset] + if ok { + busySnapshots = append(busySnapshots, busySnapshot) + } } return busySnapshots @@ -444,7 +586,7 @@ func excludeBusySnapshots(busySnapshots []string) string { } // GetSessionState returns a state of a session. 
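+// The session dataset is resolved as <pool>/branch/<branch>/<name>.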
-func (m *Manager) GetSessionState(name string) (*resources.SessionState, error) { +func (m *Manager) GetSessionState(branch, name string) (*resources.SessionState, error) { entries, err := m.listFilesystems(m.config.Pool.Name) if err != nil { return nil, errors.Wrap(err, "failed to list filesystems") @@ -452,7 +594,7 @@ func (m *Manager) GetSessionState(name string) (*resources.SessionState, error) var sEntry *ListEntry - entryName := m.config.Pool.Name + "/" + name + entryName := path.Join(m.config.Pool.Name, "branch", branch, name) for _, entry := range entries { if entry.Name == entryName { @@ -534,7 +676,7 @@ func (m *Manager) SnapshotList() []resources.Snapshot { func (m *Manager) RefreshSnapshotList() { snapshots, err := m.getSnapshots() if err != nil { - log.Err("Failed to refresh snapshot list: ", err) + log.Err("failed to refresh snapshot list: ", err) return } @@ -557,6 +699,16 @@ func (m *Manager) getSnapshots() ([]resources.Snapshot, error) { continue } + branch := entry.Branch + + if branch == empty { + if parsedBranch := branching.ParseBranchNameFromSnapshot(entry.Name, m.config.Pool.Name); parsedBranch != "" { + branch = parsedBranch + } else { + branch = branching.DefaultBranch + } + } + snapshot := resources.Snapshot{ ID: entry.Name, CreatedAt: entry.Creation, @@ -564,6 +716,8 @@ func (m *Manager) getSnapshots() ([]resources.Snapshot, error) { Used: entry.Used, LogicalReferenced: entry.LogicalReferenced, Pool: m.config.Pool.Name, + Branch: branch, + Message: entry.Message, } snapshots = append(snapshots, snapshot) @@ -689,7 +843,7 @@ func (m *Manager) listDetails(filter snapshotFilter) ([]*ListEntry, error) { return nil, NewEmptyPoolError(filter.dsType, filter.pool) } - numberFields := len([]string(filter.fields)) // 14 + numberFields := len([]string(filter.fields)) // 16 entries := make([]*ListEntry, len(lines)-headerOffset) for i := headerOffset; i < len(lines); i++ { @@ -715,6 +869,7 @@ func (m *Manager) listDetails(filter snapshotFilter) ([]*ListEntry, error) { MountPoint: fields[2], Type: fields[5], Origin: fields[6], + Branch: fields[14], } setRules := []setTuple{ @@ -728,6 +883,7 @@ func (m *Manager) listDetails(filter snapshotFilter) ([]*ListEntry, error) { {field: fields[11], setFunc: zfsListEntry.setUsedBySnapshots}, {field: fields[12], setFunc: zfsListEntry.setUsedByChildren}, {field: fields[13], setFunc: zfsListEntry.setDataStateAt}, + {field: fields[15], setFunc: zfsListEntry.setMessage}, } for _, rule := range setRules { @@ -859,6 +1015,22 @@ func (z *ListEntry) setDataStateAt(field string) error { return nil } +func (z *ListEntry) setMessage(field string) error { + if field == empty || field == "" { + z.Message = field + return nil + } + + decoded, err := base64.StdEncoding.DecodeString(field) + if err != nil { + return err + } + + z.Message = string(decoded) + + return nil +} + // PoolMappings provides a mapping of pool name and mount point directory. 
func PoolMappings(runner runners.Runner, mountDir, preSnapshotSuffix string) (map[string]string, error) { listCmd := "zfs list -Ho name,mountpoint -t filesystem | grep -v " + preSnapshotSuffix diff --git a/engine/internal/provision/thinclones/zfs/zfs_test.go b/engine/internal/provision/thinclones/zfs/zfs_test.go index db2acecd..0001c8a6 100644 --- a/engine/internal/provision/thinclones/zfs/zfs_test.go +++ b/engine/internal/provision/thinclones/zfs/zfs_test.go @@ -21,8 +21,8 @@ func (r runnerMock) Run(string, ...bool) (string, error) { func TestListClones(t *testing.T) { const ( - poolName = "datastore" - clonePrefix = "dblab_clone_" + poolName = "datastore" + preSnapshotSuffix = "_pre" ) testCases := []struct { @@ -36,48 +36,48 @@ func TestListClones(t *testing.T) { }, { caseName: "single clone", - cmdOutput: `datastore/clone_pre_20200831030000 -datastore/dblab_clone_6000 + cmdOutput: `datastore/branch/main/clone_pre_20200831030000 +datastore/branch/main/cls19p20l4rc73bc2v9g/r0 `, cloneNames: []string{ - "dblab_clone_6000", + "cls19p20l4rc73bc2v9g", }, }, { caseName: "multiple clones", - cmdOutput: `datastore/clone_pre_20200831030000 -datastore/dblab_clone_6000 -datastore/dblab_clone_6001 + cmdOutput: `datastore/branch/main/clone_pre_20200831030000 +datastore/branch/main/cls19p20l4rc73bc2v9g/r0 +datastore/branch/main/cls184a0l4rc73bc2v90/r0 `, cloneNames: []string{ - "dblab_clone_6000", - "dblab_clone_6001", + "cls19p20l4rc73bc2v9g", + "cls184a0l4rc73bc2v90", }, }, { caseName: "clone duplicate", - cmdOutput: `datastore/clone_pre_20200831030000 -datastore/dblab_clone_6000 -datastore/dblab_clone_6000 + cmdOutput: `datastore/branch/main/clone_pre_20200831030000 +datastore/branch/main/cls19p20l4rc73bc2v9g/r0 +datastore/branch/main/cls19p20l4rc73bc2v9g/r1 `, cloneNames: []string{ - "dblab_clone_6000", + "cls19p20l4rc73bc2v9g", }, }, { caseName: "different pool", - cmdOutput: `datastore/clone_pre_20200831030000 -dblab_pool/dblab_clone_6001 -datastore/dblab_clone_6000 + cmdOutput: `datastore/branch/main/clone_pre_20200831030000 +dblab_pool/branch/main/cls19p20l4rc73bc2v9g/r0 +datastore/branch/main/cls184a0l4rc73bc2v90/r0 `, cloneNames: []string{ - "dblab_clone_6000", + "cls184a0l4rc73bc2v90", }, }, { caseName: "no matched clone", - cmdOutput: `datastore/clone_pre_20200831030000 -dblab_pool/dblab_clone_6001 + cmdOutput: `datastore/branch/main/clone_pre_20200831030000 +dblab_pool/branch/main/cls19p20l4rc73bc2v9g/r0 `, cloneNames: []string{}, }, @@ -90,7 +90,7 @@ dblab_pool/dblab_clone_6001 }, config: Config{ Pool: resources.NewPool(poolName), - PreSnapshotSuffix: clonePrefix, + PreSnapshotSuffix: preSnapshotSuffix, }, } @@ -115,25 +115,35 @@ func TestFailedListClones(t *testing.T) { } func TestBusySnapshotList(t *testing.T) { - m := Manager{config: Config{Pool: &resources.Pool{Name: "dblab_pool"}}} - - out := `dblab_pool - -dblab_pool/clone_pre_20210127105215 dblab_pool@snapshot_20210127105215_pre -dblab_pool/clone_pre_20210127113000 dblab_pool@snapshot_20210127113000_pre -dblab_pool/clone_pre_20210127120000 dblab_pool@snapshot_20210127120000_pre -dblab_pool/clone_pre_20210127123000 dblab_pool@snapshot_20210127123000_pre -dblab_pool/clone_pre_20210127130000 dblab_pool@snapshot_20210127130000_pre -dblab_pool/clone_pre_20210127133000 dblab_pool@snapshot_20210127133000_pre -dblab_pool/clone_pre_20210127140000 dblab_pool@snapshot_20210127140000_pre -dblab_pool/dblab_clone_6000 dblab_pool/clone_pre_20210127133000@snapshot_20210127133008 -dblab_pool/dblab_clone_6001 
dblab_pool/clone_pre_20210127123000@snapshot_20210127133008 + const preSnapshotSuffix = "_pre" + m := Manager{config: Config{Pool: &resources.Pool{Name: "test_dblab_pool"}, PreSnapshotSuffix: preSnapshotSuffix}} + + out := `test_dblab_pool - +test_dblab_pool/branch - +test_dblab_pool/branch/main - +test_dblab_pool/branch/main/clone_pre_20250403061908 - +test_dblab_pool/branch/main/clone_pre_20250403061908/r0 test_dblab_pool@snapshot_20250403061908_pre +test_dblab_pool/branch/main/clone_pre_20250403085500 - +test_dblab_pool/branch/main/clone_pre_20250403085500/r0 test_dblab_pool@snapshot_20250403085500_pre +test_dblab_pool/branch/main/clone_pre_20250403090000 - +test_dblab_pool/branch/main/clone_pre_20250403090000/r0 test_dblab_pool@snapshot_20250403090000_pre +test_dblab_pool/branch/main/clone_pre_20250403090500 - +test_dblab_pool/branch/main/clone_pre_20250403090500/r0 test_dblab_pool@snapshot_20250403090500_pre +test_dblab_pool/branch/main/cvn2j50n9i6s73as3k9g - +test_dblab_pool/branch/main/cvn2j50n9i6s73as3k9g/r0 test_dblab_pool/branch/main/clone_pre_20250403061908/r0@snapshot_20250403061908 +test_dblab_pool/branch/main/cvn2kdon9i6s73as3ka0 - +test_dblab_pool/branch/main/cvn2kdon9i6s73as3ka0/r0 test_dblab_pool/branch/new001@20250403062641 +test_dblab_pool/branch/new001 test_dblab_pool/branch/main/cvn2j50n9i6s73as3k9g/r0@20250403062503 +test_dblab_pool/branch/new001/cvn4n38n9i6s73as3kag - +test_dblab_pool/branch/new001/cvn4n38n9i6s73as3kag/r0 test_dblab_pool/branch/new001@20250403062641 ` - expected := []string{"dblab_pool@snapshot_20210127133000_pre", "dblab_pool@snapshot_20210127123000_pre"} + expected := []string{ + "test_dblab_pool@snapshot_20250403061908_pre", + } list := m.getBusySnapshotList(out) - require.Equal(t, 2, len(list)) - assert.Contains(t, list, expected[0]) - assert.Contains(t, list, expected[1]) + require.Len(t, list, len(expected)) + assert.ElementsMatch(t, list, expected) } func TestExcludingBusySnapshots(t *testing.T) { diff --git a/engine/internal/retrieval/config/config.go b/engine/internal/retrieval/config/config.go index 6d2d06d4..caa859e9 100644 --- a/engine/internal/retrieval/config/config.go +++ b/engine/internal/retrieval/config/config.go @@ -14,15 +14,14 @@ import ( // Config describes of data retrieval jobs. type Config struct { - Refresh *Refresh `yaml:"refresh"` + Refresh Refresh `yaml:"refresh"` Jobs []string `yaml:"jobs,flow"` JobsSpec map[string]JobSpec `yaml:"spec"` } // Refresh describes full-refresh options. type Refresh struct { - Timetable string `yaml:"timetable"` - SkipStartRefresh bool `yaml:"skipStartRefresh"` + Timetable string `yaml:"timetable"` } // JobSpec contains details about a job. diff --git a/engine/internal/retrieval/dbmarker/dbmarker.go b/engine/internal/retrieval/dbmarker/dbmarker.go index 8acb5892..4d6e3b97 100644 --- a/engine/internal/retrieval/dbmarker/dbmarker.go +++ b/engine/internal/retrieval/dbmarker/dbmarker.go @@ -6,13 +6,34 @@ package dbmarker import ( + "bytes" + "fmt" "os" "path" + "strings" "github.com/pkg/errors" "gopkg.in/yaml.v2" ) +const ( + configDir = ".dblab" + configFilename = "dbmarker" + + refsDir = "refs" + branchesDir = "branch" + snapshotsDir = "snapshot" + headFile = "HEAD" + logsFile = "logs" + mainBranch = "main" + + // LogicalDataType defines a logical data type. + LogicalDataType = "logical" + + // PhysicalDataType defines a physical data type. + PhysicalDataType = "physical" +) + // Marker marks database data depends on a retrieval process. 
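+// It also maintains branching metadata (branch HEADs and snapshot refs) under the .dblab directory.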
type Marker struct { dataPath string @@ -31,21 +52,22 @@ type Config struct { DataType string `yaml:"dataType"` } -const ( - // ConfigDir defines the name of the dbMarker configuration directory. - ConfigDir = ".dblab" - configFilename = "dbmarker" - - // LogicalDataType defines a logical data type. - LogicalDataType = "logical" +// Head describes content of HEAD file. +type Head struct { + Ref string `yaml:"ref"` +} - // PhysicalDataType defines a physical data type. - PhysicalDataType = "physical" -) +// SnapshotInfo describes snapshot info. +type SnapshotInfo struct { + ID string + Parent string + CreatedAt string + StateAt string +} // Init inits DB marker for the data directory. func (m *Marker) initDBLabDirectory() error { - dirname := path.Join(m.dataPath, ConfigDir) + dirname := path.Join(m.dataPath, configDir) if err := os.MkdirAll(dirname, 0755); err != nil { return errors.Wrapf(err, "cannot create a DBMarker directory %s", dirname) } @@ -59,7 +81,7 @@ func (m *Marker) CreateConfig() error { return errors.Wrap(err, "failed to init DBMarker") } - dbMarkerFile, err := os.OpenFile(m.buildFileName(), os.O_RDWR|os.O_CREATE, 0600) + dbMarkerFile, err := os.OpenFile(m.buildFileName(configFilename), os.O_RDWR|os.O_CREATE, 0600) if err != nil { return err } @@ -71,7 +93,7 @@ func (m *Marker) CreateConfig() error { // GetConfig provides a loaded DBMarker config. func (m *Marker) GetConfig() (*Config, error) { - configData, err := os.ReadFile(m.buildFileName()) + configData, err := os.ReadFile(m.buildFileName(configFilename)) if err != nil { return nil, err } @@ -96,10 +118,243 @@ func (m *Marker) SaveConfig(cfg *Config) error { return err } - return os.WriteFile(m.buildFileName(), configData, 0600) + return os.WriteFile(m.buildFileName(configFilename), configData, 0600) +} + +// buildFileName builds a DBMarker filename. +func (m *Marker) buildFileName(filename string) string { + return path.Join(m.dataPath, configDir, filename) +} + +// InitBranching creates structures for data branching. +func (m *Marker) InitBranching() error { + branchesDir := m.buildBranchesPath() + if err := os.MkdirAll(branchesDir, 0755); err != nil { + return fmt.Errorf("cannot create branches directory %s: %w", branchesDir, err) + } + + snapshotsDir := m.buildSnapshotsPath() + if err := os.MkdirAll(snapshotsDir, 0755); err != nil { + return fmt.Errorf("cannot create snapshots directory %s: %w", snapshotsDir, err) + } + + f, err := os.Create(m.buildFileName(headFile)) + if err != nil { + return fmt.Errorf("cannot create HEAD file: %w", err) + } + + _ = f.Close() + + return nil +} + +// InitMainBranch creates a new main branch. 
+func (m *Marker) InitMainBranch(infos []SnapshotInfo) error {
+	var head Head
+
+	mainDir := m.buildBranchName(mainBranch)
+	if err := os.MkdirAll(mainDir, 0755); err != nil {
+		return fmt.Errorf("cannot create branch directory %s: %w", mainDir, err)
+	}
+
+	var bb bytes.Buffer
+
+	for _, info := range infos {
+		if err := m.storeSnapshotInfo(info); err != nil {
+			return err
+		}
+
+		head.Ref = buildSnapshotRef(info.ID)
+		logLine := strings.Join([]string{info.Parent, info.ID, info.CreatedAt, info.StateAt}, " ") + "\n"
+		bb.WriteString(logLine)
+	}
+
+	if err := os.WriteFile(m.buildBranchArtifactPath(mainBranch, logsFile), bb.Bytes(), 0755); err != nil {
+		return fmt.Errorf("cannot store file with branch logs: %w", err)
+	}
+
+	headData, err := yaml.Marshal(head)
+	if err != nil {
+		return fmt.Errorf("cannot prepare HEAD metadata: %w", err)
+	}
+
+	if err := os.WriteFile(m.buildFileName(headFile), headData, 0755); err != nil {
+		return fmt.Errorf("cannot store file with HEAD metadata: %w", err)
+	}
+
+	if err := os.WriteFile(m.buildBranchArtifactPath(mainBranch, headFile), headData, 0755); err != nil {
+		return fmt.Errorf("cannot store file with HEAD metadata: %w", err)
+	}
+
+	return nil
+}
+
+func (m *Marker) storeSnapshotInfo(info SnapshotInfo) error {
+	snapshotName := m.buildSnapshotName(info.ID)
+
+	data, err := yaml.Marshal(info)
+	if err != nil {
+		return fmt.Errorf("cannot prepare snapshot metadata %s: %w", snapshotName, err)
+	}
+
+	if err := os.WriteFile(snapshotName, data, 0755); err != nil {
+		return fmt.Errorf("cannot store file with snapshot metadata %s: %w", snapshotName, err)
+	}
+
+	return nil
+}
+
+// CreateBranch creates a new DLE data branch.
+func (m *Marker) CreateBranch(branch, base string) error {
+	dirname := m.buildBranchName(branch)
+	if err := os.MkdirAll(dirname, 0755); err != nil {
+		return fmt.Errorf("cannot create branch directory %s: %w", dirname, err)
+	}
+
+	headPath := m.buildBranchArtifactPath(base, headFile)
+
+	readData, err := os.ReadFile(headPath)
+	if err != nil {
+		return fmt.Errorf("cannot read file %s: %w", headPath, err)
+	}
+
+	branchPath := m.buildBranchArtifactPath(branch, headFile)
+
+	if err := os.WriteFile(branchPath, readData, 0755); err != nil {
+		return fmt.Errorf("cannot write file %s: %w", branchPath, err)
+	}
+
+	return nil
+}
+
+// ListBranches returns branch list.
+func (m *Marker) ListBranches() ([]string, error) {
+	branches := []string{}
+
+	dirs, err := os.ReadDir(m.buildBranchesPath())
+	if err != nil {
+		return nil, fmt.Errorf("failed to read repository: %w", err)
+	}
+
+	for _, dir := range dirs {
+		if !dir.IsDir() {
+			continue
+		}
+
+		branches = append(branches, dir.Name())
+	}
+
+	return branches, nil
+}
+
+// GetSnapshotID returns the snapshot pointer for the given branch.
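+// It resolves the branch HEAD ref and reads the snapshot metadata file the ref points to.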
+func (m *Marker) GetSnapshotID(branch string) (string, error) { + headPath := m.buildBranchArtifactPath(branch, headFile) + + readData, err := os.ReadFile(headPath) + if err != nil { + return "", fmt.Errorf("cannot read file %s: %w", headPath, err) + } + + h := &Head{} + if err := yaml.Unmarshal(readData, &h); err != nil { + return "", fmt.Errorf("cannot read reference: %w", err) + } + + snapshotsPath := m.buildPathFromRef(h.Ref) + + snapshotData, err := os.ReadFile(snapshotsPath) + if err != nil { + return "", fmt.Errorf("cannot read file %s: %w", snapshotsPath, err) + } + + snInfo := &SnapshotInfo{} + + if err := yaml.Unmarshal(snapshotData, &snInfo); err != nil { + return "", fmt.Errorf("cannot read reference: %w", err) + } + + return snInfo.ID, nil +} + +// SaveSnapshotRef stores snapshot reference for branch. +func (m *Marker) SaveSnapshotRef(branch, snapshotID string) error { + h, err := m.getBranchHead(branch) + if err != nil { + return err + } + + h.Ref = buildSnapshotRef(snapshotID) + + if err := m.writeBranchHead(h, branch); err != nil { + return fmt.Errorf("cannot write branch head: %w", err) + } + + return nil +} + +func (m *Marker) getBranchHead(branch string) (*Head, error) { + headPath := m.buildBranchArtifactPath(branch, headFile) + + readData, err := os.ReadFile(headPath) + if err != nil { + return nil, fmt.Errorf("cannot read file %s: %w", headPath, err) + } + + h := &Head{} + if err := yaml.Unmarshal(readData, &h); err != nil { + return nil, fmt.Errorf("cannot read reference: %w", err) + } + + return h, nil +} + +func (m *Marker) writeBranchHead(h *Head, branch string) error { + headPath := m.buildBranchArtifactPath(branch, headFile) + + writeData, err := yaml.Marshal(h) + if err != nil { + return fmt.Errorf("cannot marshal structure: %w", err) + } + + if err := os.WriteFile(headPath, writeData, 0755); err != nil { + return fmt.Errorf("cannot write file %s: %w", headPath, err) + } + + return nil +} + +// buildBranchesPath builds path of branches dir. +func (m *Marker) buildBranchesPath() string { + return path.Join(m.dataPath, configDir, refsDir, branchesDir) +} + +// buildBranchName builds a branch name. +func (m *Marker) buildBranchName(branch string) string { + return path.Join(m.buildBranchesPath(), branch) +} + +// buildBranchArtifactPath builds a branch artifact name. +func (m *Marker) buildBranchArtifactPath(branch, artifact string) string { + return path.Join(m.buildBranchName(branch), artifact) +} + +// buildSnapshotsPath builds path of snapshots dir. +func (m *Marker) buildSnapshotsPath() string { + return path.Join(m.dataPath, configDir, refsDir, snapshotsDir) +} + +// buildSnapshotName builds a snapshot file name. +func (m *Marker) buildSnapshotName(snapshotID string) string { + return path.Join(m.buildSnapshotsPath(), snapshotID) +} + +// buildSnapshotRef builds snapshot ref. +func buildSnapshotRef(snapshotID string) string { + return path.Join(refsDir, snapshotsDir, snapshotID) } -// buildFileName builds a DBMarker config filename. -func (m *Marker) buildFileName() string { - return path.Join(m.dataPath, ConfigDir, configFilename) +// buildPathFromRef builds path from ref. 
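+// Refs are relative to the .dblab directory, e.g. "refs/snapshot/<snapshotID>" resolves to "<dataPath>/.dblab/refs/snapshot/<snapshotID>".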
+func (m *Marker) buildPathFromRef(ref string) string { + return path.Join(m.dataPath, configDir, ref) } diff --git a/engine/internal/retrieval/engine/postgres/logical/dump.go b/engine/internal/retrieval/engine/postgres/logical/dump.go index 5f36b9f5..f5947519 100644 --- a/engine/internal/retrieval/engine/postgres/logical/dump.go +++ b/engine/internal/retrieval/engine/postgres/logical/dump.go @@ -88,7 +88,6 @@ type DumpOptions struct { Source Source `yaml:"source"` Databases map[string]DumpDefinition `yaml:"databases"` ParallelJobs int `yaml:"parallelJobs"` - IgnoreErrors bool `yaml:"ignoreErrors"` Restore ImmediateRestore `yaml:"immediateRestore"` CustomOptions []string `yaml:"customOptions"` } @@ -137,6 +136,7 @@ type Connection struct { // ImmediateRestore contains options for direct data restore without saving the dump file on disk. type ImmediateRestore struct { Enabled bool `yaml:"enabled"` + ForceInit bool `yaml:"forceInit"` Configs map[string]string `yaml:"configs"` CustomOptions []string `yaml:"customOptions"` } @@ -280,7 +280,11 @@ func (d *DumpJob) Run(ctx context.Context) (err error) { } if d.DumpOptions.Restore.Enabled && !isEmpty { - log.Warn("The data directory is not empty. Existing data will be overwritten.") + if !d.DumpOptions.Restore.ForceInit { + return errors.New("the data directory is not empty. Use 'forceInit' or empty the data directory") + } + + log.Msg("The data directory is not empty. Existing data may be overwritten.") if err := updateConfigs(dataDir, d.DumpOptions.Restore.Configs); err != nil { return fmt.Errorf("failed to update configs: %w", err) @@ -416,7 +420,7 @@ func collectDiagnostics(ctx context.Context, client *client.Client, postgresName Value: fmt.Sprintf("%s=%s", cont.DBLabControlLabel, cont.DBLabDumpLabel)}) if err := diagnostic.CollectDiagnostics(ctx, client, filterArgs, postgresName, dataDir); err != nil { - log.Err("Failed to collect container diagnostics", err) + log.Err("failed to collect container diagnostics", err) } } @@ -508,11 +512,9 @@ func (d *DumpJob) dumpDatabase(ctx context.Context, dumpContID, dbName string, d Cmd: dumpCommand, Env: d.getExecEnvironmentVariables(), }); err != nil { - log.Err("Dump command failed: ", output) + log.Err("dump command failed: ", output) - if !d.DumpOptions.IgnoreErrors { - return fmt.Errorf("failed to dump a database: %w. Output: %s", err, output) - } + return fmt.Errorf("failed to dump a database: %w. Output: %s", err, output) } log.Msg(fmt.Sprintf("Dumping job for the database %q has been finished", dbName)) @@ -737,6 +739,10 @@ func (d *DumpJob) buildLogicalRestoreCommand(dbName string) []string { restoreCmd = append(restoreCmd, "--create") } + if d.Restore.ForceInit { + restoreCmd = append(restoreCmd, "--clean", "--if-exists") + } + restoreCmd = append(restoreCmd, d.DumpOptions.Restore.CustomOptions...) 
return restoreCmd diff --git a/engine/internal/retrieval/engine/postgres/logical/restore.go b/engine/internal/retrieval/engine/postgres/logical/restore.go index 43b24440..bec2b9c3 100644 --- a/engine/internal/retrieval/engine/postgres/logical/restore.go +++ b/engine/internal/retrieval/engine/postgres/logical/restore.go @@ -102,6 +102,7 @@ type RestoreOptions struct { DockerImage string `yaml:"dockerImage"` ContainerConfig map[string]interface{} `yaml:"containerConfig"` Databases map[string]DumpDefinition `yaml:"databases"` + ForceInit bool `yaml:"forceInit"` IgnoreErrors bool `yaml:"ignoreErrors"` ParallelJobs int `yaml:"parallelJobs"` Configs map[string]string `yaml:"configs"` @@ -210,10 +211,6 @@ func (r *RestoreJob) Run(ctx context.Context) (err error) { return fmt.Errorf("failed to explore the data directory %q: %w", dataDir, err) } - if !isEmpty { - log.Warn(fmt.Sprintf("The data directory %q is not empty. Existing data will be overwritten.", dataDir)) - } - if err := tools.PullImage(ctx, r.dockerClient, r.RestoreOptions.DockerImage); err != nil { return errors.Wrap(err, "failed to scan image pulling response") } @@ -243,6 +240,16 @@ func (r *RestoreJob) Run(ctx context.Context) (err error) { } }() + if !isEmpty { + log.Warn(fmt.Sprintf("The data directory %q is not empty. Existing data will be overwritten.", dataDir)) + + log.Msg("Clean up data directory:", dataDir) + + if err := tools.CleanupDir(dataDir); err != nil { + return fmt.Errorf("failed to clean up data directory before restore: %w", err) + } + } + log.Msg(fmt.Sprintf("Running container: %s. ID: %v", r.restoreContainerName(), containerID)) if err := r.dockerClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil { @@ -521,7 +528,7 @@ func (r *RestoreJob) getDirectoryDumpDefinition(ctx context.Context, contID, dum dbName, err := r.extractDBNameFromDump(ctx, contID, dumpDir) if err != nil { - log.Err("Invalid dump: ", err) + log.Err("invalid dump: ", err) return DumpDefinition{}, errors.Wrap(err, "invalid database name") } @@ -590,7 +597,7 @@ func (r *RestoreJob) restoreDB(ctx context.Context, contID, dbName string, dbDef }) if err != nil && !r.RestoreOptions.IgnoreErrors { - log.Err("Restore command failed: ", output) + log.Err("restore command failed: ", output) return fmt.Errorf("failed to exec restore command: %w. 
Output: %s", err, output) } @@ -600,7 +607,7 @@ func (r *RestoreJob) restoreDB(ctx context.Context, contID, dbName string, dbDef } if err := r.defineDSA(ctx, dbDefinition, contID, dbName); err != nil { - log.Err("Failed to define DataStateAt: ", err) + log.Err("failed to define DataStateAt: ", err) } if err := r.markDatabase(); err != nil { @@ -771,7 +778,7 @@ func (r *RestoreJob) markDatabase() error { func (r *RestoreJob) updateDataStateAt() { dsaTime, err := time.Parse(util.DataStateAtFormat, r.dbMark.DataStateAt) if err != nil { - log.Err("Invalid value for DataStateAt: ", r.dbMark.DataStateAt) + log.Err("invalid value for DataStateAt: ", r.dbMark.DataStateAt) return } diff --git a/engine/internal/retrieval/engine/postgres/logical/restore_test.go b/engine/internal/retrieval/engine/postgres/logical/restore_test.go index 94bba845..79e72045 100644 --- a/engine/internal/retrieval/engine/postgres/logical/restore_test.go +++ b/engine/internal/retrieval/engine/postgres/logical/restore_test.go @@ -34,6 +34,7 @@ func TestRestoreCommandBuilding(t *testing.T) { { copyOptions: RestoreOptions{ ParallelJobs: 1, + ForceInit: false, Databases: map[string]DumpDefinition{ "testDB": { Format: customFormat, @@ -47,12 +48,14 @@ func TestRestoreCommandBuilding(t *testing.T) { { copyOptions: RestoreOptions{ ParallelJobs: 4, + ForceInit: true, }, - command: []string{"pg_restore", "--username", "john", "--dbname", "postgres", "--create", "--jobs", "4"}, + command: []string{"pg_restore", "--username", "john", "--dbname", "postgres", "--create", "--clean", "--if-exists", "--jobs", "4"}, }, { copyOptions: RestoreOptions{ ParallelJobs: 2, + ForceInit: false, Databases: map[string]DumpDefinition{"testDB": {}}, DumpLocation: "/tmp/db.dump", CustomOptions: []string{"--no-privileges", "--no-owner", "--exit-on-error"}, diff --git a/engine/internal/retrieval/engine/postgres/physical/physical.go b/engine/internal/retrieval/engine/postgres/physical/physical.go index 2bfe97e4..62f719e3 100644 --- a/engine/internal/retrieval/engine/postgres/physical/physical.go +++ b/engine/internal/retrieval/engine/postgres/physical/physical.go @@ -176,7 +176,7 @@ func (r *RestoreJob) Run(ctx context.Context) (err error) { if err == nil && r.CopyOptions.Sync.Enabled { go func() { if syncErr := r.runSyncInstance(ctx); syncErr != nil { - log.Err("Failed to run sync instance: ", syncErr) + log.Err("failed to run sync instance: ", syncErr) if ctx.Err() != nil { // if context was canceled @@ -249,7 +249,7 @@ func (r *RestoreJob) Run(ctx context.Context) (err error) { log.Msg("Restoring job has been finished") if err := r.markDatabaseData(); err != nil { - log.Err("Failed to mark database data: ", err) + log.Err("failed to mark database data: ", err) } cfgManager, err := pgconfig.NewCorrector(dataDir) @@ -350,7 +350,7 @@ func (r *RestoreJob) runSyncInstance(ctx context.Context) (err error) { Value: fmt.Sprintf("%s=%s", cont.DBLabControlLabel, cont.DBLabSyncLabel)}) if err := diagnostic.CollectDiagnostics(ctx, r.dockerClient, filterArgs, r.syncInstanceName(), r.fsPool.DataDir()); err != nil { - log.Err("Failed to collect container diagnostics", err) + log.Err("failed to collect container diagnostics", err) } } }() diff --git a/engine/internal/retrieval/engine/postgres/physical/wal_g.go b/engine/internal/retrieval/engine/postgres/physical/wal_g.go index cdb934b8..0abb2b36 100644 --- a/engine/internal/retrieval/engine/postgres/physical/wal_g.go +++ b/engine/internal/retrieval/engine/postgres/physical/wal_g.go @@ -106,7 +106,7 @@ func 
getLastBackupName(ctx context.Context, dockerClient *client.Client, contain } // fallback to fetching last backup from list - log.Err("Failed to parse last backup from wal-g details", err) + log.Err("failed to parse last backup from wal-g details", err) } return parseLastBackupFromList(ctx, dockerClient, containerID) diff --git a/engine/internal/retrieval/engine/postgres/snapshot/logical.go b/engine/internal/retrieval/engine/postgres/snapshot/logical.go index c596d6d5..bc555e4a 100644 --- a/engine/internal/retrieval/engine/postgres/snapshot/logical.go +++ b/engine/internal/retrieval/engine/postgres/snapshot/logical.go @@ -149,6 +149,12 @@ func (s *LogicalInitial) Run(ctx context.Context) error { } } + log.Dbg("Cleaning up old snapshots from a dataset") + + if _, err := s.cloneManager.CleanupSnapshots(0); err != nil { + return errors.Wrap(err, "failed to destroy old snapshots") + } + dataStateAt := extractDataStateAt(s.dbMarker) if _, err := s.cloneManager.CreateSnapshot("", dataStateAt); err != nil { @@ -240,7 +246,7 @@ func (s *LogicalInitial) runPreprocessingQueries(ctx context.Context, dataDir st Value: fmt.Sprintf("%s=%s", cont.DBLabControlLabel, cont.DBLabPatchLabel)}) if err := diagnostic.CollectDiagnostics(ctx, s.dockerClient, filterArgs, s.patchContainerName(), dataDir); err != nil { - log.Err("Failed to collect container diagnostics", err) + log.Err("failed to collect container diagnostics", err) } } }() diff --git a/engine/internal/retrieval/engine/postgres/snapshot/physical.go b/engine/internal/retrieval/engine/postgres/snapshot/physical.go index 3a089a9a..f49b9d8d 100644 --- a/engine/internal/retrieval/engine/postgres/snapshot/physical.go +++ b/engine/internal/retrieval/engine/postgres/snapshot/physical.go @@ -32,6 +32,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/provision/databases/postgres/pgconfig" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/pool" "gitlab.com/postgres-ai/database-lab/v3/internal/provision/resources" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones" "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/config" "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/dbmarker" "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/engine/postgres/tools" @@ -47,6 +48,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/pkg/config/global" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/util" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" ) const ( @@ -302,6 +304,8 @@ func (p *PhysicalInitial) Run(ctx context.Context) (err error) { } func (p *PhysicalInitial) run(ctx context.Context) (err error) { + log.Msg("Run job: ", p.Name()) + select { case <-ctx.Done(): if p.scheduler != nil { @@ -346,25 +350,25 @@ func (p *PhysicalInitial) run(ctx context.Context) (err error) { defer func() { if err != nil { - if errDestroy := p.cloneManager.DestroySnapshot(snapshotName); errDestroy != nil { - log.Err(fmt.Sprintf("Failed to destroy the %q snapshot: %v", snapshotName, errDestroy)) + if errDestroy := p.cloneManager.DestroySnapshot(snapshotName, thinclones.DestroyOptions{}); errDestroy != nil { + log.Err(fmt.Sprintf("failed to destroy %q snapshot: %v", snapshotName, errDestroy)) } } }() - if err := p.cloneManager.CreateClone(cloneName, snapshotName); err != nil { + if err := p.cloneManager.CreateClone(branching.DefaultBranch, cloneName, snapshotName, branching.DefaultRevision); err != nil { return errors.Wrapf(err, "failed to create \"pre\" clone 
%s", cloneName) } - cloneDataDir := path.Join(p.fsPool.ClonesDir(), cloneName, p.fsPool.DataSubDir) + cloneDataDir := path.Join(p.fsPool.CloneLocation(branching.DefaultBranch, cloneName, branching.DefaultRevision), p.fsPool.DataSubDir) if err := fs.CleanupLogsDir(cloneDataDir); err != nil { log.Warn("Failed to clean up logs directory:", err.Error()) } defer func() { if err != nil { - if errDestroy := p.cloneManager.DestroyClone(cloneName); errDestroy != nil { - log.Err(fmt.Sprintf("Failed to destroy clone %q: %v", cloneName, errDestroy)) + if errDestroy := p.cloneManager.DestroyClone(branching.DefaultBranch, cloneName, branching.DefaultRevision); errDestroy != nil { + log.Err(fmt.Sprintf("failed to destroy clone %q: %v", cloneName, errDestroy)) } } }() @@ -389,8 +393,9 @@ func (p *PhysicalInitial) run(ctx context.Context) (err error) { } // Create a snapshot. - if _, err := p.cloneManager.CreateSnapshot(cloneName, p.dbMark.DataStateAt); err != nil { - return errors.Wrap(err, "failed to create a snapshot") + fullClonePath := path.Join(branching.BranchDir, branching.DefaultBranch, cloneName, branching.RevisionSegment(branching.DefaultRevision)) + if _, err := p.cloneManager.CreateSnapshot(fullClonePath, p.dbMark.DataStateAt); err != nil { + return errors.Wrap(err, "failed to create snapshot") } p.updateDataStateAt() @@ -621,7 +626,7 @@ func (p *PhysicalInitial) promoteInstance(ctx context.Context, clonePath string, Value: fmt.Sprintf("%s=%s", cont.DBLabControlLabel, cont.DBLabPromoteLabel)}) if err := diagnostic.CollectDiagnostics(ctx, p.dockerClient, filterArgs, p.promoteContainerName(), clonePath); err != nil { - log.Err("Failed to collect container diagnostics", err) + log.Err("failed to collect container diagnostics", err) } } }() @@ -1102,7 +1107,7 @@ func (p *PhysicalInitial) markDatabaseData() error { func (p *PhysicalInitial) updateDataStateAt() { dsaTime, err := time.Parse(util.DataStateAtFormat, p.dbMark.DataStateAt) if err != nil { - log.Err("Invalid value for DataStateAt: ", p.dbMark.DataStateAt) + log.Err("invalid value for DataStateAt: ", p.dbMark.DataStateAt) return } diff --git a/engine/internal/retrieval/engine/postgres/tools/query/preprocessor.go b/engine/internal/retrieval/engine/postgres/tools/query/preprocessor.go index 2e09da6e..00d48552 100644 --- a/engine/internal/retrieval/engine/postgres/tools/query/preprocessor.go +++ b/engine/internal/retrieval/engine/postgres/tools/query/preprocessor.go @@ -138,7 +138,7 @@ func (q *Processor) runParallel(ctx context.Context, containerID, parallelDir st errCh <- err cancel() - log.Err("Preprocessing query: ", err) + log.Err("preprocessing query: ", err) return } diff --git a/engine/internal/retrieval/engine/postgres/tools/tools.go b/engine/internal/retrieval/engine/postgres/tools/tools.go index 4a0bbe2d..1fe2cefe 100644 --- a/engine/internal/retrieval/engine/postgres/tools/tools.go +++ b/engine/internal/retrieval/engine/postgres/tools/tools.go @@ -15,6 +15,7 @@ import ( "os" "os/exec" "path" + "path/filepath" "strconv" "strings" "time" @@ -95,6 +96,24 @@ func IsEmptyDirectory(dir string) (bool, error) { return len(names) == 0, nil } +// CleanupDir removes content of the directory. 
+func CleanupDir(dir string) error { + entries, err := os.ReadDir(dir) + if err != nil { + return fmt.Errorf("failed to read directory %s: %w", dir, err) + } + + for _, entry := range entries { + entryName := filepath.Join(dir, entry.Name()) + + if err := os.RemoveAll(entryName); err != nil { + return fmt.Errorf("failed to remove %s: %w", entryName, err) + } + } + + return nil +} + // TouchFile creates an empty file. func TouchFile(filename string) error { file, err := os.Create(filename) @@ -445,7 +464,7 @@ func StopContainer(ctx context.Context, dockerClient *client.Client, containerID log.Msg(fmt.Sprintf("Stopping container ID: %v", containerID)) if err := dockerClient.ContainerStop(ctx, containerID, container.StopOptions{Timeout: pointer.ToInt(stopTimeout)}); err != nil { - log.Err("Failed to stop container: ", err) + log.Err("failed to stop container: ", err) } log.Msg(fmt.Sprintf("Container %q has been stopped", containerID)) @@ -456,7 +475,7 @@ func RemoveContainer(ctx context.Context, dockerClient *client.Client, container log.Msg(fmt.Sprintf("Removing container ID: %v", containerID)) if err := dockerClient.ContainerStop(ctx, containerID, container.StopOptions{Timeout: pointer.ToInt(stopTimeout)}); err != nil { - log.Err("Failed to stop container: ", err) + log.Err("failed to stop container: ", err) } log.Msg(fmt.Sprintf("Container %q has been stopped", containerID)) @@ -465,7 +484,7 @@ func RemoveContainer(ctx context.Context, dockerClient *client.Client, container RemoveVolumes: true, Force: true, }); err != nil { - log.Err("Failed to remove container: ", err) + log.Err("failed to remove container: ", err) return } @@ -495,7 +514,7 @@ func PullImage(ctx context.Context, dockerClient *client.Client, image string) e defer func() { _ = pullOutput.Close() }() if err := jsonmessage.DisplayJSONMessagesToStream(pullOutput, streams.NewOut(os.Stdout), nil); err != nil { - log.Err("Failed to render pull image output: ", err) + log.Err("failed to render pull image output: ", err) } return nil @@ -667,7 +686,7 @@ func CopyContainerLogs(ctx context.Context, docker *client.Client, containerName defer func() { err := reader.Close() if err != nil { - log.Err("Failed to close container output reader", err) + log.Err("failed to close container output reader", err) } }() @@ -679,7 +698,7 @@ func CopyContainerLogs(ctx context.Context, docker *client.Client, containerName defer func() { err := writeFile.Close() if err != nil { - log.Err("Failed to close container output file", err) + log.Err("failed to close container output file", err) } }() diff --git a/engine/internal/retrieval/retrieval.go b/engine/internal/retrieval/retrieval.go index 78b1f8fa..6f3176a1 100644 --- a/engine/internal/retrieval/retrieval.go +++ b/engine/internal/retrieval/retrieval.go @@ -10,7 +10,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "time" "github.com/docker/docker/api/types" @@ -51,6 +50,8 @@ const ( pendingFilename = "pending.retrieval" ) +var errNoJobs = errors.New("no jobs to snapshot pool data") + type jobGroup string // Retrieval describes a data retrieval. @@ -75,6 +76,12 @@ type Scheduler struct { Spec cron.Schedule } +var ( + ErrRefreshInProgress = errors.New("The data refresh/snapshot is currently in progress. Skip a new data refresh iteration") + ErrRefreshPending = errors.New("Data retrieving suspended because Retrieval state is pending") + ErrNoAvailablePool = errors.New("Pool to perform full refresh not found. Skip refreshing") +) + // New creates a new data retrieval. 
func New(cfg *dblabCfg.Config, engineProps *global.EngineProps, docker *client.Client, pm *pool.Manager, tm *telemetry.Agent, runner runners.Runner) (*Retrieval, error) { @@ -174,7 +181,7 @@ func (r *Retrieval) reloadStatefulJobs() { // todo should we remove if jobs are not there ? // todo should we check for completion before ? if err := job.Reload(cfg.Options); err != nil { - log.Err("Failed to reload configuration of the retrieval job", job.Name(), err) + log.Err("failed to reload configuration of retrieval job", job.Name(), err) } } } @@ -190,13 +197,6 @@ func (r *Retrieval) Run(ctx context.Context) error { return fmt.Errorf("failed to collect content lists from the foundation Docker image of the logicalDump job: %w", err) } - if r.cfg.Refresh != nil && r.cfg.Refresh.SkipStartRefresh { - log.Msg("Continue without performing initial data refresh because the `skipStartRefresh` option is enabled") - r.setupScheduler(ctx) - - return nil - } - fsManager, err := r.getNextPoolToDataRetrieving() if err != nil { var skipError *SkipRefreshingError @@ -350,7 +350,7 @@ func (r *Retrieval) run(ctx context.Context, fsm pool.FSManager) (err error) { r.State.cleanAlerts() } - if err := r.SnapshotData(ctx, poolName); err != nil { + if err := r.SnapshotData(ctx, poolName); err != nil && err != errNoJobs { return err } @@ -359,6 +359,10 @@ func (r *Retrieval) run(ctx context.Context, fsm pool.FSManager) (err error) { r.State.cleanAlerts() } + if err := fsm.InitBranching(); err != nil { + return fmt.Errorf("failed to init branching: %w", err) + } + return nil } @@ -406,12 +410,6 @@ func (r *Retrieval) RefreshData(ctx context.Context, poolName string) error { r.State.CurrentJob = nil }() - if r.State.Mode == models.Logical { - if err := preparePoolToRefresh(fsm, r.runner); err != nil { - return fmt.Errorf("failed to prepare pool for initial refresh: %w", err) - } - } - for _, j := range jobs { r.State.CurrentJob = j @@ -446,8 +444,8 @@ func (r *Retrieval) SnapshotData(ctx context.Context, poolName string) error { } if len(jobs) == 0 { - log.Dbg("no jobs to snapshot pool data:", fsm.Pool()) - return nil + log.Dbg(errNoJobs, fsm.Pool()) + return errNoJobs } log.Dbg("Taking a snapshot on the pool: ", fsm.Pool()) @@ -549,7 +547,7 @@ func (r *Retrieval) defineRetrievalMode() { func (r *Retrieval) setupScheduler(ctx context.Context) { r.stopScheduler() - if r.cfg.Refresh == nil || r.cfg.Refresh.Timetable == "" { + if r.cfg.Refresh.Timetable == "" { return } @@ -580,20 +578,20 @@ func (r *Retrieval) refreshFunc(ctx context.Context) func() { // FullRefresh performs full refresh for an unused storage pool and makes it active. func (r *Retrieval) FullRefresh(ctx context.Context) error { - if r.State.Status == models.Refreshing || r.State.Status == models.Snapshotting { - alert := telemetry.Alert{ - Level: models.RefreshSkipped, - Message: "The data refresh/snapshot is currently in progress. 
Skip a new data refresh iteration", - } - r.State.addAlert(alert) - r.tm.SendEvent(ctx, telemetry.AlertEvent, alert) - log.Msg(alert.Message) - - return nil - } + if err := r.CanStartRefresh(); err != nil { + switch { + case errors.Is(err, ErrRefreshInProgress): + alert := telemetry.Alert{ + Level: models.RefreshSkipped, + Message: err.Error(), + } + r.State.addAlert(alert) + r.tm.SendEvent(ctx, telemetry.AlertEvent, alert) + log.Msg(alert.Message) - if r.State.Status == models.Pending { - log.Msg("Data retrieving suspended because Retrieval state is pending") + case errors.Is(err, ErrRefreshPending): + log.Msg(err.Error()) + } return nil } @@ -605,31 +603,32 @@ func (r *Retrieval) FullRefresh(ctx context.Context) error { runCtx, cancel := context.WithCancel(ctx) r.ctxCancel = cancel - elementToUpdate := r.poolManager.GetPoolToUpdate() - if elementToUpdate == nil || elementToUpdate.Value == nil { + if err := r.HasAvailablePool(); err != nil { alert := telemetry.Alert{ Level: models.RefreshSkipped, - Message: "Pool to perform full refresh not found. Skip refreshing", + Message: err.Error(), } r.State.addAlert(alert) r.tm.SendEvent(ctx, telemetry.AlertEvent, alert) - log.Msg(alert.Message + ". Hint: Check that there is at least one pool that does not have clones running. " + + log.Msg(err.Error() + ". Hint: Check that there is at least one pool that does not have clones running. " + "Refresh can be performed only to a pool without clones.") return nil } + elementToUpdate := r.poolManager.GetPoolToUpdate() + poolToUpdate, err := r.poolManager.GetFSManager(elementToUpdate.Value.(string)) if err != nil { return errors.Wrap(err, "failed to get FSManager") } - log.Msg("Pool to a full refresh: ", poolToUpdate.Pool()) + log.Msg("Pool selected to perform full refresh: ", poolToUpdate.Pool()) // Stop service containers: sync-instance, etc. if cleanUpErr := cont.CleanUpControlContainers(runCtx, r.docker, r.engineProps.InstanceID); cleanUpErr != nil { - log.Err("Failed to clean up service containers:", cleanUpErr) + log.Err("failed to clean up service containers:", cleanUpErr) return cleanUpErr } @@ -656,55 +655,11 @@ func (r *Retrieval) stopScheduler() { } } -func preparePoolToRefresh(poolToUpdate pool.FSManager, runner runners.Runner) error { - cloneList, err := poolToUpdate.ListClonesNames() - if err != nil { - return errors.Wrap(err, "failed to check running clones") - } - - if len(cloneList) > 0 { - return errors.Errorf("there are active clones in the requested pool: %s\nDestroy them to perform a full refresh", - strings.Join(cloneList, " ")) - } - - if _, err := runner.Run(fmt.Sprintf("rm -rf %s %s", - filepath.Join(poolToUpdate.Pool().DataDir(), "*"), - filepath.Join(poolToUpdate.Pool().DataDir(), dbmarker.ConfigDir))); err != nil { - return errors.Wrap(err, "failed to clean unix socket directory") - } - - poolToUpdate.RefreshSnapshotList() - - snapshots := poolToUpdate.SnapshotList() - if len(snapshots) == 0 { - log.Msg(fmt.Sprintf("no snapshots for pool %s", poolToUpdate.Pool().Name)) - return nil - } - - log.Msg("Preparing pool for full data refresh; existing snapshots are to be destroyed") - - for _, snapshotEntry := range snapshots { - log.Msg("Destroying snapshot:", snapshotEntry.ID) - - if err := poolToUpdate.DestroySnapshot(snapshotEntry.ID); err != nil { - return errors.Wrap(err, "failed to destroy the existing snapshot") - } - } - - return nil -} - // ReportState collects the current restore state. 
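+// The returned telemetry.Restore carries the retrieval mode, the configured refresh timetable, and the list of retrieval jobs.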
func (r *Retrieval) ReportState() telemetry.Restore { - var refreshingTimetable string - - if r.cfg.Refresh != nil { - refreshingTimetable = r.cfg.Refresh.Timetable - } - return telemetry.Restore{ Mode: r.State.Mode, - Refreshing: refreshingTimetable, + Refreshing: r.cfg.Refresh.Timetable, Jobs: r.cfg.Jobs, } } @@ -827,3 +782,24 @@ func (r *Retrieval) reportContainerSyncStatus(ctx context.Context, containerID s return value, nil } + +// CanStartRefresh checks whether a new refresh operation can be started. +func (r *Retrieval) CanStartRefresh() error { + if r.State.Status == models.Refreshing || r.State.Status == models.Snapshotting { + return ErrRefreshInProgress + } + + if r.State.Status == models.Pending { + return ErrRefreshPending + } + + return nil +} + +// HasAvailablePool checks whether there is a pool available for a full refresh. +func (r *Retrieval) HasAvailablePool() error { + element := r.poolManager.GetPoolToUpdate() + if element == nil || element.Value == nil { + return ErrNoAvailablePool + } + + return nil +} diff --git a/engine/internal/retrieval/validator.go b/engine/internal/retrieval/validator.go index 0c62935e..2371a884 100644 --- a/engine/internal/retrieval/validator.go +++ b/engine/internal/retrieval/validator.go @@ -70,7 +70,7 @@ func validateStructure(r *config.Config) error { } func validateRefreshTimetable(r *config.Config) error { - if r.Refresh == nil || r.Refresh.Timetable == "" { + if r.Refresh.Timetable == "" { return nil } diff --git a/engine/internal/runci/handlers.go b/engine/internal/runci/handlers.go index 120b795a..35236a49 100644 --- a/engine/internal/runci/handlers.go +++ b/engine/internal/runci/handlers.go @@ -30,7 +30,6 @@ import ( dblab_types "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" - "gitlab.com/postgres-ai/database-lab/v3/pkg/util" "gitlab.com/postgres-ai/database-lab/v3/version" ) @@ -266,7 +265,7 @@ func (s *Server) runCommands(ctx context.Context, clone *models.Clone, runID str func (s *Server) buildContainerConfig(clone *models.Clone, migrationEnvs []string) *container.Config { host := clone.DB.Host if host == s.dle.URL("").Hostname() || host == "127.0.0.1" || host == "localhost" { - host = util.GetCloneNameStr(clone.DB.Port) + host = clone.ID } return &container.Config{ diff --git a/engine/internal/srv/branch.go b/engine/internal/srv/branch.go new file mode 100644 index 00000000..6556ae1e --- /dev/null +++ b/engine/internal/srv/branch.go @@ -0,0 +1,676 @@ +package srv + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strings" + "time" + + "github.com/gorilla/mux" + + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/pool" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/resources" + "gitlab.com/postgres-ai/database-lab/v3/internal/srv/api" + "gitlab.com/postgres-ai/database-lab/v3/internal/telemetry" + "gitlab.com/postgres-ai/database-lab/v3/internal/webhooks" + "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" + "gitlab.com/postgres-ai/database-lab/v3/pkg/log" + "gitlab.com/postgres-ai/database-lab/v3/pkg/models" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" +) + +var branchNameRegexp = regexp.MustCompile(`^[\p{L}\d_-]+$`) + +// listBranches returns branch list.
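+// Each entry reports the branch head snapshot, its parent, and the number of snapshots reachable from it; the default branch is listed once with its most recent snapshot.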
+func (s *Server) listBranches(w http.ResponseWriter, r *http.Request) { + fsm := s.pm.First() + + if fsm == nil { + api.SendBadRequestError(w, r, "no available pools") + return + } + + branches, err := s.getAllAvailableBranches(fsm) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + repo, err := fsm.GetAllRepo() + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + branchDetails := make([]models.BranchView, 0, len(branches)) + + // branchRegistry is used to display the "main" branch with only the most recent snapshot. + branchRegistry := make(map[string]int) + + for _, branchEntity := range branches { + snapshotDetails, ok := repo.Snapshots[branchEntity.SnapshotID] + if !ok { + continue + } + + numSnapshots, parentSnapshot := findBranchParent(repo.Snapshots, snapshotDetails.ID, branchEntity.Name) + + branchView := models.BranchView{ + Name: branchEntity.Name, + Parent: parentSnapshot, + DataStateAt: snapshotDetails.DataStateAt, + SnapshotID: snapshotDetails.ID, + Dataset: snapshotDetails.Dataset, + NumSnapshots: numSnapshots, + } + + if position, ok := branchRegistry[branchEntity.Name]; ok { + if branchView.DataStateAt > branchDetails[position].DataStateAt { + branchDetails[position] = branchView + } + + continue + } + + branchRegistry[branchView.Name] = len(branchDetails) + branchDetails = append(branchDetails, branchView) + } + + if err := api.WriteJSON(w, http.StatusOK, branchDetails); err != nil { + api.SendError(w, r, err) + return + } +} + +func (s *Server) getAllAvailableBranches(fsm pool.FSManager) ([]models.BranchEntity, error) { + if fsm == nil { + return nil, fmt.Errorf("no available pools") + } + + // Filter by available pools in case two or more DLE instances run on the same pool and use the selectedPool feature. + poolNames := []string{} + + for _, fsManager := range s.pm.GetFSManagerList() { + poolNames = append(poolNames, fsManager.Pool().Name) + } + + return fsm.ListAllBranches(poolNames) +} + +func findBranchParent(snapshots map[string]models.SnapshotDetails, parentID, branch string) (int, string) { + snapshotCounter := 0 + + for i := len(snapshots); i > 0; i-- { + snapshotPointer := snapshots[parentID] + snapshotCounter++ + + if containsString(snapshotPointer.Root, branch) { + if len(snapshotPointer.Branch) > 0 { + return snapshotCounter, snapshotPointer.Branch[0] + } + + break + } + + if snapshotPointer.Parent == "-" { + break + } + + parentID = snapshotPointer.Parent + } + + return snapshotCounter, "-" +} + +func containsString(slice []string, s string) bool { + for _, str := range slice { + if str == s { + return true + } + } + + return false +} + +func (s *Server) getFSManagerForBranch(branchName string) (pool.FSManager, error) { + allBranches, err := s.getAllAvailableBranches(s.pm.First()) + if err != nil { + return nil, fmt.Errorf("failed to get branch list: %w", err) + } + + for _, branchEntity := range allBranches { + if branchEntity.Name == branchName { // TODO: filter by pool name as well because branch name is ambiguous.
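+ // For now, the first branch with a matching name wins regardless of its pool.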
+ return s.getFSManagerForSnapshot(branchEntity.SnapshotID) + } + } + + return nil, fmt.Errorf("failed to find dataset of the branch: %s", branchName) +} + +func (s *Server) createBranch(w http.ResponseWriter, r *http.Request) { + var createRequest types.BranchCreateRequest + if err := api.ReadJSON(r, &createRequest); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if createRequest.BranchName == "" { + api.SendBadRequestError(w, r, "The branch name must not be empty") + return + } + + if createRequest.BranchName == createRequest.BaseBranch { + api.SendBadRequestError(w, r, "new and base branches must have different names") + return + } + + if !isValidBranchName(createRequest.BranchName) { + api.SendBadRequestError(w, r, "The branch name must contain only Unicode characters, numbers, underscores, and hyphens. "+ "Spaces and slashes are not allowed") + return + } + + var err error + + fsm := s.pm.First() + + if createRequest.BaseBranch != "" { + fsm, err = s.getFSManagerForBranch(createRequest.BaseBranch) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + } + + if fsm == nil { + api.SendBadRequestError(w, r, "no pool manager found") + return + } + + branches, err := fsm.ListBranches() + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if _, ok := branches[createRequest.BranchName]; ok { + api.SendBadRequestError(w, r, fmt.Sprintf("branch '%s' already exists", createRequest.BranchName)) + return + } + + snapshotID := createRequest.SnapshotID + + if snapshotID == "" { + if createRequest.BaseBranch == "" { + api.SendBadRequestError(w, r, "either base branch name or base snapshot ID must be specified") + return + } + + branchPointer, ok := branches[createRequest.BaseBranch] + if !ok { + api.SendBadRequestError(w, r, "base branch not found") + return + } + + snapshotID = branchPointer + } + + poolName, err := s.detectPoolName(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + brName := fsm.Pool().BranchName(poolName, createRequest.BranchName) + dataStateAt := time.Now().Format(util.DataStateAtFormat) + + if err := fsm.CreateBranch(brName, snapshotID); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + branchSnapshot := fmt.Sprintf("%s@%s", brName, dataStateAt) + + if err := fsm.Snapshot(branchSnapshot); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.AddBranchProp(createRequest.BranchName, branchSnapshot); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.SetRoot(createRequest.BranchName, snapshotID); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.SetRelation(snapshotID, branchSnapshot); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.SetDSA(dataStateAt, branchSnapshot); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + fsm.RefreshSnapshotList() + + branch := models.Branch{Name: createRequest.BranchName} + + s.webhookCh <- webhooks.BasicEvent{ + EventType: webhooks.BranchCreateEvent, + EntityID: branch.Name, + } + + s.tm.SendEvent(context.Background(), telemetry.BranchCreatedEvent, telemetry.BranchCreated{ + Name: branch.Name, + }) + + if err := api.WriteJSON(w, http.StatusOK, branch); err != nil { + api.SendError(w, r, err) + return + } +} + +func isValidBranchName(branchName string) bool { + return branchNameRegexp.MatchString(branchName) +} + +func (s *Server)
getSnapshot(w http.ResponseWriter, r *http.Request) { + snapshotID := mux.Vars(r)["id"] + + if snapshotID == "" { + api.SendBadRequestError(w, r, "snapshotID must not be empty") + return + } + + snapshot, err := s.Cloning.GetSnapshotByID(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := api.WriteJSON(w, http.StatusOK, snapshot); err != nil { + api.SendError(w, r, err) + return + } +} + +func (s *Server) getCommit(w http.ResponseWriter, r *http.Request) { + snapshotID := mux.Vars(r)["id"] + + if snapshotID == "" { + api.SendBadRequestError(w, r, "snapshotID must not be empty") + return + } + + fsm, err := s.getFSManagerForSnapshot(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + repo, err := fsm.GetRepo() + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + snapshotPointer, ok := repo.Snapshots[snapshotID] + + if !ok { + api.SendNotFoundError(w, r) + return + } + + if err := api.WriteJSON(w, http.StatusOK, snapshotPointer); err != nil { + api.SendError(w, r, err) + return + } +} + +func (s *Server) getFSManagerForSnapshot(snapshotID string) (pool.FSManager, error) { + poolName, err := s.detectPoolName(snapshotID) + if err != nil { + return nil, fmt.Errorf("failed to detect pool name for snapshot %s: %w", snapshotID, err) + } + + fsm, err := s.pm.GetFSManager(poolName) + if err != nil { + return nil, fmt.Errorf("pool manager not available %s: %w", poolName, err) + } + + return fsm, nil +} + +func (s *Server) snapshot(w http.ResponseWriter, r *http.Request) { + var snapshotRequest types.SnapshotCloneCreateRequest + if err := api.ReadJSON(r, &snapshotRequest); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + clone, err := s.Cloning.GetClone(snapshotRequest.CloneID) + if err != nil { + api.SendBadRequestError(w, r, "clone not found") + return + } + + if clone.Branch == "" { + api.SendBadRequestError(w, r, "clone was not created on branch") + return + } + + fsm, err := s.pm.GetFSManager(clone.Snapshot.Pool) + + if err != nil { + api.SendBadRequestError(w, r, fmt.Sprintf("pool %q not found", clone.Snapshot.Pool)) + return + } + + branches, err := fsm.ListBranches() + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + currentSnapshotID, ok := branches[clone.Branch] + if !ok { + api.SendBadRequestError(w, r, "branch not found: "+clone.Branch) + return + } + + log.Dbg("Current snapshot ID", currentSnapshotID) + + dataStateAt := time.Now().Format(util.DataStateAtFormat) + snapshotBase := fsm.Pool().CloneName(clone.Branch, clone.ID, clone.Revision) + snapshotName := fmt.Sprintf("%s@%s", snapshotBase, dataStateAt) + + if err := fsm.Snapshot(snapshotName); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.SetDSA(dataStateAt, snapshotName); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.AddBranchProp(clone.Branch, snapshotName); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.DeleteBranchProp(clone.Branch, currentSnapshotID); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.SetRelation(currentSnapshotID, snapshotName); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := fsm.SetMessage(snapshotRequest.Message,
snapshotName); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + fsm.RefreshSnapshotList() + + if err := s.Cloning.ReloadSnapshots(); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + snapshot, err := s.Cloning.GetSnapshotByID(snapshotName) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := s.Cloning.UpdateCloneSnapshot(clone.ID, snapshot); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + s.tm.SendEvent(context.Background(), telemetry.SnapshotCreatedEvent, telemetry.SnapshotCreated{}) + + if err := api.WriteJSON(w, http.StatusOK, types.SnapshotResponse{SnapshotID: snapshotName}); err != nil { + api.SendError(w, r, err) + return + } +} + +func filterSnapshotsByBranch(pool *resources.Pool, branch string, snapshots []models.Snapshot) []models.Snapshot { + filtered := make([]models.Snapshot, 0) + + branchName := pool.BranchName(pool.Name, branch) + + for _, sn := range snapshots { + dataset, _, found := strings.Cut(sn.ID, "@") + if !found { + continue + } + + if strings.HasPrefix(dataset, branchName) || (branch == branching.DefaultBranch && pool.Name == dataset) { + filtered = append(filtered, sn) + } + } + + return filtered +} + +func (s *Server) log(w http.ResponseWriter, r *http.Request) { + branchName := mux.Vars(r)["branchName"] + + fsm, err := s.getFSManagerForBranch(branchName) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if fsm == nil { + api.SendBadRequestError(w, r, "no pool manager found") + return + } + + repo, err := fsm.GetRepo() + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + snapshotID, ok := repo.Branches[branchName] + if !ok { + api.SendBadRequestError(w, r, "branch not found: "+branchName) + return + } + + snapshotPointer := repo.Snapshots[snapshotID] + + logList := []models.SnapshotDetails{snapshotPointer} + + // Limit the number of iterations to the number of snapshots. + for i := len(repo.Snapshots); i > 1; i-- { + if snapshotPointer.Parent == "-" || snapshotPointer.Parent == "" { + break + } + + snapshotPointer = repo.Snapshots[snapshotPointer.Parent] + logList = append(logList, snapshotPointer) + } + + if err := api.WriteJSON(w, http.StatusOK, logList); err != nil { + api.SendError(w, r, err) + return + } +} + +func (s *Server) deleteBranch(w http.ResponseWriter, r *http.Request) { + branchName := mux.Vars(r)["branchName"] + + fsm, err := s.getFSManagerForBranch(branchName) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if fsm == nil { + api.SendBadRequestError(w, r, "no pool manager found") + return + } + + repo, err := fsm.GetRepo() + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if branchName == branching.DefaultBranch { + api.SendBadRequestError(w, r, fmt.Sprintf("cannot delete default branch: %s", branching.DefaultBranch)) + return + } + + snapshotID, ok := repo.Branches[branchName] + if !ok { + api.SendBadRequestError(w, r, "branch not found: "+branchName) + return + } + + toRemove := snapshotsToRemove(repo, snapshotID, branchName) + + if len(toRemove) > 0 { + // Pre-check. 
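+ // Refuse the deletion if any snapshot scheduled for removal still has clones attached.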
+ preCheckList := make(map[string]int) + + for _, snapshotID := range toRemove { + if cloneNum := s.Cloning.GetCloneNumber(snapshotID); cloneNum > 0 { + preCheckList[snapshotID] = cloneNum + } + } + + if len(preCheckList) > 0 { + errMsg := fmt.Sprintf("cannot delete branch %q because", branchName) + + for snapID, cloneNum := range preCheckList { + errMsg += fmt.Sprintf(" snapshot %q contains %d clone(s)", snapID, cloneNum) + } + + log.Warn(errMsg) + api.SendBadRequestError(w, r, errMsg) + + return + } + } + + if err := s.destroyBranchDataset(fsm, branchName); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := api.WriteJSON(w, http.StatusOK, models.Response{ + Status: models.ResponseOK, + Message: "Deleted branch", + }); err != nil { + api.SendError(w, r, err) + return + } +} + +func cleanupSnapshotProperties(repo *models.Repo, fsm pool.FSManager, branchName string) error { + for _, snap := range repo.Snapshots { + for _, rootBranch := range snap.Root { + if rootBranch == branchName { + if err := fsm.DeleteRootProp(branchName, snap.ID); err != nil { + return err + } + + if err := fsm.DeleteBranchProp(branchName, snap.ID); err != nil { + return err + } + + for _, child := range snap.Child { + if _, ok := repo.Snapshots[child]; !ok { + if err := fsm.DeleteChildProp(child, snap.ID); err != nil { + return err + } + } + } + + break + } + } + } + + return nil +} + +func snapshotsToRemove(repo *models.Repo, snapshotID, branchName string) []string { + snapshotPointer := repo.Snapshots[snapshotID] + + removingList := []string{} + + for snapshotPointer.Parent != "-" { + if len(snapshotPointer.Root) > 0 { + break + } + + for _, snapshotRoot := range snapshotPointer.Root { + if snapshotRoot == branchName { + break + } + } + + removingList = append(removingList, snapshotPointer.ID) + snapshotPointer = repo.Snapshots[snapshotPointer.Parent] + } + + return removingList +} + +func (s *Server) destroyBranchDataset(fsm pool.FSManager, branchName string) error { + branchDatasetName := fsm.Pool().BranchName(fsm.Pool().Name, branchName) + + if err := fsm.DestroyDataset(branchDatasetName); err != nil { + log.Warn(fmt.Sprintf("failed to remove dataset %q:", branchDatasetName), err) + + return err + } + + // Re-request the repository as the list of snapshots may change significantly. 
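+ // Stale branch and root properties that still reference the deleted branch are cleaned up below.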
+ repo, err := fsm.GetRepo() + if err != nil { + return err + } + + if err := cleanupSnapshotProperties(repo, fsm, branchName); err != nil { + return err + } + + fsm.RefreshSnapshotList() + + s.webhookCh <- webhooks.BasicEvent{ + EventType: webhooks.BranchDeleteEvent, + EntityID: branchName, + } + + s.tm.SendEvent(context.Background(), telemetry.BranchDestroyedEvent, telemetry.BranchDestroyed{ + Name: branchName, + }) + + log.Dbg(fmt.Sprintf("Branch %s has been deleted", branchName)) + + return nil +} diff --git a/engine/internal/srv/branch_test.go b/engine/internal/srv/branch_test.go new file mode 100644 index 00000000..7cccf8ed --- /dev/null +++ b/engine/internal/srv/branch_test.go @@ -0,0 +1,76 @@ +package srv + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/resources" + "gitlab.com/postgres-ai/database-lab/v3/pkg/models" +) + +func TestBranchNames(t *testing.T) { + t.Run("valid branches", func(t *testing.T) { + testCases := []struct { + branchName string + }{ + {branchName: "001-branch"}, + {branchName: "001_branch"}, + {branchName: "001_"}, + {branchName: "_branch"}, + {branchName: "branch"}, + {branchName: "001"}, + {branchName: "tři"}, + } + + for _, tc := range testCases { + require.True(t, isValidBranchName(tc.branchName)) + } + }) + + t.Run("invalid branches", func(t *testing.T) { + testCases := []struct { + branchName string + }{ + {branchName: "001 branch"}, + {branchName: ""}, + {branchName: "branch 001"}, + {branchName: "branch/001"}, + } + + for _, tc := range testCases { + require.False(t, isValidBranchName(tc.branchName)) + } + }) + +} + +func TestSnapshotFiltering(t *testing.T) { + t.Run("filter snapshots", func(t *testing.T) { + pool := &resources.Pool{Name: "pool1/pg14"} + input := []models.Snapshot{ + {ID: "pool1/pg14@snapshot_20240912082141", Pool: "pool1/pg14"}, + {ID: "pool1/pg14@snapshot_20240912082987", Pool: "pool1/pg14"}, + {ID: "pool5/pg14@snapshot_20240912082987", Pool: "pool5/pg14"}, + {ID: "pool1/pg14/branch/main@snapshot_20240912082333", Pool: "pool1/pg14"}, + {ID: "pool1/pg14/branch/dev001@snapshot_20240912082141", Pool: "pool1/pg14"}, + {ID: "pool1/pg14/branch/dev001/20240912082141@20240912082141", Pool: "pool1/pg14"}, + {ID: "pool5/pg14/branch/dev001@snapshot_20240912082141", Pool: "pool5/pg14"}, + {ID: "pool1/pg14/branch/dev002/20240912082141@20240912082141", Pool: "pool1/pg14"}, + } + + outputDev001 := []models.Snapshot{ + {ID: "pool1/pg14/branch/dev001@snapshot_20240912082141", Pool: "pool1/pg14"}, + {ID: "pool1/pg14/branch/dev001/20240912082141@20240912082141", Pool: "pool1/pg14"}, + } + + outputMain := []models.Snapshot{ + {ID: "pool1/pg14@snapshot_20240912082141", Pool: "pool1/pg14"}, + {ID: "pool1/pg14@snapshot_20240912082987", Pool: "pool1/pg14"}, + {ID: "pool1/pg14/branch/main@snapshot_20240912082333", Pool: "pool1/pg14"}, + } + + require.Equal(t, outputDev001, filterSnapshotsByBranch(pool, "dev001", input)) + require.Equal(t, outputMain, filterSnapshotsByBranch(pool, "main", input)) + }) +} diff --git a/engine/internal/srv/config.go b/engine/internal/srv/config.go index a8d34f7b..e10bcbf8 100644 --- a/engine/internal/srv/config.go +++ b/engine/internal/srv/config.go @@ -17,6 +17,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/engine/postgres/logical" "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/engine/postgres/tools/db" "gitlab.com/postgres-ai/database-lab/v3/internal/srv/api" + 
"gitlab.com/postgres-ai/database-lab/v3/internal/telemetry" "gitlab.com/postgres-ai/database-lab/v3/pkg/config" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" @@ -78,6 +79,8 @@ func (s *Server) setProjectedAdminConfig(w http.ResponseWriter, r *http.Request) return } + s.tm.SendEvent(context.Background(), telemetry.ConfigUpdatedEvent, telemetry.ConfigUpdated{}) + retrievalStatus := s.Retrieval.State.Status if err := s.Retrieval.RemovePendingMarker(); err != nil { @@ -288,7 +291,7 @@ func (s *Server) applyProjectedAdminConfig(ctx context.Context, obj interface{}) err = config.RotateConfig(cfgData) if err != nil { - log.Errf("Failed to backup config: %v", err) + log.Errf("failed to backup config: %v", err) return nil, err } diff --git a/engine/internal/srv/routes.go b/engine/internal/srv/routes.go index b2dab871..046ee184 100644 --- a/engine/internal/srv/routes.go +++ b/engine/internal/srv/routes.go @@ -7,21 +7,27 @@ import ( "net/http" "os" "strconv" + "strings" "time" "github.com/gorilla/mux" "github.com/pkg/errors" "gitlab.com/postgres-ai/database-lab/v3/internal/observer" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/pool" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/runners" + "gitlab.com/postgres-ai/database-lab/v3/internal/provision/thinclones" "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/engine/postgres/tools/activity" "gitlab.com/postgres-ai/database-lab/v3/internal/srv/api" "gitlab.com/postgres-ai/database-lab/v3/internal/telemetry" + "gitlab.com/postgres-ai/database-lab/v3/internal/webhooks" "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" "gitlab.com/postgres-ai/database-lab/v3/pkg/client/platform" "gitlab.com/postgres-ai/database-lab/v3/pkg/config/global" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" "gitlab.com/postgres-ai/database-lab/v3/pkg/util" + "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching" "gitlab.com/postgres-ai/database-lab/v3/version" ) @@ -101,12 +107,365 @@ func (s *Server) getSnapshots(w http.ResponseWriter, r *http.Request) { return } + if branchRequest := r.URL.Query().Get("branch"); branchRequest != "" { + fsm, err := s.getFSManagerForBranch(branchRequest) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if fsm == nil { + api.SendBadRequestError(w, r, "no pool manager found") + return + } + + snapshots = filterSnapshotsByBranch(fsm.Pool(), branchRequest, snapshots) + } + if err = api.WriteJSON(w, http.StatusOK, snapshots); err != nil { api.SendError(w, r, err) return } } +func (s *Server) createSnapshot(w http.ResponseWriter, r *http.Request) { + var poolName string + + if r.Body != http.NoBody { + var createRequest types.SnapshotCreateRequest + if err := api.ReadJSON(r, &createRequest); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + poolName = createRequest.PoolName + } + + if poolName == "" { + firstFSM := s.pm.First() + + if firstFSM == nil || firstFSM.Pool() == nil { + api.SendBadRequestError(w, r, pool.ErrNoPools.Error()) + return + } + + poolName = firstFSM.Pool().Name + } + + if err := s.Retrieval.SnapshotData(context.Background(), poolName); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + fsManager, err := s.pm.GetFSManager(poolName) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + fsManager.RefreshSnapshotList() + + snapshotList := fsManager.SnapshotList() + 
+ if len(snapshotList) == 0 { + api.SendBadRequestError(w, r, "No snapshots at pool: "+poolName) + return + } + + if err := fsManager.InitBranching(); err != nil { + api.SendBadRequestError(w, r, "Cannot verify branch metadata: "+err.Error()) + return + } + + // TODO: set branching metadata. + + latestSnapshot := snapshotList[0] + + s.webhookCh <- webhooks.BasicEvent{ + EventType: webhooks.SnapshotCreateEvent, + EntityID: latestSnapshot.ID, + } + + if err := api.WriteJSON(w, http.StatusOK, latestSnapshot); err != nil { + api.SendError(w, r, err) + return + } +} + +func (s *Server) deleteSnapshot(w http.ResponseWriter, r *http.Request) { + snapshotID := mux.Vars(r)["id"] + if snapshotID == "" { + api.SendBadRequestError(w, r, "snapshot ID must not be empty") + return + } + + forceParam := r.URL.Query().Get("force") + force := false + + if forceParam != "" { + var err error + force, err = strconv.ParseBool(forceParam) + + if err != nil { + api.SendBadRequestError(w, r, "invalid value for `force`, must be boolean") + return + } + } + + poolName, err := s.detectPoolName(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if poolName == "" { + api.SendBadRequestError(w, r, fmt.Sprintf("pool for requested snapshot (%s) not found", snapshotID)) + return + } + + fsm, err := s.pm.GetFSManager(poolName) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + // Prevent deletion of automatic snapshots in the pool. + if fullDataset, _, found := strings.Cut(snapshotID, "@"); found && fullDataset == poolName { + api.SendBadRequestError(w, r, "cannot destroy automatic snapshot in the pool") + return + } + + // Check if snapshot exists. + if _, err := fsm.GetSnapshotProperties(snapshotID); err != nil { + if runnerError, ok := err.(runners.RunnerError); ok { + api.SendBadRequestError(w, r, runnerError.Stderr) + } else { + api.SendBadRequestError(w, r, err.Error()) + } + + return + } + + cloneIDs := []string{} + protectedClones := []string{} + + dependentCloneDatasets, err := fsm.HasDependentEntity(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + for _, cloneDataset := range dependentCloneDatasets { + cloneID, ok := branching.ParseCloneName(cloneDataset, poolName) + if !ok { + log.Dbg(fmt.Sprintf("cannot parse clone ID from %q", cloneDataset)) + continue + } + + clone, err := s.Cloning.GetClone(cloneID) + + if err != nil { + continue + } + + cloneIDs = append(cloneIDs, clone.ID) + + if clone.Protected { + protectedClones = append(protectedClones, clone.ID) + } + } + + if len(protectedClones) != 0 { + api.SendBadRequestError(w, r, fmt.Sprintf("cannot delete snapshot %s because it has dependent protected clones: %s", + snapshotID, strings.Join(protectedClones, ","))) + return + } + + if len(cloneIDs) != 0 && !force { + api.SendBadRequestError(w, r, fmt.Sprintf("cannot delete snapshot %s because it has dependent clones: %s", + snapshotID, strings.Join(cloneIDs, ","))) + return + } + + snapshotProperties, err := fsm.GetSnapshotProperties(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if snapshotProperties.Clones != "" && !force { + api.SendBadRequestError(w, r, fmt.Sprintf("cannot delete snapshot %s because it has dependent datasets: %s", + snapshotID, snapshotProperties.Clones)) + return + } + + // Remove dependent clones. 
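+ // At this point either force is set or no dependent clones were found, so the synchronous destroy below is safe.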
+ for _, cloneID := range cloneIDs { + if err = s.Cloning.DestroyCloneSync(cloneID); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + } + + // Remove snapshot and dependent datasets. + if !force { + if err := fsm.KeepRelation(snapshotID); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + } + + if err = fsm.DestroySnapshot(snapshotID, thinclones.DestroyOptions{Force: force}); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + snapshot, err := s.Cloning.GetSnapshotByID(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if snapshotProperties.Clones == "" && snapshot.NumClones == 0 { + // Destroy dataset if there are no related objects. + if fullDataset, _, found := strings.Cut(snapshotID, "@"); found && fullDataset != poolName { + if err = fsm.DestroyDataset(fullDataset); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + // Remove dle:branch and dle:root from the parent snapshot. + if snapshotProperties.Parent != "" { + branchName := snapshotProperties.Branch + if branchName == "" { + branchName, _ = branching.ParseBranchName(fullDataset, poolName) + } + + if branchName != "" { + if err := fsm.DeleteBranchProp(branchName, snapshotProperties.Parent); err != nil { + log.Err(err.Error()) + } + + if err := fsm.DeleteRootProp(branchName, snapshotProperties.Parent); err != nil { + log.Err(err.Error()) + } + } + } + + // TODO: review all available revisions; destroy the base dataset only if no revisions remain. + if baseDataset, found := strings.CutSuffix(fullDataset, "/r0"); found { + if err = fsm.DestroyDataset(baseDataset); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + } + } + } + + log.Dbg(fmt.Sprintf("Snapshot %s has been deleted", snapshotID)) + + if err := api.WriteJSON(w, http.StatusOK, models.Response{ + Status: models.ResponseOK, + Message: "Deleted snapshot", + }); err != nil { + api.SendError(w, r, err) + return + } + + fsm.RefreshSnapshotList() + + if err := s.Cloning.ReloadSnapshots(); err != nil { + log.Dbg("failed to reload snapshots", err.Error()) + } + + s.webhookCh <- webhooks.BasicEvent{ + EventType: webhooks.SnapshotDeleteEvent, + EntityID: snapshotID, + } +} + +func (s *Server) detectPoolName(snapshotID string) (string, error) { + const snapshotParts = 2 + + parts := strings.Split(snapshotID, "@") + if len(parts) != snapshotParts { + return "", fmt.Errorf("invalid snapshot name given: %s.
Should contain `dataset@snapname`", snapshotID) + } + + poolName := "" + + for _, fsm := range s.pm.GetFSManagerList() { + if strings.HasPrefix(parts[0], fsm.Pool().Name) { + poolName = fsm.Pool().Name + break + } + } + + return poolName, nil +} + +func (s *Server) createSnapshotClone(w http.ResponseWriter, r *http.Request) { + if r.Body == http.NoBody { + api.SendBadRequestError(w, r, "request body cannot be empty") + return + } + + var createRequest types.SnapshotCloneCreateRequest + if err := api.ReadJSON(r, &createRequest); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if createRequest.CloneID == "" { + api.SendBadRequestError(w, r, "cloneID cannot be empty") + return + } + + clone, err := s.Cloning.GetClone(createRequest.CloneID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + fsm, err := s.pm.GetFSManager(clone.Snapshot.Pool) + if err != nil { + api.SendBadRequestError(w, r, fmt.Sprintf("failed to find filesystem manager: %s", err.Error())) + return + } + + cloneName := clone.ID + + snapshotID, err := fsm.CreateSnapshot(cloneName, time.Now().Format(util.DataStateAtFormat)) + if err != nil { + api.SendBadRequestError(w, r, fmt.Sprintf("failed to create snapshot: %s", err.Error())) + return + } + + if err := s.Cloning.ReloadSnapshots(); err != nil { + log.Dbg("failed to reload snapshots", err.Error()) + } + + snapshot, err := s.Cloning.GetSnapshotByID(snapshotID) + if err != nil { + api.SendBadRequestError(w, r, fmt.Sprintf("failed to find new snapshot: %s", err.Error())) + return + } + + if err := api.WriteJSON(w, http.StatusOK, snapshot); err != nil { + api.SendError(w, r, err) + return + } +} + +func (s *Server) clones(w http.ResponseWriter, r *http.Request) { + cloningState := s.Cloning.GetCloningState() + + if err := api.WriteJSON(w, http.StatusOK, cloningState.Clones); err != nil { + api.SendError(w, r, err) + return + } +} + func (s *Server) createClone(w http.ResponseWriter, r *http.Request) { if s.engProps.GetEdition() == global.StandardEdition { if err := s.engProps.CheckBilling(); err != nil { @@ -126,6 +485,67 @@ func (s *Server) createClone(w http.ResponseWriter, r *http.Request) { return } + if cloneRequest.Snapshot != nil && cloneRequest.Snapshot.ID != "" { + fsm, err := s.getFSManagerForSnapshot(cloneRequest.Snapshot.ID) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if fsm == nil { + api.SendBadRequestError(w, r, "no pool manager found") + return + } + + branch := branching.ParseBranchNameFromSnapshot(cloneRequest.Snapshot.ID, fsm.Pool().Name) + if branch == "" { + branch = branching.DefaultBranch + } + + // Snapshot ID takes precedence over the branch name.
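+ // The branch recorded on the clone is therefore derived from the snapshot's dataset.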
+ cloneRequest.Branch = branch + } else { + if cloneRequest.Branch == "" { + cloneRequest.Branch = branching.DefaultBranch + } + + fsm, err := s.getFSManagerForBranch(cloneRequest.Branch) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if fsm == nil { + api.SendBadRequestError(w, r, "no pool manager found") + return + } + + branches, err := fsm.ListBranches() + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + snapshotID, ok := branches[cloneRequest.Branch] + if !ok { + api.SendBadRequestError(w, r, "branch not found") + return + } + + cloneRequest.Snapshot = &types.SnapshotCloneFieldRequest{ID: snapshotID} + } + + if cloneRequest.ID != "" { + fsm, err := s.getFSManagerForBranch(cloneRequest.Branch) + if err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + // Check if there is any clone revision under the dataset. + cloneRequest.Revision = findMaxCloneRevision(fsm.Pool().CloneRevisionLocation(cloneRequest.Branch, cloneRequest.ID)) + } + newClone, err := s.Cloning.CreateClone(cloneRequest) if err != nil { var reqErr *models.Error @@ -153,6 +573,39 @@ func (s *Server) createClone(w http.ResponseWriter, r *http.Request) { log.Dbg(fmt.Sprintf("Clone ID=%s is being created", newClone.ID)) } +func findMaxCloneRevision(path string) int { + files, err := os.ReadDir(path) + if err != nil { + log.Err(err) + return 0 + } + + maxIndex := -1 + + for _, file := range files { + if !file.IsDir() { + continue + } + + revisionIndex, ok := strings.CutPrefix(file.Name(), "r") + if !ok { + continue + } + + index, err := strconv.Atoi(revisionIndex) + if err != nil { + log.Err(err) + continue + } + + if index > maxIndex { + maxIndex = index + } + } + + return maxIndex + 1 +} + func (s *Server) destroyClone(w http.ResponseWriter, r *http.Request) { cloneID := mux.Vars(r)["id"] @@ -194,6 +647,11 @@ func (s *Server) patchClone(w http.ResponseWriter, r *http.Request) { return } + s.tm.SendEvent(context.Background(), telemetry.CloneUpdatedEvent, telemetry.CloneUpdated{ + ID: util.HashID(cloneID), + Protected: patchClone.Protected, + }) + if err := api.WriteJSON(w, http.StatusOK, updatedClone); err != nil { api.SendError(w, r, err) return @@ -285,7 +743,7 @@ func (s *Server) startObservation(w http.ResponseWriter, r *http.Request) { return } - s.Observer.AddObservingClone(clone.ID, uint(port), observingClone) + s.Observer.AddObservingClone(clone.ID, clone.Branch, clone.Revision, uint(port), observingClone) // Start session on the Platform. 
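+ // Logs and artifacts collected during the observation are later uploaded against this session.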
platformRequest := platform.StartObservationRequest{ @@ -343,8 +801,7 @@ func (s *Server) stopObservation(w http.ResponseWriter, r *http.Request) { return } - clone, err := s.Cloning.GetClone(observationRequest.CloneID) - if err != nil { + if _, err := s.Cloning.GetClone(observationRequest.CloneID); err != nil { api.SendNotFoundError(w, r) return } @@ -389,14 +846,14 @@ func (s *Server) stopObservation(w http.ResponseWriter, r *http.Request) { sessionID := strconv.FormatUint(session.SessionID, 10) - logs, err := s.Observer.GetCloneLog(context.TODO(), clone.DB.Port, observingClone) + logs, err := s.Observer.GetCloneLog(context.TODO(), observingClone) if err != nil { - log.Err("Failed to get observation logs", err) + log.Err("failed to get observation logs", err) } if len(logs) > 0 { if err := s.Platform.Client.UploadObservationLogs(context.Background(), logs, sessionID); err != nil { - log.Err("Failed to upload observation logs", err) + log.Err("failed to upload observation logs", err) } } @@ -410,7 +867,7 @@ func (s *Server) stopObservation(w http.ResponseWriter, r *http.Request) { } if err := s.Platform.Client.UploadObservationArtifact(context.Background(), data, sessionID, artifactType); err != nil { - log.Err("Failed to upload observation artifact", err) + log.Err("failed to upload observation artifact", err) } } @@ -493,3 +950,28 @@ func (s *Server) healthCheck(w http.ResponseWriter, _ *http.Request) { return } } + +func (s *Server) refresh(w http.ResponseWriter, r *http.Request) { + if err := s.Retrieval.CanStartRefresh(); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + if err := s.Retrieval.HasAvailablePool(); err != nil { + api.SendBadRequestError(w, r, err.Error()) + return + } + + go func() { + if err := s.Retrieval.FullRefresh(context.Background()); err != nil { + log.Err("failed to initiate full refresh", err) + } + }() + + if err := api.WriteJSON(w, http.StatusOK, models.Response{ + Status: models.ResponseOK, + Message: "Full refresh started", + }); err != nil { + api.SendError(w, r, err) + } +} diff --git a/engine/internal/srv/server.go b/engine/internal/srv/server.go index 04525053..af11b633 100644 --- a/engine/internal/srv/server.go +++ b/engine/internal/srv/server.go @@ -32,6 +32,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/srv/ws" "gitlab.com/postgres-ai/database-lab/v3/internal/telemetry" "gitlab.com/postgres-ai/database-lab/v3/internal/validator" + "gitlab.com/postgres-ai/database-lab/v3/internal/webhooks" "gitlab.com/postgres-ai/database-lab/v3/pkg/config/global" "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" @@ -59,6 +60,7 @@ type Server struct { startedAt *models.LocalTime filtering *log.Filtering reloadFn func(server *Server) error + webhookCh chan webhooks.EventTyper } // WSService defines a service to manage web-sockets. 
@@ -73,7 +75,8 @@ func NewServer(cfg *srvCfg.Config, globalCfg *global.Config, engineProps *global dockerClient *client.Client, cloning *cloning.Base, provisioner *provision.Provisioner, retrievalSvc *retrieval.Retrieval, platform *platform.Service, billingSvc *billing.Billing, observer *observer.Observer, pm *pool.Manager, tm *telemetry.Agent, tokenKeeper *ws.TokenKeeper, - filtering *log.Filtering, uiManager *embeddedui.UIManager, reloadConfigFn func(server *Server) error) *Server { + filtering *log.Filtering, uiManager *embeddedui.UIManager, reloadConfigFn func(server *Server) error, + webhookCh chan webhooks.EventTyper) *Server { server := &Server{ Config: cfg, Global: globalCfg, @@ -95,6 +98,7 @@ func NewServer(cfg *srvCfg.Config, globalCfg *global.Config, engineProps *global filtering: filtering, startedAt: &models.LocalTime{Time: time.Now().Truncate(time.Second)}, reloadFn: reloadConfigFn, + webhookCh: webhookCh, } return server @@ -193,6 +197,11 @@ func (s *Server) InitHandlers() { r.HandleFunc("/status", authMW.Authorized(s.getInstanceStatus)).Methods(http.MethodGet) r.HandleFunc("/snapshots", authMW.Authorized(s.getSnapshots)).Methods(http.MethodGet) + r.HandleFunc("/snapshot/{id:.*}", authMW.Authorized(s.getSnapshot)).Methods(http.MethodGet) + r.HandleFunc("/snapshot", authMW.Authorized(s.createSnapshot)).Methods(http.MethodPost) + r.HandleFunc("/snapshot/{id:.*}", authMW.Authorized(s.deleteSnapshot)).Methods(http.MethodDelete) + r.HandleFunc("/snapshot/clone", authMW.Authorized(s.createSnapshotClone)).Methods(http.MethodPost) + r.HandleFunc("/clones", authMW.Authorized(s.clones)).Methods(http.MethodGet) r.HandleFunc("/clone", authMW.Authorized(s.createClone)).Methods(http.MethodPost) r.HandleFunc("/clone/{id}", authMW.Authorized(s.destroyClone)).Methods(http.MethodDelete) r.HandleFunc("/clone/{id}", authMW.Authorized(s.patchClone)).Methods(http.MethodPatch) @@ -204,6 +213,13 @@ func (s *Server) InitHandlers() { r.HandleFunc("/observation/download", authMW.Authorized(s.downloadArtifact)).Methods(http.MethodGet) r.HandleFunc("/instance/retrieval", authMW.Authorized(s.retrievalState)).Methods(http.MethodGet) + r.HandleFunc("/branches", authMW.Authorized(s.listBranches)).Methods(http.MethodGet) + r.HandleFunc("/branch/snapshot/{id:.*}", authMW.Authorized(s.getCommit)).Methods(http.MethodGet) + r.HandleFunc("/branch", authMW.Authorized(s.createBranch)).Methods(http.MethodPost) + r.HandleFunc("/branch/snapshot", authMW.Authorized(s.snapshot)).Methods(http.MethodPost) + r.HandleFunc("/branch/{branchName}/log", authMW.Authorized(s.log)).Methods(http.MethodGet) + r.HandleFunc("/branch/{branchName}", authMW.Authorized(s.deleteBranch)).Methods(http.MethodDelete) + // Sub-route /admin adminR := r.PathPrefix("/admin").Subrouter() adminR.Use(authMW.AdminMW) @@ -218,16 +234,19 @@ func (s *Server) InitHandlers() { r.HandleFunc("/instance/logs", authMW.WebSocketsMW(s.wsService.tokenKeeper, s.instanceLogs)) // Health check. - r.HandleFunc("/healthz", s.healthCheck).Methods(http.MethodGet) + r.HandleFunc("/healthz", s.healthCheck).Methods(http.MethodGet, http.MethodPost) + + // Full refresh + r.HandleFunc("/full-refresh", authMW.Authorized(s.refresh)).Methods(http.MethodPost) // Show Swagger UI on index page. if err := attachAPI(r); err != nil { - log.Err("Cannot load API description.") + log.Err("cannot load API description") } // Show Swagger UI on index page. 
if err := attachSwaggerUI(r); err != nil { - log.Err("Cannot start Swagger UI.") + log.Err("cannot start Swagger UI") } // Show not found error for all other possible routes. @@ -262,7 +281,3 @@ func (s *Server) Uptime() float64 { func reportLaunching(cfg *srvCfg.Config) { log.Msg(fmt.Sprintf("API server started listening on %s:%d.", cfg.Host, cfg.Port)) } - -func (s *Server) initLogRegExp() { - s.filtering.ReloadLogRegExp([]string{s.Config.VerificationToken, s.Platform.AccessToken(), s.Platform.OrgKey()}) -} diff --git a/engine/internal/srv/ws.go b/engine/internal/srv/ws.go index 64f58211..60da6a08 100644 --- a/engine/internal/srv/ws.go +++ b/engine/internal/srv/ws.go @@ -75,7 +75,7 @@ func (s *Server) instanceLogs(w http.ResponseWriter, r *http.Request) { Follow: true, }) if err != nil { - log.Err("Failed to get container logs", err) + log.Err("failed to get container logs", err) if writingErr := conn.WriteMessage(websocket.TextMessage, []byte(err.Error())); writingErr != nil { log.Dbg("Failed to report about error", err) diff --git a/engine/internal/srv/ws_test.go b/engine/internal/srv/ws_test.go index a6fd1132..77e078a8 100644 --- a/engine/internal/srv/ws_test.go +++ b/engine/internal/srv/ws_test.go @@ -21,7 +21,8 @@ func TestLogLineFiltering(t *testing.T) { Platform: pl, filtering: log.GetFilter(), } - s.initLogRegExp() + + s.filtering.ReloadLogRegExp([]string{"secretToken"}) testCases := []struct { input []byte @@ -75,6 +76,10 @@ func TestLogLineFiltering(t *testing.T) { input: []byte(`AWS_ACCESS_KEY_ID:password`), output: []byte(`AWS_********`), }, + { + input: []byte(`secret: "secret_token"`), + output: []byte(`********`), + }, } for _, tc := range testCases { diff --git a/engine/internal/telemetry/events.go b/engine/internal/telemetry/events.go index 76703232..82b6f54c 100644 --- a/engine/internal/telemetry/events.go +++ b/engine/internal/telemetry/events.go @@ -49,11 +49,30 @@ type CloneCreated struct { DSADiff *float64 `json:"dsa_diff,omitempty"` } +// CloneUpdated describes the clone updates. +type CloneUpdated struct { + ID string `json:"id"` + Protected bool `json:"protected"` +} + // CloneDestroyed describes a clone destruction event. type CloneDestroyed struct { ID string `json:"id"` } +// BranchCreated describes a branch creation event. +type BranchCreated struct { + Name string `json:"name"` +} + +// BranchDestroyed describes a branch destruction event. +type BranchDestroyed struct { + Name string `json:"name"` +} + +// ConfigUpdated describes the config updates. +type ConfigUpdated struct{} + // Alert describes alert events. type Alert struct { Level models.AlertType `json:"level"` diff --git a/engine/internal/telemetry/telemetry.go b/engine/internal/telemetry/telemetry.go index 37ceea72..5feeb3fa 100644 --- a/engine/internal/telemetry/telemetry.go +++ b/engine/internal/telemetry/telemetry.go @@ -29,9 +29,20 @@ const ( // CloneDestroyedEvent describes a clone destruction event. CloneDestroyedEvent = "clone_destroyed" + // CloneUpdatedEvent describes a clone update event. + CloneUpdatedEvent = "clone_updated" + // SnapshotCreatedEvent describes a snapshot creation event. SnapshotCreatedEvent = "snapshot_created" + // BranchCreatedEvent describes a branch creation event. + BranchCreatedEvent = "branch_created" + + // BranchDestroyedEvent describes a branch destruction event. + BranchDestroyedEvent = "branch_destroyed" + + // ConfigUpdatedEvent describes a config update event. + ConfigUpdatedEvent = "config_updated" + // AlertEvent describes alert events.
AlertEvent = "alert" ) @@ -63,6 +74,6 @@ func (a *Agent) SendEvent(ctx context.Context, eventType string, payload interfa }) if err != nil { - log.Err("Failed to send telemetry event", err) + log.Err("failed to send telemetry event", err) } } diff --git a/engine/internal/validator/validator.go b/engine/internal/validator/validator.go index 87656c2b..f1f0a3b5 100644 --- a/engine/internal/validator/validator.go +++ b/engine/internal/validator/validator.go @@ -6,10 +6,10 @@ package validator import ( + "errors" "fmt" "strings" - "github.com/pkg/errors" passwordvalidator "github.com/wagslane/go-password-validator" "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" @@ -36,7 +36,7 @@ func (v Service) ValidateCloneRequest(cloneRequest *types.CloneCreateRequest) er } if cloneRequest.ID != "" && strings.Contains(cloneRequest.ID, "/") { - return errors.New("Clone ID cannot contain slash ('/'). Please choose another ID") + return errors.New("clone ID cannot contain slash ('/'). Please choose another ID") } if err := passwordvalidator.Validate(cloneRequest.DB.Password, minEntropyBits); err != nil { diff --git a/engine/internal/validator/validator_test.go b/engine/internal/validator/validator_test.go index a510319e..e371035c 100644 --- a/engine/internal/validator/validator_test.go +++ b/engine/internal/validator/validator_test.go @@ -19,7 +19,8 @@ func TestValidationCloneRequest(t *testing.T) { DB: &types.DatabaseRequest{ Username: "username", Password: "secret_password", - }}) + }, + }) assert.Nil(t, err) } @@ -31,7 +32,8 @@ func TestWeakPassword(t *testing.T) { DB: &types.DatabaseRequest{ Username: "username", Password: "password", - }}) + }, + }) assert.ErrorContains(t, err, "insecure password") } @@ -60,7 +62,7 @@ func TestValidationCloneRequestErrors(t *testing.T) { DB: &types.DatabaseRequest{Username: "user", Password: "password"}, ID: "test/ID", }, - error: "Clone ID cannot contain slash ('/'). Please choose another ID", + error: "clone ID cannot contain slash ('/'). Please choose another ID", }, } diff --git a/engine/internal/webhooks/events.go b/engine/internal/webhooks/events.go new file mode 100644 index 00000000..bf5e8f1e --- /dev/null +++ b/engine/internal/webhooks/events.go @@ -0,0 +1,48 @@ +package webhooks + +const ( + // CloneCreatedEvent defines the clone create event type. + CloneCreatedEvent = "clone_create" + // CloneResetEvent defines the clone reset event type. + CloneResetEvent = "clone_reset" + // CloneDeleteEvent defines the clone delete event type. + CloneDeleteEvent = "clone_delete" + + // SnapshotCreateEvent defines the snapshot create event type. + SnapshotCreateEvent = "snapshot_create" + + // SnapshotDeleteEvent defines the snapshot delete event type. + SnapshotDeleteEvent = "snapshot_delete" + + // BranchCreateEvent defines the branch create event type. + BranchCreateEvent = "branch_create" + + // BranchDeleteEvent defines the branch delete event type. + BranchDeleteEvent = "branch_delete" +) + +// EventTyper unifies webhook events. +type EventTyper interface { + GetType() string +} + +// BasicEvent defines payload of basic webhook event. +type BasicEvent struct { + EventType string `json:"event_type"` + EntityID string `json:"entity_id"` +} + +// GetType returns type of the event. +func (e BasicEvent) GetType() string { + return e.EventType +} + +// CloneEvent defines clone webhook events payload. 
+type CloneEvent struct { + BasicEvent + Host string `json:"host,omitempty"` + Port uint `json:"port,omitempty"` + Username string `json:"username,omitempty"` + DBName string `json:"dbname,omitempty"` + ContainerName string `json:"container_name,omitempty"` +} diff --git a/engine/internal/webhooks/webhooks.go b/engine/internal/webhooks/webhooks.go new file mode 100644 index 00000000..b2c6b4c2 --- /dev/null +++ b/engine/internal/webhooks/webhooks.go @@ -0,0 +1,149 @@ +// Package webhooks configures the webhooks that will be called by the DBLab Engine when an event occurs. +package webhooks + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "gitlab.com/postgres-ai/database-lab/v3/pkg/log" +) + +const ( + // DLEWebhookTokenHeader defines the HTTP header name to send secret with the webhook request. + DLEWebhookTokenHeader = "DBLab-Webhook-Token" +) + +// Config defines webhooks configuration. +type Config struct { + Hooks []Hook `yaml:"hooks"` +} + +// Hook defines structure of the webhook configuration. +type Hook struct { + URL string `yaml:"url"` + Secret string `yaml:"secret"` + Trigger []string `yaml:"trigger"` +} + +// Service listens for events and performs webhook requests. +type Service struct { + client *http.Client + hooksRegistry map[string][]Hook + eventCh <-chan EventTyper +} + +// NewService creates a new Webhook Service. +func NewService(cfg *Config, eventCh <-chan EventTyper) *Service { + whs := &Service{ + client: &http.Client{ + Transport: &http.Transport{}, + }, + hooksRegistry: make(map[string][]Hook), + eventCh: eventCh, + } + + whs.Reload(cfg) + + return whs +} + +// Reload reloads Webhook Service configuration. +func (s *Service) Reload(cfg *Config) { + s.hooksRegistry = make(map[string][]Hook) + + for _, hook := range cfg.Hooks { + if err := validateURL(hook.URL); err != nil { + log.Msg("Skip webhook processing:", err) + continue + } + + for _, event := range hook.Trigger { + s.hooksRegistry[event] = append(s.hooksRegistry[event], hook) + } + } + + log.Dbg("Registered webhooks", s.hooksRegistry) +} + +func validateURL(hookURL string) error { + parsedURL, err := url.ParseRequestURI(hookURL) + if err != nil { + return fmt.Errorf("URL %q is invalid: %w", hookURL, err) + } + + if parsedURL.Scheme == "" { + return fmt.Errorf("no scheme found in %q", hookURL) + } + + if parsedURL.Host == "" { + return fmt.Errorf("no host found in %q", hookURL) + } + + return nil +} + +// Run starts webhook listener.
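+// It blocks reading from the event channel until the channel is closed; each matching hook is fired in its own goroutine so a slow endpoint does not delay the rest.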
+func (s *Service) Run(ctx context.Context) { + for whEvent := range s.eventCh { + hooks, ok := s.hooksRegistry[whEvent.GetType()] + if !ok { + log.Dbg("Skipped event with no registered webhooks: ", whEvent.GetType()) + + continue + } + + log.Dbg("Trigger event:", whEvent) + + for _, hook := range hooks { + go s.triggerWebhook(ctx, hook, whEvent) + } + } +} + +func (s *Service) triggerWebhook(ctx context.Context, hook Hook, whEvent EventTyper) { + log.Msg("Webhook request: ", hook.URL) + + resp, err := s.makeRequest(ctx, hook, whEvent) + + if err != nil { + log.Err("webhook error:", err) + return + } + + // Close the response body to avoid leaking connections. + defer func() { _ = resp.Body.Close() }() + + log.Dbg("Webhook status code: ", resp.StatusCode) + + body, err := io.ReadAll(resp.Body) + if err != nil { + log.Err("webhook error:", err) + return + } + + log.Dbg("Webhook response: ", string(body)) +} + +func (s *Service) makeRequest(ctx context.Context, hook Hook, whEvent EventTyper) (*http.Response, error) { + payload, err := json.Marshal(whEvent) + if err != nil { + return nil, err + } + + log.Dbg("Webhook payload: ", string(payload)) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, hook.URL, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + + if hook.Secret != "" { + req.Header.Add(DLEWebhookTokenHeader, hook.Secret) + } + + req.Header.Set("Content-Type", "application/json") + + return s.client.Do(req) +} diff --git a/engine/pkg/client/dblabapi/branch.go b/engine/pkg/client/dblabapi/branch.go new file mode 100644 index 00000000..b0505b6d --- /dev/null +++ b/engine/pkg/client/dblabapi/branch.go @@ -0,0 +1,162 @@ +/* +2019 © Postgres.ai +*/ + +package dblabapi + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "sort" + + "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" + "gitlab.com/postgres-ai/database-lab/v3/pkg/models" +) + +// ListBranches returns branches list. +func (c *Client) ListBranches(ctx context.Context) ([]string, error) { + u := c.URL("/branches") + + request, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, fmt.Errorf("failed to make a request: %w", err) + } + + response, err := c.Do(ctx, request) + if err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + defer func() { _ = response.Body.Close() }() + + branches := make([]models.BranchView, 0) + + if err := json.NewDecoder(response.Body).Decode(&branches); err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + listBranches := make([]string, 0, len(branches)) + + for _, branchView := range branches { + listBranches = append(listBranches, branchView.Name) + } + + sort.Strings(listBranches) + + return listBranches, nil +} + +// CreateBranch creates a new DLE data branch.
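+// A minimal usage sketch (assumes an initialized Client c; the branch names
+// are illustrative):
+//
+//	branch, err := c.CreateBranch(ctx, types.BranchCreateRequest{
+//		BranchName: "001-branch",
+//		BaseBranch: "main",
+//	})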
+// +//nolint:dupl +func (c *Client) CreateBranch(ctx context.Context, branchRequest types.BranchCreateRequest) (*models.Branch, error) { + u := c.URL("/branch") + + body := bytes.NewBuffer(nil) + if err := json.NewEncoder(body).Encode(branchRequest); err != nil { + return nil, fmt.Errorf("failed to encode BranchCreateRequest: %w", err) + } + + request, err := http.NewRequest(http.MethodPost, u.String(), body) + if err != nil { + return nil, fmt.Errorf("failed to make a request: %w", err) + } + + response, err := c.Do(ctx, request) + if err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + defer func() { _ = response.Body.Close() }() + + var branch *models.Branch + + if err := json.NewDecoder(response.Body).Decode(&branch); err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + return branch, nil +} + +// CreateSnapshotForBranch creates a new snapshot for branch. +// +//nolint:dupl +func (c *Client) CreateSnapshotForBranch( + ctx context.Context, + snapshotRequest types.SnapshotCloneCreateRequest) (*types.SnapshotResponse, error) { + u := c.URL("/branch/snapshot") + + body := bytes.NewBuffer(nil) + if err := json.NewEncoder(body).Encode(snapshotRequest); err != nil { + return nil, fmt.Errorf("failed to encode SnapshotCreateRequest: %w", err) + } + + request, err := http.NewRequest(http.MethodPost, u.String(), body) + if err != nil { + return nil, fmt.Errorf("failed to make a request: %w", err) + } + + response, err := c.Do(ctx, request) + if err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + defer func() { _ = response.Body.Close() }() + + var snapshot *types.SnapshotResponse + + if err := json.NewDecoder(response.Body).Decode(&snapshot); err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + return snapshot, nil +} + +// BranchLog provides snapshot list for branch. +func (c *Client) BranchLog(ctx context.Context, logRequest types.LogRequest) ([]models.SnapshotDetails, error) { + u := c.URL(fmt.Sprintf("/branch/%s/log", logRequest.BranchName)) + + request, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, fmt.Errorf("failed to make a request: %w", err) + } + + response, err := c.Do(ctx, request) + if err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + defer func() { _ = response.Body.Close() }() + + var snapshots []models.SnapshotDetails + + if err := json.NewDecoder(response.Body).Decode(&snapshots); err != nil { + return nil, fmt.Errorf("failed to get response: %w", err) + } + + return snapshots, nil +} + +// DeleteBranch deletes data branch. 
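+// A minimal usage sketch (assumes an initialized Client c):
+//
+//	err := c.DeleteBranch(ctx, types.BranchDeleteRequest{BranchName: "001-branch"})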
+// +//nolint:dupl +func (c *Client) DeleteBranch(ctx context.Context, r types.BranchDeleteRequest) error { + u := c.URL(fmt.Sprintf("/branch/%s", r.BranchName)) + + request, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return fmt.Errorf("failed to make a request: %w", err) + } + + response, err := c.Do(ctx, request) + if err != nil { + return err + } + + defer func() { _ = response.Body.Close() }() + + return nil +} diff --git a/engine/pkg/client/dblabapi/client.go b/engine/pkg/client/dblabapi/client.go index 342ad931..9dc2b5f2 100644 --- a/engine/pkg/client/dblabapi/client.go +++ b/engine/pkg/client/dblabapi/client.go @@ -18,8 +18,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "gitlab.com/postgres-ai/database-lab/v3/pkg/log" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" ) @@ -136,7 +134,7 @@ func (c *Client) Do(ctx context.Context, request *http.Request) (response *http. errModel := models.Error{} if err = json.Unmarshal(b, &errModel); err != nil { - return response, errors.Wrapf(err, "failed to parse an error message: %s", (string(b))) + return response, fmt.Errorf("failed to parse an error message: %s, %w", string(b), err) } return response, errModel diff --git a/engine/pkg/client/dblabapi/snapshot.go b/engine/pkg/client/dblabapi/snapshot.go index 8e2a5cfd..0b9e607f 100644 --- a/engine/pkg/client/dblabapi/snapshot.go +++ b/engine/pkg/client/dblabapi/snapshot.go @@ -5,13 +5,17 @@ package dblabapi import ( + "bytes" "context" "encoding/json" + "fmt" "io" "net/http" + "net/url" "github.com/pkg/errors" + "gitlab.com/postgres-ai/database-lab/v3/pkg/client/dblabapi/types" "gitlab.com/postgres-ai/database-lab/v3/pkg/models" ) @@ -49,3 +53,67 @@ func (c *Client) ListSnapshotsRaw(ctx context.Context) (io.ReadCloser, error) { return response.Body, nil } + +// CreateSnapshot creates a new snapshot. +func (c *Client) CreateSnapshot(ctx context.Context, snapshotRequest types.SnapshotCreateRequest) (*models.Snapshot, error) { + u := c.URL("/snapshot") + + return c.createRequest(ctx, snapshotRequest, u) +} + +// CreateSnapshotFromClone creates a new snapshot from clone. +func (c *Client) CreateSnapshotFromClone( + ctx context.Context, + snapshotRequest types.SnapshotCloneCreateRequest) (*models.Snapshot, error) { + u := c.URL("/snapshot/clone") + + return c.createRequest(ctx, snapshotRequest, u) +} + +func (c *Client) createRequest(ctx context.Context, snapshotRequest any, u *url.URL) (*models.Snapshot, error) { + body := bytes.NewBuffer(nil) + if err := json.NewEncoder(body).Encode(snapshotRequest); err != nil { + return nil, errors.Wrap(err, "failed to encode SnapshotCreateRequest") + } + + request, err := http.NewRequest(http.MethodPost, u.String(), body) + if err != nil { + return nil, errors.Wrap(err, "failed to make a request") + } + + response, err := c.Do(ctx, request) + if err != nil { + return nil, errors.Wrap(err, "failed to get response") + } + + defer func() { _ = response.Body.Close() }() + + var snapshot *models.Snapshot + + if err := json.NewDecoder(response.Body).Decode(&snapshot); err != nil { + return nil, errors.Wrap(err, "failed to get response") + } + + return snapshot, nil +} + +// DeleteSnapshot deletes snapshot. 
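+// A minimal usage sketch (assumes an initialized Client c and an existing
+// snapshot; the snapshot ID is illustrative):
+//
+//	err := c.DeleteSnapshot(ctx, types.SnapshotDestroyRequest{
+//		SnapshotID: "snapshot_20250407101616",
+//		Force:      false,
+//	})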
+// +//nolint:dupl +func (c *Client) DeleteSnapshot(ctx context.Context, snapshotRequest types.SnapshotDestroyRequest) error { + u := c.URL(fmt.Sprintf("/snapshot/%s", snapshotRequest.SnapshotID)) + + request, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return fmt.Errorf("failed to make a request: %w", err) + } + + response, err := c.Do(ctx, request) + if err != nil { + return fmt.Errorf("failed to get response: %w", err) + } + + defer func() { _ = response.Body.Close() }() + + return nil +} diff --git a/engine/pkg/client/dblabapi/status.go b/engine/pkg/client/dblabapi/status.go index 74c31a15..2493e2b1 100644 --- a/engine/pkg/client/dblabapi/status.go +++ b/engine/pkg/client/dblabapi/status.go @@ -72,3 +72,27 @@ func (c *Client) Health(ctx context.Context) (*models.Engine, error) { return &engine, nil } + +// FullRefresh triggers a full refresh of the dataset. +func (c *Client) FullRefresh(ctx context.Context) (*models.Response, error) { + u := c.URL("/full-refresh") + + request, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return nil, errors.Wrap(err, "failed to make a request") + } + + response, err := c.Do(ctx, request) + if err != nil { + return nil, errors.Wrap(err, "failed to get response") + } + + defer func() { _ = response.Body.Close() }() + + var result models.Response + if err := json.NewDecoder(response.Body).Decode(&result); err != nil { + return nil, errors.Wrap(err, "failed to get response") + } + + return &result, nil +} diff --git a/engine/pkg/client/dblabapi/status_test.go b/engine/pkg/client/dblabapi/status_test.go index c9cd9cca..92d91bcd 100644 --- a/engine/pkg/client/dblabapi/status_test.go +++ b/engine/pkg/client/dblabapi/status_test.go @@ -111,3 +111,58 @@ func TestClientStatusWithFailedRequest(t *testing.T) { require.EqualError(t, err, "failed to get response: EOF") require.Nil(t, status) } + +func TestClientFullRefresh(t *testing.T) { + expectedResponse := &models.Response{ + Status: "OK", + Message: "Full refresh started", + } + + mockClient := NewTestClient(func(req *http.Request) *http.Response { + assert.Equal(t, req.URL.String(), "https://example.com/full-refresh") + assert.Equal(t, req.Method, http.MethodPost) + + body, err := json.Marshal(expectedResponse) + require.NoError(t, err) + + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewBuffer(body)), + Header: make(http.Header), + } + }) + + c, err := NewClient(Options{ + Host: "https://example.com/", + VerificationToken: "testVerify", + }) + require.NoError(t, err) + + c.client = mockClient + + resp, err := c.FullRefresh(context.Background()) + require.NoError(t, err) + assert.EqualValues(t, expectedResponse, resp) +} + +func TestClientFullRefreshWithFailedDecode(t *testing.T) { + mockClient := NewTestClient(func(req *http.Request) *http.Response { + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewBuffer([]byte{})), + Header: make(http.Header), + } + }) + + c, err := NewClient(Options{ + Host: "https://example.com/", + VerificationToken: "testVerify", + }) + require.NoError(t, err) + + c.client = mockClient + + resp, err := c.FullRefresh(context.Background()) + require.EqualError(t, err, "failed to get response: EOF") + require.Nil(t, resp) +} diff --git a/engine/pkg/client/dblabapi/types/clone.go b/engine/pkg/client/dblabapi/types/clone.go index c9b9e7b4..442d5e22 100644 --- 
a/engine/pkg/client/dblabapi/types/clone.go +++ b/engine/pkg/client/dblabapi/types/clone.go @@ -12,6 +12,8 @@ type CloneCreateRequest struct { DB *DatabaseRequest `json:"db"` Snapshot *SnapshotCloneFieldRequest `json:"snapshot"` ExtraConf map[string]string `json:"extra_conf"` + Branch string `json:"branch"` + Revision int `json:"-"` } // CloneUpdateRequest represents params of an update request. @@ -37,3 +39,47 @@ type ResetCloneRequest struct { SnapshotID string `json:"snapshotID"` Latest bool `json:"latest"` } + +// SnapshotCreateRequest describes params for creating snapshot request. +type SnapshotCreateRequest struct { + PoolName string `json:"poolName"` +} + +// SnapshotDestroyRequest describes params for destroying snapshot request. +type SnapshotDestroyRequest struct { + SnapshotID string `json:"snapshotID"` + Force bool `json:"force"` +} + +// SnapshotCloneCreateRequest describes params for creating snapshot request from clone. +type SnapshotCloneCreateRequest struct { + CloneID string `json:"cloneID"` + Message string `json:"message"` +} + +// BranchCreateRequest describes params for creating branch request. +type BranchCreateRequest struct { + BranchName string `json:"branchName"` + BaseBranch string `json:"baseBranch"` + SnapshotID string `json:"snapshotID"` +} + +// SnapshotResponse describes commit response. +type SnapshotResponse struct { + SnapshotID string `json:"snapshotID"` +} + +// ResetRequest describes params for reset request. +type ResetRequest struct { + SnapshotID string `json:"snapshotID"` +} + +// LogRequest describes params for log request. +type LogRequest struct { + BranchName string `json:"branchName"` +} + +// BranchDeleteRequest describes params for deleting branch request. +type BranchDeleteRequest struct { + BranchName string `json:"branchName"` +} diff --git a/engine/pkg/config/config.go b/engine/pkg/config/config.go index 747873f3..92be33fc 100644 --- a/engine/pkg/config/config.go +++ b/engine/pkg/config/config.go @@ -15,6 +15,7 @@ import ( "gitlab.com/postgres-ai/database-lab/v3/internal/provision/pool" retConfig "gitlab.com/postgres-ai/database-lab/v3/internal/retrieval/config" srvCfg "gitlab.com/postgres-ai/database-lab/v3/internal/srv/config" + "gitlab.com/postgres-ai/database-lab/v3/internal/webhooks" "gitlab.com/postgres-ai/database-lab/v3/pkg/config/global" ) @@ -35,4 +36,5 @@ type Config struct { PoolManager pool.Config `yaml:"poolManager"` EmbeddedUI embeddedui.Config `yaml:"embeddedUI"` Diagnostic diagnostic.Config `yaml:"diagnostic"` + Webhooks webhooks.Config `yaml:"webhooks"` } diff --git a/engine/pkg/log/filtering.go b/engine/pkg/log/filtering.go index c5fef4eb..c294aefb 100644 --- a/engine/pkg/log/filtering.go +++ b/engine/pkg/log/filtering.go @@ -39,6 +39,7 @@ func (f *Filtering) ReloadLogRegExp(secretStings []string) { "accessToken:\\s?(\\S+)", "orgKey:\\s?(\\S+)", "ACCESS_KEY(_ID)?:\\s?(\\S+)", + "secret:\\s?(\\S+)", } for _, secret := range secretStings { diff --git a/engine/pkg/log/log.go b/engine/pkg/log/log.go index c175003f..dd77cba9 100644 --- a/engine/pkg/log/log.go +++ b/engine/pkg/log/log.go @@ -70,7 +70,7 @@ func prepareMessage(v ...interface{}) string { builder := strings.Builder{} for _, value := range v { - builder.WriteString(" " + filter.re.ReplaceAllString(toString(value), replacingMask)) + builder.WriteString(" " + toString(value)) } return builder.String() diff --git a/engine/pkg/models/branch.go b/engine/pkg/models/branch.go new file mode 100644 index 00000000..e29f3cc7 --- /dev/null +++ b/engine/pkg/models/branch.go 
@@ -0,0 +1,49 @@ +package models + +// Branch defines a branch entity. +type Branch struct { + Name string `json:"name"` +} + +// Repo describes data repository with details about snapshots and branches. +type Repo struct { + Snapshots map[string]SnapshotDetails `json:"snapshots"` + Branches map[string]string `json:"branches"` +} + +// NewRepo creates a new Repo. +func NewRepo() *Repo { + return &Repo{ + Snapshots: make(map[string]SnapshotDetails), + Branches: make(map[string]string), + } +} + +// SnapshotDetails describes snapshot. +type SnapshotDetails struct { + ID string `json:"id"` + Parent string `json:"parent"` + Child []string `json:"child"` + Branch []string `json:"branch"` + Root []string `json:"root"` + DataStateAt string `json:"dataStateAt"` + Message string `json:"message"` + Dataset string `json:"dataset"` + Clones []string `json:"clones"` +} + +// BranchView describes branch view. +type BranchView struct { + Name string `json:"name"` + Parent string `json:"parent"` + DataStateAt string `json:"dataStateAt"` + SnapshotID string `json:"snapshotID"` + Dataset string `json:"dataset"` + NumSnapshots int `json:"numSnapshots"` +} + +// BranchEntity defines a branch-snapshot pair. +type BranchEntity struct { + Name string + SnapshotID string +} diff --git a/engine/pkg/models/clone.go b/engine/pkg/models/clone.go index 6b4520ff..b7300175 100644 --- a/engine/pkg/models/clone.go +++ b/engine/pkg/models/clone.go @@ -6,14 +6,17 @@ package models // Clone defines a clone model. type Clone struct { - ID string `json:"id"` - Snapshot *Snapshot `json:"snapshot"` - Protected bool `json:"protected"` - DeleteAt *LocalTime `json:"deleteAt"` - CreatedAt *LocalTime `json:"createdAt"` - Status Status `json:"status"` - DB Database `json:"db"` - Metadata CloneMetadata `json:"metadata"` + ID string `json:"id"` + Snapshot *Snapshot `json:"snapshot"` + Branch string `json:"branch"` + Revision int `json:"revision"` + HasDependent bool `json:"hasDependent"` + Protected bool `json:"protected"` + DeleteAt *LocalTime `json:"deleteAt"` + CreatedAt *LocalTime `json:"createdAt"` + Status Status `json:"status"` + DB Database `json:"db"` + Metadata CloneMetadata `json:"metadata"` } // CloneMetadata contains fields describing a clone model. diff --git a/engine/pkg/models/snapshot.go b/engine/pkg/models/snapshot.go index fe1ce8a4..5299e4ad 100644 --- a/engine/pkg/models/snapshot.go +++ b/engine/pkg/models/snapshot.go @@ -13,6 +13,9 @@ type Snapshot struct { LogicalSize uint64 `json:"logicalSize"` Pool string `json:"pool"` NumClones int `json:"numClones"` + Clones []string `json:"clones"` + Branch string `json:"branch"` + Message string `json:"message"` } // SnapshotView represents a view of snapshot. diff --git a/engine/pkg/models/status.go b/engine/pkg/models/status.go index 784d7667..4e5d890a 100644 --- a/engine/pkg/models/status.go +++ b/engine/pkg/models/status.go @@ -10,6 +10,12 @@ type Status struct { Message string `json:"message"` } +// Response defines the response structure. +type Response struct { + Status string `json:"status"` + Message string `json:"message"` +} + // StatusCode defines the status code of clones and instance. 
type StatusCode string @@ -37,4 +43,6 @@ const ( SyncStatusDown StatusCode = "Down" SyncStatusNotAvailable StatusCode = "Not available" SyncStatusError StatusCode = "Error" + + ResponseOK = "OK" ) diff --git a/engine/pkg/util/branching/branching.go b/engine/pkg/util/branching/branching.go new file mode 100644 index 00000000..75053856 --- /dev/null +++ b/engine/pkg/util/branching/branching.go @@ -0,0 +1,110 @@ +/* +2023 © Postgres.ai +*/ + +// Package branching contains branching tools and types. +package branching + +import ( + "fmt" + "path" + "strings" +) + +const ( + // DefaultBranch defines the name of the default branch. + DefaultBranch = "main" + + // DefaultRevision defines the default clone revision. + DefaultRevision = 0 + + // BranchDir defines branch directory in the pool. + BranchDir = "branch" +) + +// BranchName returns a full branch name in the data pool. +func BranchName(poolName, branchName string) string { + return path.Join(poolName, BranchDir, branchName) +} + +// CloneDataset returns a full clone dataset in the data pool. +func CloneDataset(poolName, branchName, cloneName string) string { + return path.Join(BranchName(poolName, branchName), cloneName) +} + +// CloneName returns a full clone name in the data pool. +func CloneName(poolName, branchName, cloneName string, revision int) string { + return path.Join(BranchName(poolName, branchName), cloneName, RevisionSegment(revision)) +} + +// RevisionSegment returns a clone path suffix depending on its revision. +func RevisionSegment(revision int) string { + return fmt.Sprintf("r%d", revision) +} + +// ParseCloneName parses clone name from the clone dataset. +func ParseCloneName(cloneDataset, poolName string) (string, bool) { + const cloneSegmentNumber = 2 + + splits := parseCloneDataset(cloneDataset, poolName) + + if len(splits) < cloneSegmentNumber { + return "", false + } + + cloneID := splits[1] + + return cloneID, true +} + +// ParseBranchName parses branch name from the clone dataset. +func ParseBranchName(cloneDataset, poolName string) (string, bool) { + splits := parseCloneDataset(cloneDataset, poolName) + + if len(splits) < 1 { + return "", false + } + + branch := splits[0] + + return branch, true +} + +func parseCloneDataset(cloneDataset, poolName string) []string { + const splitParts = 3 + + // bcrStr contains branch, clone and revision. + bcrStr := strings.TrimPrefix(cloneDataset, poolName+"/"+BranchDir+"/") + + // Parse branchName/cloneID/revision. + splits := strings.SplitN(bcrStr, "/", splitParts) + if len(splits) != splitParts { + return nil + } + + return splits +} + +// ParseBranchNameFromSnapshot parses branch name from the snapshot ID.
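+// For example (values mirror the unit tests below):
+//
+//	ParseBranchNameFromSnapshot("pool/pg17/branch/dev@20250407101828", "pool/pg17") // "dev"
+//	ParseBranchNameFromSnapshot("pool/pg17@snapshot_20250407101616", "pool/pg17")   // "" (not under branch/)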
+func ParseBranchNameFromSnapshot(snapshot, poolName string) string { + dataset, _, found := strings.Cut(snapshot, "@") + if !found { + return "" + } + + branchPrefix := poolName + "/" + BranchDir + "/" + if !strings.HasPrefix(dataset, branchPrefix) { + return "" + } + + trimmedDataset := strings.TrimPrefix(dataset, branchPrefix) + + splits := strings.SplitN(trimmedDataset, "/", 2) + if len(splits) < 1 { + return "" + } + + branch := splits[0] + + return branch +} diff --git a/engine/pkg/util/branching/branching_test.go b/engine/pkg/util/branching/branching_test.go new file mode 100644 index 00000000..661ff82b --- /dev/null +++ b/engine/pkg/util/branching/branching_test.go @@ -0,0 +1,35 @@ +package branching + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParsingBranchNameFromSnapshot(t *testing.T) { + const poolName = "pool/pg17" + + testCases := []struct { + input string + expected string + }{ + { + input: "pool/pg17@snapshot_20250407101616", + expected: "", + }, + { + input: "pool/pg17/branch/dev@20250407101828", + expected: "dev", + }, + { + input: "pool/pg17/branch/main/cvpqe8gn9i6s73b49e3g/r0@20250407102140", + expected: "main", + }, + } + + for _, tc := range testCases { + branchName := ParseBranchNameFromSnapshot(tc.input, poolName) + + assert.Equal(t, tc.expected, branchName) + } +} diff --git a/engine/pkg/util/clones.go b/engine/pkg/util/clones.go index 4e868651..0a798c51 100644 --- a/engine/pkg/util/clones.go +++ b/engine/pkg/util/clones.go @@ -4,21 +4,12 @@ package util -import ( - "strconv" -) - const ( // ClonePrefix defines a Database Lab clone prefix. ClonePrefix = "dblab_clone_" ) -// GetCloneName returns a clone name. -func GetCloneName(port uint) string { - return ClonePrefix + strconv.FormatUint(uint64(port), 10) -} - -// GetCloneNameStr returns a clone name. -func GetCloneNameStr(port string) string { - return ClonePrefix + port +// GetPoolName returns pool name. +func GetPoolName(basePool, snapshotSuffix string) string { + return basePool + "/" + snapshotSuffix } diff --git a/engine/scripts/init-zfs-colima.sh b/engine/scripts/init-zfs-colima.sh new file mode 100755 index 00000000..ac96b8a9 --- /dev/null +++ b/engine/scripts/init-zfs-colima.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +POOL_NAME="dblab_pool" +POOL_MNT="/var/lib/dblab/dblab_pool" +DISK_FILE="/zfs-disk" +DATASETS=(dataset_1 dataset_2 dataset_3) + +echo "🔍 Checking if zfsutils-linux is installed..." +if ! command -v zfs >/dev/null 2>&1; then + echo "📦 Installing zfsutils-linux..." + sudo apt update + sudo apt install -y zfsutils-linux +else + echo "✅ ZFS already installed" +fi + +if [ ! -f "$DISK_FILE" ]; then + echo "🧱 Creating virtual ZFS disk at $DISK_FILE..." + sudo truncate -s 5G "$DISK_FILE" +else + echo "✅ ZFS disk file already exists" +fi + +echo "🔗 Setting up loop device..." +sudo losetup -fP "$DISK_FILE" +LOOP=$(sudo losetup -j "$DISK_FILE" | cut -d: -f1) + +echo "📂 Checking if pool '$POOL_NAME' exists..." +if ! zpool list | grep -q "$POOL_NAME"; then + echo "🚀 Creating ZFS pool $POOL_NAME..." + sudo zpool create -f \ + -O compression=on \ + -O atime=off \ + -O recordsize=128k \ + -O logbias=throughput \ + -m "$POOL_MNT" \ + "$POOL_NAME" \ + "$LOOP" +else + echo "✅ ZFS pool '$POOL_NAME' already exists" +fi + +echo "📦 Creating base datasets..." +for DATASET in "${DATASETS[@]}"; do + if ! 
zfs list | grep -q "${POOL_NAME}/${DATASET}"; then + echo "📁 Creating dataset ${POOL_NAME}/${DATASET}" + sudo zfs create -o mountpoint="${POOL_MNT}/${DATASET}" "${POOL_NAME}/${DATASET}" + else + echo "⚠️ Dataset '${DATASET}' already exists" + fi +done + +echo "✅ ZFS setup complete." \ No newline at end of file diff --git a/engine/test/1.synthetic.sh b/engine/test/1.synthetic.sh index 5470eb51..53a60770 100644 --- a/engine/test/1.synthetic.sh +++ b/engine/test/1.synthetic.sh @@ -66,9 +66,12 @@ sudo docker rm dblab_pg_initdb configDir="$HOME/.dblab/engine/configs" metaDir="$HOME/.dblab/engine/meta" +logsDir="$HOME/.dblab/engine/logs" # Copy the contents of configuration example mkdir -p "${configDir}" +mkdir -p "${metaDir}" +mkdir -p "${logsDir}" curl https://gitlab.com/postgres-ai/database-lab/-/raw/"${TAG:-master}"/engine/configs/config.example.logical_generic.yml \ --output "${configDir}/server.yml" @@ -116,6 +119,7 @@ sudo docker run \ --volume ${DLE_TEST_MOUNT_DIR}:${DLE_TEST_MOUNT_DIR}/:rshared \ --volume "${configDir}":/home/dblab/configs \ --volume "${metaDir}":/home/dblab/meta \ + --volume "${logsDir}":/home/dblab/logs \ --env DOCKER_API_VERSION=1.39 \ --detach \ "${IMAGE2TEST}" @@ -156,18 +160,29 @@ dblab init \ dblab instance status # Check the snapshot list - if [[ $(dblab snapshot list | jq length) -eq 0 ]] ; then - echo "No snapshot found" && exit 1 - fi +if [[ $(dblab snapshot list | jq length) -eq 0 ]] ; then + echo "No snapshot found" && exit 1 +fi + +dblab snapshot create + +if [[ $(dblab snapshot list | jq length) -eq 0 ]] ; then + echo "Snapshot has not been created" && exit 1 +fi ## Create a clone +CLONE_ID="testclone" + dblab clone create \ --username dblab_user_1 \ --password secret_password \ - --id testclone + --id ${CLONE_ID} ### Check that database system was properly shut down (clone data dir) -CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/clones/dblab_clone_"${DLE_PORT_POOL_FROM}"/data/log +BRANCH_MAIN="main" +REVISION_0="r0" +# /var/lib/test/dblab_mount/test_dblab_pool/branch/main/testclone/r0 +CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/branch/"${BRANCH_MAIN}"/"${CLONE_ID}"/"${REVISION_0}"/data/log LOG_FILE_CSV=$(sudo ls -t "$CLONE_LOG_DIR" | grep .csv | head -n 1) if sudo test -d "$CLONE_LOG_DIR" then @@ -231,6 +246,55 @@ PGPASSWORD=secret_password psql \ dblab clone destroy testclone dblab clone list +### Data branching. 
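+# The sequence below exercises the branching workflow end to end: list
+# branches, create one, build clones and snapshots on it, inspect history,
+# then clean up the clones, snapshots, and the branch itself.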
+dblab branch || (echo "Failed to list branches when data branching is not initialized" && exit 1) +dblab branch 001-branch || (echo "Failed to create a data branch" && exit 1) +dblab branch + +dblab clone create \ + --username john \ + --password secret_test_123 \ + --branch 001-branch \ + --id branchclone001 || (echo "Failed to create a clone on branch" && exit 1) + +dblab commit --clone-id branchclone001 --message branchclone001 || (echo "Failed to create a snapshot" && exit 1) + +dblab clone create \ + --username alice \ + --password secret_password_123 \ + --branch 001-branch \ + --id branchclone002 || (echo "Failed to create a clone on branch" && exit 1) + +dblab commit --clone-id branchclone002 -m branchclone002 || (echo "Failed to create a snapshot" && exit 1) + +dblab log 001-branch || (echo "Failed to show branch history" && exit 1) + +dblab clone destroy branchclone001 || (echo "Failed to destroy clone" && exit 1) +dblab clone destroy branchclone002 || (echo "Failed to destroy clone" && exit 1) + +sudo docker wait branchclone001 branchclone002 || echo "Clones have been removed" + +dblab clone list +dblab snapshot list + +dblab switch main + +dblab clone create \ + --username alice \ + --password secret_password_123 \ + --branch 001-branch \ + --id branchclone003 || (echo "Failed to create a clone on branch" && exit 1) + +dblab commit --clone-id branchclone003 --message branchclone003 || (echo "Failed to create a snapshot" && exit 1) + +dblab snapshot delete "$(dblab snapshot list | jq -r .[0].id)" || (echo "Failed to delete a snapshot" && exit 1) + +dblab clone destroy branchclone003 || (echo "Failed to destroy clone" && exit 1) + +dblab branch --delete 001-branch || (echo "Failed to delete data branch" && exit 1) + +dblab branch + ## Stop DLE. sudo docker stop ${DLE_SERVER_NAME} diff --git a/engine/test/2.logical_generic.sh b/engine/test/2.logical_generic.sh index eb185c7a..9ce5f7ca 100644 --- a/engine/test/2.logical_generic.sh +++ b/engine/test/2.logical_generic.sh @@ -77,10 +77,12 @@ source "${DIR}/_zfs.file.sh" configDir="$HOME/.dblab/engine/configs" metaDir="$HOME/.dblab/engine/meta" +logsDir="$HOME/.dblab/engine/logs" # Copy the contents of configuration example mkdir -p "${configDir}" mkdir -p "${metaDir}" +mkdir -p "${logsDir}" curl https://gitlab.com/postgres-ai/database-lab/-/raw/"${TAG:-master}"/engine/configs/config.example.logical_generic.yml \ --output "${configDir}/server.yml" @@ -130,6 +132,7 @@ sudo docker run \ --volume ${DLE_TEST_MOUNT_DIR}:${DLE_TEST_MOUNT_DIR}/:rshared \ --volume "${configDir}":/home/dblab/configs \ --volume "${metaDir}":/home/dblab/meta \ + --volume "${logsDir}":/home/dblab/logs \ --env DOCKER_API_VERSION=1.39 \ --detach \ "${IMAGE2TEST}" @@ -284,13 +287,18 @@ dblab instance status ## Create a clone +CLONE_ID="testclone" + dblab clone create \ --username dblab_user_1 \ --password secret_password \ - --id testclone + --id ${CLONE_ID} ### Check that database system was properly shut down (clone data dir) -CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/clones/dblab_clone_"${DLE_PORT_POOL_FROM}"/data/log +BRANCH_MAIN="main" +REVISION_0="r0" +# /var/lib/test/dblab_mount/test_dblab_pool/branch/main/testclone/r0 +CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/branch/"${BRANCH_MAIN}"/"${CLONE_ID}"/"${REVISION_0}"/data/log LOG_FILE_CSV=$(sudo ls -t "$CLONE_LOG_DIR" | grep .csv | head -n 1) if sudo test -d "$CLONE_LOG_DIR" then diff --git a/engine/test/3.physical_walg.sh b/engine/test/3.physical_walg.sh index
a311367d..f3c5e8bc 100644 --- a/engine/test/3.physical_walg.sh +++ b/engine/test/3.physical_walg.sh @@ -174,13 +174,17 @@ dblab instance status ## Create a clone +CLONE_ID="testclone" + dblab clone create \ --username dblab_user_1 \ --password secret_password \ - --id testclone + --id ${CLONE_ID} ### Check that database system was properly shut down (clone data dir) -CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/clones/dblab_clone_"${DLE_PORT_POOL_FROM}"/data/log +BRANCH_MAIN="main" +REVISION_0="r0" +CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/branch/"${BRANCH_MAIN}"/"${CLONE_ID}"/"${REVISION_0}"/data/log LOG_FILE_CSV=$(sudo ls -t "$CLONE_LOG_DIR" | grep .csv | head -n 1) if sudo test -d "$CLONE_LOG_DIR" then diff --git a/engine/test/4.physical_basebackup.sh b/engine/test/4.physical_basebackup.sh index f72508b5..0d6ab7b2 100644 --- a/engine/test/4.physical_basebackup.sh +++ b/engine/test/4.physical_basebackup.sh @@ -92,9 +92,11 @@ source "${DIR}/_zfs.file.sh" configDir="$HOME/.dblab/engine/configs" metaDir="$HOME/.dblab/engine/meta" +logsDir="$HOME/.dblab/engine/logs" # Copy the contents of configuration example mkdir -p "${configDir}" +mkdir -p "${logsDir}" curl https://gitlab.com/postgres-ai/database-lab/-/raw/"${TAG:-master}"/engine/configs/config.example.physical_generic.yml \ --output "${configDir}/server.yml" @@ -144,6 +146,7 @@ sudo docker run \ --volume ${DLE_TEST_MOUNT_DIR}:${DLE_TEST_MOUNT_DIR}/:rshared \ --volume "${configDir}":/home/dblab/configs \ --volume "${metaDir}":/home/dblab/meta \ + --volume "${logsDir}":/home/dblab/logs \ --env DOCKER_API_VERSION=1.39 \ --detach \ "${IMAGE2TEST}" @@ -191,13 +194,17 @@ dblab instance status ## Create a clone +CLONE_ID="testclone" + dblab clone create \ --username dblab_user_1 \ --password secret_password \ - --id testclone + --id ${CLONE_ID} ### Check that database system was properly shut down (clone data dir) -CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/clones/dblab_clone_"${DLE_PORT_POOL_FROM}"/data/log +BRANCH_MAIN="main" +REVISION_0="r0" +CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/branch/"${BRANCH_MAIN}"/"${CLONE_ID}"/"${REVISION_0}"/data/log LOG_FILE_CSV=$(sudo ls -t "$CLONE_LOG_DIR" | grep .csv | head -n 1) if sudo test -d "$CLONE_LOG_DIR" then diff --git a/engine/test/5.logical_rds.sh b/engine/test/5.logical_rds.sh index a05e325d..6d00db3e 100644 --- a/engine/test/5.logical_rds.sh +++ b/engine/test/5.logical_rds.sh @@ -125,13 +125,17 @@ dblab instance status ## Create a clone +CLONE_ID="testclone" + dblab clone create \ --username dblab_user_1 \ --password secret_password \ - --id testclone + --id ${CLONE_ID} ### Check that database system was properly shut down (clone data dir) -CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/clones/dblab_clone_"${DLE_PORT_POOL_FROM}"/data/log +BRANCH_MAIN="main" +REVISION_0="r0" +CLONE_LOG_DIR="${DLE_TEST_MOUNT_DIR}"/"${DLE_TEST_POOL_NAME}"/branch/"${BRANCH_MAIN}"/"${CLONE_ID}"/"${REVISION_0}"/data/log LOG_FILE_CSV=$(sudo ls -t "$CLONE_LOG_DIR" | grep .csv | head -n 1) if sudo test -d "$CLONE_LOG_DIR" then diff --git a/engine/test/_cleanup.sh b/engine/test/_cleanup.sh index 6fb304a7..1d09a812 100644 --- a/engine/test/_cleanup.sh +++ b/engine/test/_cleanup.sh @@ -8,6 +8,8 @@ ZFS_FILE="$(pwd)/zfs_file" # Stop and remove test Docker containers sudo docker ps -aq --filter label="test_dblab_pool" | xargs --no-run-if-empty sudo docker rm -f \ || echo "Failed to remove test Docker containers, 
continuing..." +sudo docker ps -aq --filter label="dblab_clone=test_dblab_pool" | xargs --no-run-if-empty sudo docker rm -f \ + || echo "Failed to remove test Docker containers, continuing..." sudo docker ps -aq --filter label="dblab_test" | xargs --no-run-if-empty sudo docker rm -f \ || echo "Failed to remove dblab_test Docker containers, continuing..." @@ -19,6 +21,10 @@ sudo docker images --filter=reference='registry.gitlab.com/postgres-ai/database- sudo rm -rf ${DLE_TEST_MOUNT_DIR}/${DLE_TEST_POOL_NAME}/data/* \ || echo "Data directory cleanup finished with errors but continuing..." +# Clean up branch directory +sudo rm -rf ${DLE_TEST_MOUNT_DIR}/${DLE_TEST_POOL_NAME}/branch/* \ + || echo "Branch directory cleanup finished with errors but continuing..." + # Remove dump directory sudo umount ${DLE_TEST_MOUNT_DIR}/${DLE_TEST_POOL_NAME}/dump \ || echo "Unmounting dump directory finished with errors but it is OK to ignore them." diff --git a/ui/.dockerignore b/ui/.dockerignore index 7e3cab0d..3ec5991a 100644 --- a/ui/.dockerignore +++ b/ui/.dockerignore @@ -6,4 +6,4 @@ **/build/** ui/node_modules/ ui/packages/ce/node_modules/ -ui/packages/shared/node_modules/ +ui/packages/shared/node_modules/ \ No newline at end of file diff --git a/ui/.gitlab-ci.yml b/ui/.gitlab-ci.yml index 71774968..ffbfde13 100644 --- a/ui/.gitlab-ci.yml +++ b/ui/.gitlab-ci.yml @@ -68,10 +68,14 @@ e2e-ce-ui-test: - apt install -y curl libgtk2.0-0 libgtk-3-0 libgbm-dev libnotify-dev libgconf-2-4 libnss3 libxss1 libasound2 libxtst6 xauth xvfb - npm install -g wait-on - npm install -g pnpm + - pnpm config set verify-store-integrity false # TODO: Set up caching. -# - pnpm config set store-dir /builds/postgres-ai/database-lab/.pnpm-store/ + #- pnpm config set store-dir /builds/postgres-ai/database-lab/.pnpm-store/ script: - - pnpm --dir ui/ i --no-frozen-lockfile + - pnpm --dir ui/ --filter @postgres.ai/ce install + - pnpm --dir ui/ --filter @postgres.ai/ce build - pnpm --dir ui/ --filter @postgres.ai/ce exec cypress install - - pnpm --dir ui/ --filter @postgres.ai/ce start & wait-on http://localhost:3001 + - npx serve -s ui/packages/ce/build -l 3001 > server.log 2>&1 & + - sleep 20 + - timeout 120s wait-on http://localhost:3001 || (echo "❌ UI didn't start in time"; cat server.log; exit 1) - pnpm --dir ui/ --filter @postgres.ai/ce cy:run diff --git a/ui/package.json b/ui/package.json index 9e92dbc2..63a3af14 100644 --- a/ui/package.json +++ b/ui/package.json @@ -34,7 +34,7 @@ "semver@>=7.0.0 <7.5.2": ">=7.5.2", "semver@<5.7.2": ">=5.7.2", "semver@>=6.0.0 <6.3.1": ">=6.3.1", - "minimatch@<3.0.5": ">=3.0.5", + "minimatch": "3.1.2", "json5@<1.0.2": ">=1.0.2", "json5@>=2.0.0 <2.2.2": ">=2.2.2", "ip@<1.1.9": ">=1.1.9", diff --git a/ui/packages/ce/.dockerignore b/ui/packages/ce/.dockerignore index 00dbf44f..ce733752 100644 --- a/ui/packages/ce/.dockerignore +++ b/ui/packages/ce/.dockerignore @@ -6,4 +6,4 @@ **/build/** /ui/node_modules/ /ui/packages/ce/node_modules/ -/ui/packages/shared/node_modules/ \ No newline at end of file +/ui/packages/shared/node_modules/ diff --git a/ui/packages/ce/cypress/e2e/tabs.cy.js b/ui/packages/ce/cypress/e2e/tabs.cy.js index d9c6dc59..db2afe82 100644 --- a/ui/packages/ce/cypress/e2e/tabs.cy.js +++ b/ui/packages/ce/cypress/e2e/tabs.cy.js @@ -4,7 +4,6 @@ Cypress.on('uncaught:exception', () => { return false }) -// Function to set up intercepts for the requests function setupIntercepts() { const exceptions = [ '/healthz', @@ -44,7 +43,6 @@ function 
setupIntercepts() { }, }) - // Intercept all fetch requests and return a 200 cy.intercept('GET', '*', (req) => { if ( req.resourceType === 'fetch' && @@ -61,7 +59,6 @@ function setupIntercepts() { } describe('Configuration tab', () => { - // It should intercept the requests beforeEach(() => { setupIntercepts() }) @@ -71,19 +68,10 @@ describe('Configuration tab', () => { retryOnStatusCodeFailure: true, onLoad: () => { cy.get('.MuiTabs-flexContainer') - .contains('Configuration', { timeout: 10000 }) + .contains('Configuration') .should('be.visible') .click({ force: true }) }, }) }) - - it('should have form inputs in the "Configuration" tab', () => { - cy.get('.MuiTabs-flexContainer') - .contains('Configuration', { timeout: 10000 }) - .should('be.visible') - .click({ force: true }) - - cy.get('button[type="button"]').should('exist') - }) }) diff --git a/ui/packages/ce/package.json b/ui/packages/ce/package.json index b63ae9c0..55e54843 100644 --- a/ui/packages/ce/package.json +++ b/ui/packages/ce/package.json @@ -1,6 +1,6 @@ { "name": "@postgres.ai/ce", - "version": "3.5.0", + "version": "4.0.0", "private": true, "dependencies": { "@craco/craco": "^6.4.3", @@ -19,6 +19,7 @@ "@types/react-dom": "^17.0.10", "@types/react-router": "^5.1.17", "@types/react-router-dom": "^5.3.1", + "@types/react-syntax-highlighter": "^15.5.6", "byte-size": "^8.1.0", "classnames": "^2.3.1", "clsx": "^1.1.1", @@ -39,6 +40,7 @@ "react-router": "^5.1.2", "react-router-dom": "^5.1.2", "react-scripts": "^5.0.0", + "react-syntax-highlighter": "^15.5.0", "stream-browserify": "^3.0.0", "typescript": "^4.4.4", "use-timer": "^2.0.1", diff --git a/ui/packages/ce/src/App/Instance/Branches/Branch/index.tsx b/ui/packages/ce/src/App/Instance/Branches/Branch/index.tsx new file mode 100644 index 00000000..8da308a3 --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Branches/Branch/index.tsx @@ -0,0 +1,59 @@ +import { useParams } from 'react-router-dom' + +import { getBranches } from 'api/branches/getBranches' +import { deleteBranch } from 'api/branches/deleteBranch' +import { getSnapshotList } from 'api/branches/getSnapshotList' +import { initWS } from 'api/engine/initWS' + +import { PageContainer } from 'components/PageContainer' +import { NavPath } from 'components/NavPath' +import { ROUTES } from 'config/routes' +import { BranchesPage } from '@postgres.ai/shared/pages/Branches/Branch' + +type Params = { + branchId: string +} + +export const Branch = () => { + const { branchId } = useParams() + + const api = { + getBranches, + deleteBranch, + getSnapshotList, + initWS + } + + const elements = { + breadcrumbs: ( + + ), + } + + return ( + + ROUTES.INSTANCE.BRANCHES.BRANCHES.path, + branches: () => ROUTES.INSTANCE.BRANCHES.BRANCHES.path, + snapshot: (snapshotId: string) => + ROUTES.INSTANCE.SNAPSHOTS.SNAPSHOT.createPath(snapshotId), + createClone: (branchId: string) => ROUTES.INSTANCE.CLONES.CREATE.createPath(branchId), + }} + /> + + ) +} diff --git a/ui/packages/ce/src/App/Instance/Branches/CreateBranch/index.tsx b/ui/packages/ce/src/App/Instance/Branches/CreateBranch/index.tsx new file mode 100644 index 00000000..e0533e05 --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Branches/CreateBranch/index.tsx @@ -0,0 +1,47 @@ +import { getBranches } from 'api/branches/getBranches' +import { createBranch } from 'api/branches/createBranch' +import { getSnapshots } from 'api/snapshots/getSnapshots' +import { initWS } from 'api/engine/initWS' + +import { CreateBranchPage } from '@postgres.ai/shared/pages/CreateBranch' + +import { 
PageContainer } from 'components/PageContainer' +import { NavPath } from 'components/NavPath' +import { ROUTES } from 'config/routes' + +export const CreateBranch = () => { + const routes = { + branch: (branchName: string) => + ROUTES.INSTANCE.BRANCHES.BRANCH.createPath(branchName), + } + + const api = { + getBranches, + createBranch, + getSnapshots, + initWS + } + + const elements = { + breadcrumbs: ( + + ), + } + + return ( + + + + ) +} diff --git a/ui/packages/ce/src/App/Instance/Branches/index.tsx b/ui/packages/ce/src/App/Instance/Branches/index.tsx new file mode 100644 index 00000000..ecf327b9 --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Branches/index.tsx @@ -0,0 +1,25 @@ +import { Switch, Route, Redirect } from 'react-router-dom' + +import { ROUTES } from 'config/routes' +import { TABS_INDEX } from '@postgres.ai/shared/pages/Instance/Tabs' + +import { Page } from '../Page' +import { Branch } from './Branch' +import { CreateBranch } from './CreateBranch' + +export const Branches = () => { + return ( + + + + + + + + + + + + + ) +} diff --git a/ui/packages/ce/src/App/Instance/Clones/Clone/index.tsx b/ui/packages/ce/src/App/Instance/Clones/Clone/index.tsx index f5bc914d..96c8b8a1 100644 --- a/ui/packages/ce/src/App/Instance/Clones/Clone/index.tsx +++ b/ui/packages/ce/src/App/Instance/Clones/Clone/index.tsx @@ -9,6 +9,10 @@ import { getClone } from 'api/clones/getClone' import { resetClone } from 'api/clones/resetClone' import { destroyClone } from 'api/clones/destroyClone' import { updateClone } from 'api/clones/updateClone' +import { createSnapshot } from 'api/snapshots/createSnapshot' +import { initWS } from 'api/engine/initWS' +import { destroySnapshot } from 'api/snapshots/destroySnapshot' + import { PageContainer } from 'components/PageContainer' import { NavPath } from 'components/NavPath' import { ROUTES } from 'config/routes' @@ -27,7 +31,10 @@ export const Clone = () => { getClone, resetClone, destroyClone, + destroySnapshot, updateClone, + createSnapshot, + initWS, } const elements = { @@ -35,9 +42,9 @@ export const Clone = () => { { cloneId={cloneId} routes={{ instance: () => ROUTES.INSTANCE.path, + snapshot: (snapshotId: string) => + ROUTES.INSTANCE.SNAPSHOTS.SNAPSHOT.createPath(snapshotId), + createSnapshot: (cloneId: string) => ROUTES.INSTANCE.SNAPSHOTS.CREATE.createPath(cloneId), }} api={api} elements={elements} diff --git a/ui/packages/ce/src/App/Instance/Clones/CreateClone/index.tsx b/ui/packages/ce/src/App/Instance/Clones/CreateClone/index.tsx index bf5ccebc..aa17c80c 100644 --- a/ui/packages/ce/src/App/Instance/Clones/CreateClone/index.tsx +++ b/ui/packages/ce/src/App/Instance/Clones/CreateClone/index.tsx @@ -5,9 +5,11 @@ import { NavPath } from 'components/NavPath' import { ROUTES } from 'config/routes' import { getInstance } from 'api/instances/getInstance' import { getInstanceRetrieval } from 'api/instances/getInstanceRetrieval' -import { getSnapshots } from 'api/snapshots/getSnapshots' import { createClone } from 'api/clones/createClone' import { getClone } from 'api/clones/getClone' +import { getBranches } from 'api/branches/getBranches' +import { getSnapshots } from 'api/snapshots/getSnapshots' +import { initWS } from 'api/engine/initWS' export const CreateClone = () => { const routes = { @@ -16,17 +18,23 @@ export const CreateClone = () => { } const api = { - getSnapshots, getInstance, getInstanceRetrieval, createClone, getClone, + getBranches, + getSnapshots, + initWS } const elements = { breadcrumbs: ( ), } diff --git 
a/ui/packages/ce/src/App/Instance/Clones/index.tsx b/ui/packages/ce/src/App/Instance/Clones/index.tsx index 390f3e11..a39efa94 100644 --- a/ui/packages/ce/src/App/Instance/Clones/index.tsx +++ b/ui/packages/ce/src/App/Instance/Clones/index.tsx @@ -1,9 +1,12 @@ import { Switch, Route, Redirect } from 'react-router-dom' +import { TABS_INDEX } from '@postgres.ai/shared/pages/Instance/Tabs' + import { ROUTES } from 'config/routes' import { CreateClone } from './CreateClone' import { Clone } from './Clone' +import { Page } from '../Page' export const Clones = () => { return ( @@ -16,6 +19,10 @@ export const Clones = () => { + + + + ) diff --git a/ui/packages/ce/src/App/Instance/Configuration/index.tsx b/ui/packages/ce/src/App/Instance/Configuration/index.tsx new file mode 100644 index 00000000..93981d6c --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Configuration/index.tsx @@ -0,0 +1,10 @@ +import { TABS_INDEX } from '@postgres.ai/shared/pages/Instance/Tabs' +import { ROUTES } from 'config/routes' +import { Route } from 'react-router' +import { Page } from '../Page' + +export const Configuration = () => ( + + + +) diff --git a/ui/packages/ce/src/App/Instance/Logs/index.tsx b/ui/packages/ce/src/App/Instance/Logs/index.tsx new file mode 100644 index 00000000..584494b6 --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Logs/index.tsx @@ -0,0 +1,10 @@ +import { TABS_INDEX } from '@postgres.ai/shared/pages/Instance/Tabs' +import { ROUTES } from 'config/routes' +import { Route } from 'react-router' +import { Page } from '../Page' + +export const Logs = () => ( + + + +) diff --git a/ui/packages/ce/src/App/Instance/Page/index.tsx b/ui/packages/ce/src/App/Instance/Page/index.tsx index 60a92f16..a44b559b 100644 --- a/ui/packages/ce/src/App/Instance/Page/index.tsx +++ b/ui/packages/ce/src/App/Instance/Page/index.tsx @@ -6,6 +6,7 @@ import { ROUTES } from 'config/routes' import { getInstance } from 'api/instances/getInstance' import { getInstanceRetrieval } from 'api/instances/getInstanceRetrieval' import { getSnapshots } from 'api/snapshots/getSnapshots' +import { createSnapshot } from 'api/snapshots/createSnapshot' import { destroyClone } from 'api/clones/destroyClone' import { resetClone } from 'api/clones/resetClone' import { getWSToken } from 'api/engine/getWSToken' @@ -16,18 +17,33 @@ import { getSeImages } from 'api/configs/getSeImages' import { updateConfig } from 'api/configs/updateConfig' import { testDbSource } from 'api/configs/testDbSource' import { getEngine } from 'api/engine/getEngine' +import { createBranch } from 'api/branches/createBranch' +import { getBranches } from 'api/branches/getBranches' +import { getSnapshotList } from 'api/branches/getSnapshotList' +import { deleteBranch } from 'api/branches/deleteBranch' +import { destroySnapshot } from 'api/snapshots/destroySnapshot' +import { fullRefresh } from 'api/instances/fullRefresh' -export const Page = () => { +export const Page = ({ renderCurrentTab }: { renderCurrentTab?: number }) => { const routes = { createClone: () => ROUTES.INSTANCE.CLONES.CREATE.path, + createBranch: () => ROUTES.INSTANCE.BRANCHES.CREATE.path, + createSnapshot: () => ROUTES.INSTANCE.SNAPSHOTS.CREATE.path, clone: (cloneId: string) => ROUTES.INSTANCE.CLONES.CLONE.createPath(cloneId), + branch: (branchId: string) => + ROUTES.INSTANCE.BRANCHES.BRANCH.createPath(branchId), + branches: () => ROUTES.INSTANCE.BRANCHES.path, + snapshots: () => ROUTES.INSTANCE.SNAPSHOTS.path, + snapshot: (snapshotId: string) => + 
ROUTES.INSTANCE.SNAPSHOTS.SNAPSHOT.createPath(snapshotId), } const api = { getInstance, getInstanceRetrieval, getSnapshots, + createSnapshot, destroyClone, resetClone, getWSToken, @@ -38,6 +54,12 @@ export const Page = () => { testDbSource, initWS, getEngine, + createBranch, + getBranches, + getSnapshotList, + deleteBranch, + destroySnapshot, + fullRefresh, } const elements = { @@ -52,6 +74,7 @@ export const Page = () => { routes={routes} api={api} elements={elements} + renderCurrentTab={renderCurrentTab} /> ) diff --git a/ui/packages/ce/src/App/Instance/Snapshots/CreateSnapshot/index.tsx b/ui/packages/ce/src/App/Instance/Snapshots/CreateSnapshot/index.tsx new file mode 100644 index 00000000..55598d36 --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Snapshots/CreateSnapshot/index.tsx @@ -0,0 +1,43 @@ +import { createSnapshot } from 'api/snapshots/createSnapshot' +import { getInstance } from 'api/instances/getInstance' +import { initWS } from 'api/engine/initWS' + +import { CreateSnapshotPage } from '@postgres.ai/shared/pages/CreateSnapshot' + +import { PageContainer } from 'components/PageContainer' +import { NavPath } from 'components/NavPath' +import { ROUTES } from 'config/routes' + +export const CreateSnapshot = () => { + const api = { + createSnapshot, + getInstance, + initWS + } + + const elements = { + breadcrumbs: ( + + ), + } + + return ( + + + ROUTES.INSTANCE.SNAPSHOTS.SNAPSHOT.createPath(snapshotId), + }} + /> + + ) +} diff --git a/ui/packages/ce/src/App/Instance/Snapshots/Snapshot/index.tsx b/ui/packages/ce/src/App/Instance/Snapshots/Snapshot/index.tsx new file mode 100644 index 00000000..573a0f32 --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Snapshots/Snapshot/index.tsx @@ -0,0 +1,62 @@ +import { useParams } from 'react-router-dom' + +import { SnapshotPage } from '@postgres.ai/shared/pages/Snapshots/Snapshot' + +import { NavPath } from 'components/NavPath' +import { ROUTES } from 'config/routes' +import { PageContainer } from 'components/PageContainer' + +import { destroySnapshot } from 'api/snapshots/destroySnapshot' +import { getSnapshots } from 'api/snapshots/getSnapshots' +import { getBranchSnapshot } from 'api/snapshots/getBranchSnapshot' +import { initWS } from 'api/engine/initWS' + +type Params = { + snapshotId: string +} + +export const Snapshot = () => { + const { snapshotId } = useParams() + + const api = { + destroySnapshot, + getSnapshots, + getBranchSnapshot, + initWS, + } + + const elements = { + breadcrumbs: ( + + ), + } + + return ( + + ROUTES.INSTANCE.SNAPSHOTS.SNAPSHOTS.path, + snapshot: () => ROUTES.INSTANCE.SNAPSHOTS.SNAPSHOTS.path, + branch: (branchName: string) => + ROUTES.INSTANCE.BRANCHES.BRANCH.createPath(branchName), + clone: (cloneId: string) => + ROUTES.INSTANCE.CLONES.CLONE.createPath(cloneId), + createClone: (branchId: string, snapshotId: string) => ROUTES.INSTANCE.CLONES.CREATE.createPath(branchId, snapshotId), + }} + api={api} + elements={elements} + /> + + ) +} diff --git a/ui/packages/ce/src/App/Instance/Snapshots/index.tsx b/ui/packages/ce/src/App/Instance/Snapshots/index.tsx new file mode 100644 index 00000000..d1521a6e --- /dev/null +++ b/ui/packages/ce/src/App/Instance/Snapshots/index.tsx @@ -0,0 +1,26 @@ +import { Switch, Route, Redirect } from 'react-router-dom' + +import { TABS_INDEX } from '@postgres.ai/shared/pages/Instance/Tabs' + +import { ROUTES } from 'config/routes' + +import { Page } from '../Page' +import { Snapshot } from './Snapshot' +import { CreateSnapshot } from './CreateSnapshot' + +export const Snapshots = () => 
{ + return ( + + + + + + + + + + + + + ) +} diff --git a/ui/packages/ce/src/App/Instance/index.tsx b/ui/packages/ce/src/App/Instance/index.tsx index 65422988..7c26ba3d 100644 --- a/ui/packages/ce/src/App/Instance/index.tsx +++ b/ui/packages/ce/src/App/Instance/index.tsx @@ -2,8 +2,12 @@ import { Switch, Route, Redirect } from 'react-router-dom' import { ROUTES } from 'config/routes' +import { Logs } from './Logs' import { Page } from './Page' import { Clones } from './Clones' +import { Branches } from './Branches' +import { Snapshots } from './Snapshots' +import { Configuration } from './Configuration' export const Instance = () => { return ( @@ -14,6 +18,18 @@ export const Instance = () => { + + + + + + + + + + + + ) diff --git a/ui/packages/ce/src/App/Menu/Header/index.tsx b/ui/packages/ce/src/App/Menu/Header/index.tsx index 8d961cfe..9c74e6da 100644 --- a/ui/packages/ce/src/App/Menu/Header/index.tsx +++ b/ui/packages/ce/src/App/Menu/Header/index.tsx @@ -1,14 +1,11 @@ import cn from 'classnames' import { Link } from 'react-router-dom' -import { linksConfig } from '@postgres.ai/shared/config/links' -import { Button } from '@postgres.ai/shared/components/MenuButton' - import { ROUTES } from 'config/routes' import styles from './styles.module.scss' import { DLEEdition } from 'helpers/edition' -import { LogoIcon, StarsIcon } from './icons' +import { LogoIcon } from './icons' type Props = { isCollapsed: boolean @@ -31,17 +28,6 @@ export const Header = (props: Props) => { )} - - {!props.isCollapsed && ( - - )} ) } diff --git a/ui/packages/ce/src/App/Menu/Header/styles.module.scss b/ui/packages/ce/src/App/Menu/Header/styles.module.scss index f08de9c0..c60279aa 100644 --- a/ui/packages/ce/src/App/Menu/Header/styles.module.scss +++ b/ui/packages/ce/src/App/Menu/Header/styles.module.scss @@ -20,6 +20,7 @@ height: 32px; color: inherit; text-decoration: none; + align-items: center; &.collapsed { justify-content: center; diff --git a/ui/packages/ce/src/App/Menu/StickyTopBar/index.tsx b/ui/packages/ce/src/App/Menu/StickyTopBar/index.tsx index 9f121b16..5390e0b8 100644 --- a/ui/packages/ce/src/App/Menu/StickyTopBar/index.tsx +++ b/ui/packages/ce/src/App/Menu/StickyTopBar/index.tsx @@ -100,7 +100,7 @@ export const StickyTopBar = () => { message: 'All DBLab SE features are now active.', type: 'success', }) - } else { + } else if (res.error?.message) { setSnackbarState({ isOpen: true, message: capitalizeFirstLetter(res?.error?.message), diff --git a/ui/packages/ce/src/api/branches/createBranch.ts b/ui/packages/ce/src/api/branches/createBranch.ts new file mode 100644 index 00000000..90d38927 --- /dev/null +++ b/ui/packages/ce/src/api/branches/createBranch.ts @@ -0,0 +1,26 @@ +/*-------------------------------------------------------------------------- + * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai + * All Rights Reserved. Proprietary and confidential. 
+ * Unauthorized copying of this file, via any medium is strictly prohibited + *-------------------------------------------------------------------------- + */ + +import { request } from 'helpers/request' + +import { CreateBranchFormValues } from '@postgres.ai/shared/types/api/endpoints/createBranch' + +export const createBranch = async (req: CreateBranchFormValues) => { + const response = await request('/branch', { + method: 'POST', + body: JSON.stringify({ + branchName: req.branchName, + ...(req.baseBranch && { baseBranch: req.baseBranch }), + ...(req.snapshotID && { snapshotID: req.snapshotID }), + }), + }) + + return { + response: response.ok ? await response.json() : null, + error: response.ok ? null : response, + } +} diff --git a/ui/packages/ce/src/api/branches/deleteBranch.ts b/ui/packages/ce/src/api/branches/deleteBranch.ts new file mode 100644 index 00000000..ad019688 --- /dev/null +++ b/ui/packages/ce/src/api/branches/deleteBranch.ts @@ -0,0 +1,19 @@ +/*-------------------------------------------------------------------------- + * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai + * All Rights Reserved. Proprietary and confidential. + * Unauthorized copying of this file, via any medium is strictly prohibited + *-------------------------------------------------------------------------- + */ + +import { request } from 'helpers/request' + +export const deleteBranch = async (branchName: string) => { + const response = await request(`/branch/${branchName}`, { + method: 'DELETE' + }) + + return { + response: response.ok ? await response.json() : null, + error: response.ok ? null : await response.json(), + } +} diff --git a/ui/packages/ce/src/api/branches/getBranches.ts b/ui/packages/ce/src/api/branches/getBranches.ts new file mode 100644 index 00000000..c8185e23 --- /dev/null +++ b/ui/packages/ce/src/api/branches/getBranches.ts @@ -0,0 +1,18 @@ +/*-------------------------------------------------------------------------- + * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai + * All Rights Reserved. Proprietary and confidential. + * Unauthorized copying of this file, via any medium is strictly prohibited + *-------------------------------------------------------------------------- + */ + +import { request } from 'helpers/request' +import { formatBranchesDto } from '@postgres.ai/shared/types/api/endpoints/getBranches' + +export const getBranches = async () => { + const response = await request(`/branches`) + + return { + response: response.ok ? formatBranchesDto(await response.json()) : null, + error: response.ok ? null : response, + } +} diff --git a/ui/packages/ce/src/api/branches/getSnapshotList.ts b/ui/packages/ce/src/api/branches/getSnapshotList.ts new file mode 100644 index 00000000..46cd096d --- /dev/null +++ b/ui/packages/ce/src/api/branches/getSnapshotList.ts @@ -0,0 +1,19 @@ +/*-------------------------------------------------------------------------- + * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai + * All Rights Reserved. Proprietary and confidential. + * Unauthorized copying of this file, via any medium is strictly prohibited + *-------------------------------------------------------------------------- + */ + +import { request } from 'helpers/request' + +export const getSnapshotList = async (branchName: string) => { + const response = await request(`/branch/${branchName}/log`, { + method: 'GET' + }) + + return { + response: response.ok ? await response.json() : null, + error: response.ok ? 
diff --git a/ui/packages/ce/src/api/clones/createClone.ts b/ui/packages/ce/src/api/clones/createClone.ts
index 5ca1f168..e3fbacd1 100644
--- a/ui/packages/ce/src/api/clones/createClone.ts
+++ b/ui/packages/ce/src/api/clones/createClone.ts
@@ -15,6 +15,7 @@ export const createClone: CreateClone = async (req) => {
       id: req.snapshotId,
     },
     protected: req.isProtected,
+    ...(req.branch && { branch: req.branch }),
     db: {
       username: req.dbUser,
       password: req.dbPassword,
diff --git a/ui/packages/ce/src/api/configs/updateConfig.ts b/ui/packages/ce/src/api/configs/updateConfig.ts
index 9c40b4f1..093c11f3 100644
--- a/ui/packages/ce/src/api/configs/updateConfig.ts
+++ b/ui/packages/ce/src/api/configs/updateConfig.ts
@@ -1,7 +1,7 @@
 import {
   postUniqueCustomOptions,
   postUniqueDatabases,
-} from '@postgres.ai/shared/pages/Configuration/utils'
+} from '@postgres.ai/shared/pages/Instance/Configuration/utils'
 
 import { Config } from '@postgres.ai/shared/types/api/entities/config'
 import { request } from 'helpers/request'
diff --git a/ui/packages/ce/src/api/instances/fullRefresh.ts b/ui/packages/ce/src/api/instances/fullRefresh.ts
new file mode 100644
index 00000000..bf63b240
--- /dev/null
+++ b/ui/packages/ce/src/api/instances/fullRefresh.ts
@@ -0,0 +1,22 @@
+/*--------------------------------------------------------------------------
+ * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai
+ * All Rights Reserved. Proprietary and confidential.
+ * Unauthorized copying of this file, via any medium is strictly prohibited
+ *--------------------------------------------------------------------------
+ */
+
+import { request } from 'helpers/request'
+import { FullRefresh } from '@postgres.ai/shared/types/api/endpoints/fullRefresh'
+
+export const fullRefresh: FullRefresh = async () => {
+  const response = await request('/full-refresh', {
+    method: 'POST',
+  })
+
+  const result = response.ok ? await response.json() : null
+
+  return {
+    response: result,
+    error: response.ok ? null : response,
+  }
+}
diff --git a/ui/packages/ce/src/api/snapshots/createSnapshot.ts b/ui/packages/ce/src/api/snapshots/createSnapshot.ts
new file mode 100644
index 00000000..212d6245
--- /dev/null
+++ b/ui/packages/ce/src/api/snapshots/createSnapshot.ts
@@ -0,0 +1,25 @@
+/*--------------------------------------------------------------------------
+ * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai
+ * All Rights Reserved. Proprietary and confidential.
+ * Unauthorized copying of this file, via any medium is strictly prohibited
+ *--------------------------------------------------------------------------
+ */
+
+import { CreateSnapshot } from '@postgres.ai/shared/types/api/endpoints/createSnapshot'
+
+import { request } from 'helpers/request'
+
+export const createSnapshot: CreateSnapshot = async (cloneId, message) => {
+  const response = await request(`/branch/snapshot`, {
+    method: 'POST',
+    body: JSON.stringify({
+      cloneID: cloneId,
+      ...(message && { message }),
+    }),
+  })
+
+  return {
+    response: response.ok ? await response.json() : null,
+    error: response.ok ? null : response,
+  }
+}
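The snapshot wrapper above posts the clone ID and an optional message to `/branch/snapshot`. A short usage sketch under the same assumptions as before (illustrative only; the response is assumed to carry the new snapshot's ID, which the diff does not confirm):

```ts
// Illustrative only, not part of the commit.
import { createSnapshot } from 'api/snapshots/createSnapshot'

export async function snapshotBeforeMigration(cloneId: string) {
  const { response, error } = await createSnapshot(cloneId, 'before migration')
  if (error) {
    // The wrappers surface the raw Response object on failure.
    console.error('failed to create snapshot')
    return null
  }
  // Assumed to include the new snapshot's ID.
  return response
}
```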
diff --git a/ui/packages/ce/src/api/snapshots/destroySnapshot.ts b/ui/packages/ce/src/api/snapshots/destroySnapshot.ts
new file mode 100644
index 00000000..b076444f
--- /dev/null
+++ b/ui/packages/ce/src/api/snapshots/destroySnapshot.ts
@@ -0,0 +1,19 @@
+/*--------------------------------------------------------------------------
+ * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai
+ * All Rights Reserved. Proprietary and confidential.
+ * Unauthorized copying of this file, via any medium is strictly prohibited
+ *--------------------------------------------------------------------------
+ */
+
+import { request } from 'helpers/request'
+
+export const destroySnapshot = async (snapshotId: string, forceDelete: boolean) => {
+  const response = await request(`/snapshot/${encodeURIComponent(snapshotId)}?force=${forceDelete}`, {
+    method: 'DELETE',
+  })
+
+  return {
+    response: response.ok ? true : null,
+    error: response.ok ? null : response,
+  }
+}
diff --git a/ui/packages/ce/src/api/snapshots/getBranchSnapshot.ts b/ui/packages/ce/src/api/snapshots/getBranchSnapshot.ts
new file mode 100644
index 00000000..26f0e2ce
--- /dev/null
+++ b/ui/packages/ce/src/api/snapshots/getBranchSnapshot.ts
@@ -0,0 +1,17 @@
+/*--------------------------------------------------------------------------
+ * Copyright (c) 2019-2021, Postgres.ai, Nikolay Samokhvalov nik@postgres.ai
+ * All Rights Reserved. Proprietary and confidential.
+ * Unauthorized copying of this file, via any medium is strictly prohibited
+ *--------------------------------------------------------------------------
+ */
+
+import { request } from 'helpers/request'
+
+export const getBranchSnapshot = async (snapshotId: string) => {
+  const response = await request(`/branch/snapshot/${encodeURIComponent(snapshotId)}`)
+
+  return {
+    response: response.ok ? await response.json() : null,
+    error: response.ok ? null : response,
+  }
+}
diff --git a/ui/packages/ce/src/api/snapshots/getSnapshots.ts b/ui/packages/ce/src/api/snapshots/getSnapshots.ts
index d9ae5fb4..b26788eb 100644
--- a/ui/packages/ce/src/api/snapshots/getSnapshots.ts
+++ b/ui/packages/ce/src/api/snapshots/getSnapshots.ts
@@ -13,7 +13,8 @@ import {
 import { request } from 'helpers/request'
 
 export const getSnapshots: GetSnapshots = async (req) => {
-  const response = await request('/snapshots')
+  const url = `/snapshots${req.branchName ? `?branch=${encodeURIComponent(req.branchName)}` : ''}`
+  const response = await request(url)
 
   return {
     response: response.ok
diff --git a/ui/packages/ce/src/components/NavPath/index.tsx b/ui/packages/ce/src/components/NavPath/index.tsx
index 1b69baaa..c999e62d 100644
--- a/ui/packages/ce/src/components/NavPath/index.tsx
+++ b/ui/packages/ce/src/components/NavPath/index.tsx
@@ -19,6 +19,7 @@ export const NavPath = (props: Props) => {