diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 3187419d..c1f52afb 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -3,7 +3,7 @@ name: CI on: push: branches: - - '*' + - '**' paths-ignore: &pushPathsIgnore - .dockerignore - .editorconfig @@ -23,7 +23,7 @@ on: - mise.toml pull_request: branches: - - '*' + - '**' paths-ignore: *pushPathsIgnore types: - opened diff --git a/README.md b/README.md index ef48a024..ec78442c 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,8 @@ ## TO-DO +- [ ] Note service + - [ ] Routing kafka message based on metadata workspace id if partitioning or listening - [ ] Document service - [ ] Currently only hocuspocus guard the document, other like create/update/delete revision not check, bcs I'm lazy > If do, should create a guard outside of it diff --git a/api/bundled/note.json b/api/bundled/note.json index 8445b020..7b7c646e 100644 --- a/api/bundled/note.json +++ b/api/bundled/note.json @@ -1843,7 +1843,6 @@ }, "User_properties-id": { "type": "string", - "format": "number", "description": "User ID from Authentik (need to change subject mode to User's ID instead of hashed)", "example": "100" }, diff --git a/api/bundled/openapi.json b/api/bundled/openapi.json index 1cc39998..9cc35e74 100644 --- a/api/bundled/openapi.json +++ b/api/bundled/openapi.json @@ -1779,26 +1779,25 @@ }, { "properties": { + "userId": { + "type": "string" + }, "tags": { "type": "array", "items": { "type": "string" - }, - "example": [ - "work", - "meeting" - ] + } }, "outgoingLinkIds": { "type": "array", "items": { "type": "string", - "format": "uuid", - "example": "46f94d4d-41b5-4df6-bf8d-270fb9a49c97" + "format": "uuid" } } }, "required": [ + "userId", "tags", "outgoingLinkIds" ] @@ -1809,14 +1808,15 @@ "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/share_id", - "readOnly": false + "type": "string", + "format": "uuid" }, "name": { - "$ref": "#/components/schemas/share_name" + "type": 
"string" }, "icon": { - "$ref": "#/components/schemas/share_icon" + "type": "string", + "nullable": true } }, "required": [ @@ -1828,8 +1828,8 @@ "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/share_id", - "readOnly": false + "type": "string", + "format": "uuid" } }, "required": [ @@ -1840,26 +1840,21 @@ "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/share_id", - "readOnly": false + "type": "string", + "format": "uuid" }, "name": { - "$ref": "#/components/schemas/share_name" + "type": "string" }, "plainTextContent": { "type": "string", - "description": "Plain text content", - "example": "This is the content of the note" + "description": "Plain text content" }, "tags": { "type": "array", "items": { "type": "string" - }, - "example": [ - "tag1", - "tag2" - ] + } } }, "required": [ @@ -1868,14 +1863,48 @@ ] }, "share_NoteUpdatedEvent": { - "$ref": "#/components/schemas/share_Note" + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "name": { + "type": "string" + }, + "icon": { + "type": "string", + "nullable": true + }, + "folderId": { + "type": "string", + "format": "uuid" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + }, + "updatedAt": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "name", + "icon", + "folderId", + "tags", + "updatedAt" + ] }, "share_UserDeletedEvent": { "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/share_User_properties-id", - "readOnly": false + "$ref": "#/components/schemas/share_id" } }, "required": [ @@ -2069,111 +2098,6 @@ }, "share_id": { "type": "string", - "format": "uuid", - "example": "46f94d4d-41b5-4df6-bf8d-270fb9a49c97", - "readOnly": true - }, - "share_name": { - "type": "string", - "minLength": 1, - "maxLength": 255, - "example": "Meeting Notes", - "description": "Can be empty string when creating but will be set to \"Untitled Note\" internally" - }, - 
"share_icon": { - "type": "string", - "nullable": true, - "example": "๐Ÿ““" - }, - "share_properties-id": { - "type": "string", - "format": "uuid", - "example": "123e4567-e89b-12d3-a456-426614174000", - "readOnly": true - }, - "share_TrashedBy": { - "type": "string", - "enum": [ - "purpose", - "parent" - ], - "example": "purpose" - }, - "share_Note": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid", - "example": "46f94d4d-41b5-4df6-bf8d-270fb9a49c97", - "readOnly": true - }, - "name": { - "type": "string", - "minLength": 1, - "maxLength": 255, - "example": "Meeting Notes", - "description": "Can be empty string when creating but will be set to \"Untitled Note\" internally" - }, - "icon": { - "type": "string", - "nullable": true, - "example": "๐Ÿ““" - }, - "folderId": { - "$ref": "#/components/schemas/share_properties-id" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "example": [ - "work", - "meeting" - ], - "readOnly": true - }, - "updatedAt": { - "type": "string", - "format": "date-time", - "example": "2024-06-01T12:34:56Z", - "readOnly": true - }, - "trashed": { - "type": "object", - "nullable": true, - "readOnly": true, - "x-go-type-name": "NoteTrashed", - "properties": { - "trashedBy": { - "$ref": "#/components/schemas/share_TrashedBy" - }, - "trashedAt": { - "type": "string", - "format": "date-time", - "example": "2024-06-01T12:00:00Z" - } - }, - "required": [ - "trashedBy", - "trashedAt" - ] - } - }, - "required": [ - "id", - "name", - "icon", - "folderId", - "tags", - "updatedAt", - "trashed" - ] - }, - "share_User_properties-id": { - "type": "string", - "format": "number", "description": "User ID from Authentik (need to change subject mode to User's ID instead of hashed)", "example": "100" }, @@ -2832,7 +2756,6 @@ }, "note_User_properties-id": { "type": "string", - "format": "number", "description": "User ID from Authentik (need to change subject mode to User's ID instead of hashed)", 
"example": "100" }, diff --git a/api/bundled/share.json b/api/bundled/share.json index 395b30ca..53a783c4 100644 --- a/api/bundled/share.json +++ b/api/bundled/share.json @@ -19,26 +19,25 @@ }, { "properties": { + "userId": { + "type": "string" + }, "tags": { "type": "array", "items": { "type": "string" - }, - "example": [ - "work", - "meeting" - ] + } }, "outgoingLinkIds": { "type": "array", "items": { "type": "string", - "format": "uuid", - "example": "46f94d4d-41b5-4df6-bf8d-270fb9a49c97" + "format": "uuid" } } }, "required": [ + "userId", "tags", "outgoingLinkIds" ] @@ -49,14 +48,15 @@ "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/id", - "readOnly": false + "type": "string", + "format": "uuid" }, "name": { - "$ref": "#/components/schemas/name" + "type": "string" }, "icon": { - "$ref": "#/components/schemas/icon" + "type": "string", + "nullable": true } }, "required": [ @@ -68,8 +68,8 @@ "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/id", - "readOnly": false + "type": "string", + "format": "uuid" } }, "required": [ @@ -80,26 +80,21 @@ "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/id", - "readOnly": false + "type": "string", + "format": "uuid" }, "name": { - "$ref": "#/components/schemas/name" + "type": "string" }, "plainTextContent": { "type": "string", - "description": "Plain text content", - "example": "This is the content of the note" + "description": "Plain text content" }, "tags": { "type": "array", "items": { "type": "string" - }, - "example": [ - "tag1", - "tag2" - ] + } } }, "required": [ @@ -108,14 +103,48 @@ ] }, "NoteUpdatedEvent": { - "$ref": "#/components/schemas/Note" + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "name": { + "type": "string" + }, + "icon": { + "type": "string", + "nullable": true + }, + "folderId": { + "type": "string", + "format": "uuid" + }, + "tags": { + "type": "array", + "items": { + "type": 
"string" + } + }, + "updatedAt": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "name", + "icon", + "folderId", + "tags", + "updatedAt" + ] }, "UserDeletedEvent": { "type": "object", "properties": { "id": { - "$ref": "#/components/schemas/User_properties-id", - "readOnly": false + "$ref": "#/components/schemas/id" } }, "required": [ @@ -309,111 +338,6 @@ }, "id": { "type": "string", - "format": "uuid", - "example": "46f94d4d-41b5-4df6-bf8d-270fb9a49c97", - "readOnly": true - }, - "name": { - "type": "string", - "minLength": 1, - "maxLength": 255, - "example": "Meeting Notes", - "description": "Can be empty string when creating but will be set to \"Untitled Note\" internally" - }, - "icon": { - "type": "string", - "nullable": true, - "example": "๐Ÿ““" - }, - "properties-id": { - "type": "string", - "format": "uuid", - "example": "123e4567-e89b-12d3-a456-426614174000", - "readOnly": true - }, - "TrashedBy": { - "type": "string", - "enum": [ - "purpose", - "parent" - ], - "example": "purpose" - }, - "Note": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid", - "example": "46f94d4d-41b5-4df6-bf8d-270fb9a49c97", - "readOnly": true - }, - "name": { - "type": "string", - "minLength": 1, - "maxLength": 255, - "example": "Meeting Notes", - "description": "Can be empty string when creating but will be set to \"Untitled Note\" internally" - }, - "icon": { - "type": "string", - "nullable": true, - "example": "๐Ÿ““" - }, - "folderId": { - "$ref": "#/components/schemas/properties-id" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "example": [ - "work", - "meeting" - ], - "readOnly": true - }, - "updatedAt": { - "type": "string", - "format": "date-time", - "example": "2024-06-01T12:34:56Z", - "readOnly": true - }, - "trashed": { - "type": "object", - "nullable": true, - "readOnly": true, - "x-go-type-name": "NoteTrashed", - "properties": { - "trashedBy": { - "$ref": 
"#/components/schemas/TrashedBy" - }, - "trashedAt": { - "type": "string", - "format": "date-time", - "example": "2024-06-01T12:00:00Z" - } - }, - "required": [ - "trashedBy", - "trashedAt" - ] - } - }, - "required": [ - "id", - "name", - "icon", - "folderId", - "tags", - "updatedAt", - "trashed" - ] - }, - "User_properties-id": { - "type": "string", - "format": "number", "description": "User ID from Authentik (need to change subject mode to User's ID instead of hashed)", "example": "100" } diff --git a/api/common/components/schemas/User.yaml b/api/common/components/schemas/User.yaml index 1fd8cd6d..52d280af 100644 --- a/api/common/components/schemas/User.yaml +++ b/api/common/components/schemas/User.yaml @@ -4,7 +4,6 @@ type: object properties: id: type: string - format: number description: User ID from Authentik (need to change subject mode to User's ID instead of hashed) example: '100' diff --git a/api/redocly.yaml b/api/redocly.yaml index d1f7ba0a..5a208ba2 100644 --- a/api/redocly.yaml +++ b/api/redocly.yaml @@ -13,6 +13,7 @@ apis: rules: struct: off no-empty-servers: off + scalar-property-missing-example: off extends: - recommended rules: diff --git a/api/share/components/schemas/DocumentCommittedEvent.yaml b/api/share/components/schemas/DocumentCommittedEvent.yaml index de1bd021..b952dbec 100644 --- a/api/share/components/schemas/DocumentCommittedEvent.yaml +++ b/api/share/components/schemas/DocumentCommittedEvent.yaml @@ -2,17 +2,21 @@ allOf: - $ref: ../../../document/components/schemas/Document.yaml - properties: + userId: + type: string + tags: type: array items: type: string - example: [work, meeting] + outgoingLinkIds: type: array items: type: string format: uuid - example: 46f94d4d-41b5-4df6-bf8d-270fb9a49c97 + required: + - userId - tags - outgoingLinkIds diff --git a/api/share/components/schemas/EventBase.yaml.bak b/api/share/components/schemas/EventBase.yaml.bak deleted file mode 100644 index bff0a420..00000000 --- 
a/api/share/components/schemas/EventBase.yaml.bak +++ /dev/null @@ -1,29 +0,0 @@ -# vim: ft=yaml.openapi -type: object - -properties: - id: - type: string - format: uuid - example: 3deadc5b-bde7-4b64-9c3b-1bc534378f36 - - metadata: - type: object - properties: - timestamp: - type: string - format: date-time - example: 2024-01-01T12:00:00Z - correlationId: - type: string - example: ec601c67-209c-4e28-977b-30ea3e24a963 - required: - - timestamp - - data: - type: object - -required: - - id - - metadata - - data diff --git a/api/share/components/schemas/NoteCreatedEvent.yaml b/api/share/components/schemas/NoteCreatedEvent.yaml index f280b130..b7a8f70e 100644 --- a/api/share/components/schemas/NoteCreatedEvent.yaml +++ b/api/share/components/schemas/NoteCreatedEvent.yaml @@ -3,12 +3,15 @@ type: object properties: id: - $ref: ../../../note/components/schemas/Note.yaml#/properties/id - readOnly: false + type: string + format: uuid + name: - $ref: ../../../note/components/schemas/Note.yaml#/properties/name + type: string + icon: - $ref: ../../../note/components/schemas/Note.yaml#/properties/icon + type: string + nullable: true required: - id diff --git a/api/share/components/schemas/NoteDeletedEvent.yaml b/api/share/components/schemas/NoteDeletedEvent.yaml index b71cb169..f8b01379 100644 --- a/api/share/components/schemas/NoteDeletedEvent.yaml +++ b/api/share/components/schemas/NoteDeletedEvent.yaml @@ -3,8 +3,8 @@ type: object properties: id: - $ref: ../../../note/components/schemas/Note.yaml#/properties/id - readOnly: false + type: string + format: uuid required: - id diff --git a/api/share/components/schemas/NoteSearch.yaml b/api/share/components/schemas/NoteSearch.yaml index a2bd488c..9c541353 100644 --- a/api/share/components/schemas/NoteSearch.yaml +++ b/api/share/components/schemas/NoteSearch.yaml @@ -3,22 +3,20 @@ type: object properties: id: - $ref: ../../../note/components/schemas/Note.yaml#/properties/id - readOnly: false + type: string + format: uuid name: - 
$ref: ../../../note/components/schemas/Note.yaml#/properties/name + type: string plainTextContent: type: string description: Plain text content - example: This is the content of the note tags: type: array items: type: string - example: ['tag1', 'tag2'] required: - id diff --git a/api/share/components/schemas/NoteUpdatedEvent.yaml b/api/share/components/schemas/NoteUpdatedEvent.yaml index a19b7923..fbad2372 100644 --- a/api/share/components/schemas/NoteUpdatedEvent.yaml +++ b/api/share/components/schemas/NoteUpdatedEvent.yaml @@ -1,2 +1,35 @@ # vim: ft=yaml.openapi -$ref: ../../../note/components/schemas/Note.yaml +type: object + +properties: + id: + type: string + format: uuid + + name: + type: string + + icon: + type: string + nullable: true + + folderId: + type: string + format: uuid + + tags: + type: array + items: + type: string + + updatedAt: + type: string + format: date-time + +required: + - id + - name + - icon + - folderId + - tags + - updatedAt diff --git a/api/share/components/schemas/UserDeletedEvent.yaml b/api/share/components/schemas/UserDeletedEvent.yaml index 2da836bd..4ba2846b 100644 --- a/api/share/components/schemas/UserDeletedEvent.yaml +++ b/api/share/components/schemas/UserDeletedEvent.yaml @@ -4,7 +4,6 @@ type: object properties: id: $ref: ../../../common/components/schemas/User.yaml#/properties/id - readOnly: false required: - id diff --git a/apps/document/.env.development-mon b/apps/document/.env.development-mon index f605a6b0..580d2f54 100644 --- a/apps/document/.env.development-mon +++ b/apps/document/.env.development-mon @@ -4,4 +4,3 @@ OTEL_SERVICE_NAME="document" OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317" OTEL_EXPORTER_OTLP_PROTOCOL="grpc" OTEL_TRACES_SAMPLER="always_on" -OTEL_NODE_ENABLED_INSTRUMENTATIONS="grpc,http,kafkajs,pino,pg,runtime-node" diff --git a/apps/document/src/otel.ts b/apps/document/src/otel.ts index 0ffb8b01..cd7a2c42 100644 --- a/apps/document/src/otel.ts +++ b/apps/document/src/otel.ts @@ -4,6 +4,9 @@ 
import { NodeSDK } from '@opentelemetry/sdk-node'; if (process.env.OTEL_SDK_DISABLED === undefined) { process.env.OTEL_SDK_DISABLED = 'true'; process.env.OTEL_NODE_DISABLED_INSTRUMENTATIONS = 'all'; +} else { + process.env.OTEL_NODE_ENABLED_INSTRUMENTATIONS = + 'grpc,http,kafkajs,pino,pg,runtime-node'; } export const otelSdk = new NodeSDK({ diff --git a/apps/search-worker/.env.development-mon b/apps/search-worker/.env.development-mon index 0dcd2037..817ce764 100644 --- a/apps/search-worker/.env.development-mon +++ b/apps/search-worker/.env.development-mon @@ -4,4 +4,3 @@ OTEL_SERVICE_NAME="search-worker" OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317" OTEL_EXPORTER_OTLP_PROTOCOL="grpc" OTEL_TRACES_SAMPLER="always_on" -OTEL_NODE_ENABLED_INSTRUMENTATIONS="kafkajs,pino,runtime-node" diff --git a/apps/search-worker/src/otel.ts b/apps/search-worker/src/otel.ts index 0ffb8b01..3966521b 100644 --- a/apps/search-worker/src/otel.ts +++ b/apps/search-worker/src/otel.ts @@ -4,6 +4,8 @@ import { NodeSDK } from '@opentelemetry/sdk-node'; if (process.env.OTEL_SDK_DISABLED === undefined) { process.env.OTEL_SDK_DISABLED = 'true'; process.env.OTEL_NODE_DISABLED_INSTRUMENTATIONS = 'all'; +} else { + process.env.OTEL_NODE_ENABLED_INSTRUMENTATIONS = 'kafkajs,pino,runtime-node'; } export const otelSdk = new NodeSDK({ diff --git a/cmd/note/gen-jet.sh b/cmd/note/gen-jet.sh deleted file mode 100755 index c946a662..00000000 --- a/cmd/note/gen-jet.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -set -e - -CONTAINER_NAME='notopia-note-jet-gen-db' -DB_NAME='pgjet' -DB_USER='postgres' -DB_PASS='postgres' -DB_PORT='15433' -OUTPUT_DIR="../../internal/note/infra/persistence/" - -echo "๐Ÿš€ Starting ephemeral Postgres $CONTAINER_NAME on port $DB_PORT..." -CONTAINER_ID=$(docker run --rm -d \ - --name "$CONTAINER_NAME" \ - -e POSTGRES_PASSWORD=$DB_PASS \ - -e POSTGRES_DB=$DB_NAME \ - -p $DB_PORT:5432 \ - postgres:18.1-alpine3.23) - -cleanup() { - echo "๐Ÿงน Cleaning up container..." 
- docker stop "$CONTAINER_ID" >/dev/null -} -trap cleanup EXIT - -echo "Wait for Postgres to be ready..." -until docker exec "$CONTAINER_ID" pg_isready -U "$DB_USER" >/dev/null 2>&1; do - sleep 1 -done - -until docker exec "$CONTAINER_ID" psql -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1" >/dev/null 2>&1; do - sleep 1 -done - -echo "๐Ÿ“ฆ Applying migrations..." -export GOOSE_DRIVER=postgres -export GOOSE_DBSTRING="host=127.0.0.1 port=$DB_PORT user=$DB_USER password=$DB_PASS dbname=$DB_NAME sslmode=disable" -export GOOSE_MIGRATION_DIR=../../internal/note/infra/persistence/pgmigration/ -goose up - -echo "โœˆ๏ธ Generating Jet code..." -jet -source=postgres \ - -host=127.0.0.1 \ - -port="$DB_PORT" \ - -user="$DB_USER" \ - -password="$DB_PASS" \ - -dbname="$DB_NAME" \ - -schema=public \ - -path="$OUTPUT_DIR" - -echo "โœ… Success! Code generated in $OUTPUT_DIR" diff --git a/cmd/note/project.json b/cmd/note/project.json index 68abe4e4..625bdde6 100644 --- a/cmd/note/project.json +++ b/cmd/note/project.json @@ -137,18 +137,8 @@ }, "cache": true }, - "gen:jet": { - "inputs": ["migrations"], - "outputs": ["{workspaceRoot}/internal/note/infra/persistence/pgjet/*.go"], - "executor": "nx:run-commands", - "options": { - "command": "./gen-jet.sh", - "cwd": "{projectRoot}" - }, - "cache": true - }, "gen": { - "dependsOn": ["gen:wire", "gen:sqlc", "gen:jet"] + "dependsOn": ["gen:wire", "gen:sqlc"] }, "nx-release-publish": { "parallelism": false diff --git a/cmd/note/sqlc.yaml b/cmd/note/sqlc.yaml index de7eb97c..02fcd209 100644 --- a/cmd/note/sqlc.yaml +++ b/cmd/note/sqlc.yaml @@ -1,35 +1,51 @@ # yaml-language-server: $schema=https://www.schemastore.org/sqlc-2.0.json version: '2' +plugins: + - name: golang + wasm: + url: https://github.com/vtuanjs/sqlc-gen-go/releases/download/v2.3.0-stable/sqlc-gen-go.wasm + sha256: 9b33820707e741e68ef49000189455ee4a3985a96a29e3e665d766709fdf5007 + sql: - engine: postgresql schema: - ../../internal/note/infra/persistence/pgmigration/ - 
../../internal/note/infra/persistence/pgsqlc/temp_tables.sql queries: ../../internal/note/infra/persistence/pgsqlc/ - gen: - go: - emit_result_struct_pointers: true - emit_params_struct_pointers: true - emit_pointers_for_null_types: true - emit_enum_valid_method: true - emit_all_enum_values: true + codegen: + - plugin: golang out: ../../internal/note/infra/persistence/pgsqlc/ - sql_driver: github.com/jackc/pgx/v5 - sql_package: pgx/v5 - initialisms: - - id - - url - rename: - source_ids: SourceIDs - target_ids: TargetIDs + options: + emit_result_struct_pointers: true + emit_params_struct_pointers: true + emit_pointers_for_null_types: true + emit_enum_valid_method: true + emit_all_enum_values: true + sql_driver: github.com/jackc/pgx/v5 + sql_package: pgx/v5 + initialisms: + - id + - url + rename: + source_ids: SourceIDs + target_ids: TargetIDs + package: pgsqlc + emit_dynamic_filter: true + emit_tracing: + import: go.opentelemetry.io/otel + package: otel + code: + - ctx, span := otel.Tracer("{{.StructName}}").Start(ctx, "{{.MethodName}}") + - defer span.End() + rules: - sqlc/db-prepare - postgresql-query-too-costly - postgresql-no-seq-scan overrides: - go: + go: &golangOptions rename: ids: IDs urls: URLs @@ -66,6 +82,9 @@ overrides: type: string pointer: true +options: + golang: *golangOptions + rules: - name: postgresql-query-too-costly message: Query cost estimate is too high diff --git a/cmd/note/wire_gen.go b/cmd/note/wire_gen.go index 0aac56ec..6d99ffb4 100644 --- a/cmd/note/wire_gen.go +++ b/cmd/note/wire_gen.go @@ -13,15 +13,19 @@ import ( "github.com/notopia-uit/notopia/internal/note/app" "github.com/notopia-uit/notopia/internal/note/component" "github.com/notopia-uit/notopia/internal/note/config" + "github.com/notopia-uit/notopia/internal/note/controller/event" "github.com/notopia-uit/notopia/internal/note/controller/grpc" "github.com/notopia-uit/notopia/internal/note/controller/health" "github.com/notopia-uit/notopia/internal/note/controller/http" - 
"github.com/notopia-uit/notopia/internal/note/controller/integrationevent" "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/infra/common" + "github.com/notopia-uit/notopia/internal/note/infra/integrationpublisher" + "github.com/notopia-uit/notopia/internal/note/infra/outbox" "github.com/notopia-uit/notopia/internal/note/infra/persistence" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pg" - "github.com/notopia-uit/notopia/internal/note/infra/pubsub" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgreadmodel" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgrepo" "github.com/notopia-uit/notopia/internal/note/infra/service" + "github.com/notopia-uit/notopia/internal/note/infra/workspaceevent" "github.com/notopia-uit/notopia/pkg/common/http" "github.com/notopia-uit/notopia/pkg/logging" "github.com/notopia-uit/notopia/pkg/otel" @@ -30,84 +34,122 @@ import ( // Injectors from wire.go: func InitializeServer(ctx context.Context) (*note.Server, func(), error) { - validate := components.ProvideValidate() + serviceName := _wireServiceNameValue + serviceVersion := _wireServiceVersionValue + resource, err := otel.NewResource(ctx, serviceName, serviceVersion) + if err != nil { + return nil, nil, err + } + tracerProvider, cleanup, err := otel.NewTracerProvider(ctx, resource) + if err != nil { + return nil, nil, err + } + validate := component.ProvideValidate() viper := config.NewViper() configConfig, err := config.New(validate, viper) if err != nil { + cleanup() return nil, nil, err } - log := &configConfig.Log - stdoutHandler := logging.NewStdoutHandler(log) - serviceName := _wireServiceNameValue - serviceVersion := _wireServiceVersionValue - resource, err := otel.NewResource(ctx, serviceName, serviceVersion) + sql := &configConfig.Database + pool, cleanup2, err := persistence.NewPgPool(ctx, tracerProvider, sql) if err != nil { + cleanup() return nil, nil, err } - 
loggerProvider, cleanup, err := otel.NewLoggerProvider(ctx, resource) + db := persistence.NewPgxPoolStdlib(pool) + log := &configConfig.Log + stdoutHandler := logging.NewStdoutHandler(log) + loggerProvider, cleanup3, err := otel.NewLoggerProvider(ctx, resource) if err != nil { + cleanup2() + cleanup() return nil, nil, err } slogHandler := otel.NewSlogHandler(serviceName, loggerProvider) logger := logging.New(stdoutHandler, slogHandler, log) - ginSlogHandlerFunc := commonhttp.NewGinSlogHandler(log, logger) - otelGinHandlerFunc := commonhttp.NewOtelGinHandler(serviceName) - engine := commonhttp.NewGin(ginSlogHandlerFunc, otelGinHandlerFunc) - services := &configConfig.Services - loggingLogger := otel.MapSlogToGRPCMiddlewareLogger(logger) - authorization, cleanup2, err := service.NewAuthorization(services, loggingLogger) + provider, err := persistence.NewGooseProvider(db, logger) if err != nil { + cleanup3() + cleanup2() cleanup() return nil, nil, err } - tracerProvider, cleanup3, err := otel.NewTracerProvider(ctx, resource) + pg, err := persistence.NewPg(pool, provider) if err != nil { + cleanup3() cleanup2() cleanup() return nil, nil, err } - sql := &configConfig.Database - pool, cleanup4, err := pg.NewPgPool(ctx, tracerProvider, sql) + ginSlogHandlerFunc := commonhttp.NewGinSlogHandler(log, logger) + otelGinHandlerFunc := commonhttp.NewOtelGinHandler(serviceName) + engine := commonhttp.NewGin(ginSlogHandlerFunc, otelGinHandlerFunc) + services := &configConfig.Services + loggingLogger := otel.MapSlogToGRPCMiddlewareLogger(logger) + authorization, cleanup4, err := service.NewAuthorization(services, loggingLogger) if err != nil { cleanup3() cleanup2() cleanup() return nil, nil, err } - queries := pg.NewQueries(pool) - db := pg.NewStdlib(pool) - folder := pg.NewNoTransactionFolder(pool, queries, db) + queries := persistence.NewSQLCQueries(pool) + advanced := &configConfig.Advanced + domainEvent := advanced.DomainEvent + loggerAdapter := 
component.NewWatermillLogger(logger) + configDomainEvent := &advanced.DomainEvent + defaultPostgreSQLSchema := outbox.NewSchemaAdapter(configDomainEvent) + fromPersistenceToQSLForwarder := outbox.NewFromPersistenceToQSLForwarder(domainEvent, loggerAdapter, defaultPostgreSQLSchema) + runInTx := pgrepo.NewRunInTx(fromPersistenceToQSLForwarder) + folder := pgrepo.NewNoTransactionFolder(pool, queries, runInTx) createFolderHandler := app.NewCreateFolderHandler(authorization, folder) - pgNote := pg.NewNoTransactionNote(pool, queries, db) - createNoteHandler := app.NewCreateNoteHandler(authorization, pgNote, folder) - workspace := pg.NewNoTransactionWorkspace(pool, queries, db) - unitOfWork := pg.NewUnitOfWork(queries, db) - createWorkspaceHandler := app.NewCreateWorkspaceHandler(workspace, folder, unitOfWork) - permanentlyDeleteFolderHandler := app.PermanentlyNewDeleteFolderHandler(authorization, folder) - permanentlyDeleteNoteHandler := app.PermanentlyNewDeleteNoteHandler(authorization, pgNote) - deleteWorkspaceHandler := app.NewDeleteWorkspaceHandler(authorization, workspace) - generateDailyNoteHandler := app.NewGenerateDailyNoteHandler(pgNote, folder, workspace) - moveWorkspaceItemsHandler := app.NewMoveWorkspaceItemsHandler(authorization, pgNote, folder, unitOfWork) - permanentlyDeleteWorkspaceItemsHandler := app.NewPermanentlyDeleteWorkspaceItemsHandler(authorization, pgNote, folder) - publishNoteHandler := app.NewPublishNoteHandler(pgNote) + pgrepoNote := pgrepo.NewNoTransactionNote(pool, queries, runInTx) + createNoteHandler := app.NewCreateNoteHandler(authorization, pgrepoNote, folder) + workspace := pgrepo.NewNoTransactionWorkspace(pool, queries, runInTx) + unitOfWork := pgrepo.NewUnitOfWork(pool, fromPersistenceToQSLForwarder, runInTx) + createWorkspaceHandler := app.NewCreateWorkspaceHandler(workspace, folder, unitOfWork, authorization) + permanentlyDeleteFolderHandler := app.PermanentlyNewDeleteFolderHandler(authorization, folder, unitOfWork) + 
permanentlyDeleteNoteHandler := app.PermanentlyNewDeleteNoteHandler(authorization, pgrepoNote, unitOfWork) + deleteWorkspaceHandler := app.NewDeleteWorkspaceHandler(authorization, workspace, unitOfWork) + moveWorkspaceItemsHandler := app.NewMoveWorkspaceItemsHandler(authorization, pgrepoNote, folder, unitOfWork) + permanentlyDeleteWorkspaceItemsHandler := app.NewPermanentlyDeleteWorkspaceItemsHandler(authorization, unitOfWork) + publishNoteHandler := app.NewPublishNoteHandler(pgrepoNote) publishWorkspaceHandler := app.NewPublishWorkspaceHandler(workspace) - renameFolderHandler := app.NewRenameFolderHandler(authorization, folder) - renameNoteHandler := app.NewRenameNoteHandler(authorization, pgNote) - renameWorkspaceHandler := app.NewRenameWorkspaceHandler(authorization, workspace) + renameFolderHandler := app.NewRenameFolderHandler(authorization, folder, unitOfWork) + renameNoteHandler := app.NewRenameNoteHandler(authorization, pgrepoNote, unitOfWork) + renameWorkspaceHandler := app.NewRenameWorkspaceHandler(authorization, workspace, unitOfWork) trashService := domain.NewTrashService() - restoreTrashedWorkspaceItemsHandler := app.NewRestoreTrashedWorkspaceItemsHandler(pgNote, folder, trashService) + restoreTrashedWorkspaceItemsHandler := app.NewRestoreTrashedWorkspaceItemsHandler(authorization, pgrepoNote, folder, trashService, unitOfWork) trashWorkspaceItemsHandler := app.NewTrashWorkspaceItemsHandler(authorization, unitOfWork, trashService) - unpublishNoteHandler := app.NewUnpublishNoteHandler(pgNote) + unpublishNoteHandler := app.NewUnpublishNoteHandler(pgrepoNote) unpublishWorkspaceHandler := app.NewUnpublishWorkspaceHandler(workspace) - updateWorkspaceMembersHandler := app.NewUpdateWorkspaceMembersHandler() - commandHandlers := &app.CommandHandlers{ + kafka := &configConfig.Kafka + watermillKafkaTracer := otel.NewOTELSaramaTracer(tracerProvider) + kafkaPublisher, err := common.NewKafkaPublisher(kafka, loggerAdapter, watermillKafkaTracer) + if err != nil { + 
cleanup4() + cleanup3() + cleanup2() + cleanup() + return nil, nil, err + } + integrationPublisher, err := integrationpublisher.NewIntegrationPublisher(kafkaPublisher) + if err != nil { + cleanup4() + cleanup3() + cleanup2() + cleanup() + return nil, nil, err + } + updateWorkspaceMembersHandler := app.NewUpdateWorkspaceMembersHandler(integrationPublisher) + cmds := &app.Cmds{ CreateFolderHandler: createFolderHandler, CreateNoteHandler: createNoteHandler, CreateWorkspaceHandler: createWorkspaceHandler, DeleteFolderHandler: permanentlyDeleteFolderHandler, DeleteNoteHandler: permanentlyDeleteNoteHandler, DeleteWorkspaceHandler: deleteWorkspaceHandler, - GenerateDailyNoteHandler: generateDailyNoteHandler, MoveWorkspaceItemsHandler: moveWorkspaceItemsHandler, PermanentlyDeleteWorkspaceItemsHandler: permanentlyDeleteWorkspaceItemsHandler, PublishNoteHandler: publishNoteHandler, @@ -122,21 +164,42 @@ func InitializeServer(ctx context.Context) (*note.Server, func(), error) { UpdateWorkspaceMembersHandler: updateWorkspaceMembersHandler, } noteService := domain.NewNoteService() - documentCommittedHandler := app.NewDocumentCommittedHandler(pgNote, noteService) - integrationEventHandlers := &app.IntegrationEventHandlers{ - DocumentCommittedHandler: documentCommittedHandler, + documentCommittedHandler := app.NewDocumentCommittedHandler(pgrepoNote, noteService) + workspaceEvent := &advanced.WorkspaceEvent + redis := &configConfig.Redis + redisClient, cleanup5 := workspaceevent.NewRedisClient(ctx, redis, logger) + workspaceEventHub, err := workspaceevent.NewWorkspaceEventHub(workspaceEvent, loggerAdapter, redisClient) + if err != nil { + cleanup5() + cleanup4() + cleanup3() + cleanup2() + cleanup() + return nil, nil, err + } + notifyWorkspaceItemsUpdatedHandler := app.NewNotifyWorkspaceItemsUpdatedHandler(workspaceEventHub) + events := &app.Events{ + DocumentCommittedHandler: documentCommittedHandler, + NotifyWorkspaceItemsUpdated: notifyWorkspaceItemsUpdatedHandler, } - 
readModel := pg.NewReadModel(queries) - checkWorkspaceSlugExistsHandler := app.NewCheckWorkspaceSlugExistsHandler(readModel) - getNoteGraphHandler := app.NewGetNoteGraphHandler(readModel) - getNoteHandler := app.NewGetNoteHandler(authorization, pgNote, readModel) - getNoteLinksHandler := app.NewGetNoteLinksHandler(readModel) - getWorkspaceGraphHandler := app.NewGetWorkspaceGraphHandler(readModel) - getWorkspaceHandler := app.NewGetWorkspaceBySlugHandler(readModel) + checkWorkspaceSlugExists := pgreadmodel.NewCheckWorkspaceSlugExists(queries) + checkWorkspaceSlugExistsHandler := app.NewCheckWorkspaceSlugExistsHandler(checkWorkspaceSlugExists) + noteGraph := pgreadmodel.NewNoteGraph(queries) + getNoteGraphHandler := app.NewGetNoteGraphHandler(authorization, pgrepoNote, noteGraph) + pgreadmodelNote := pgreadmodel.GetNote(queries) + getNoteHandler := app.NewGetNoteHandler(authorization, pgrepoNote, pgreadmodelNote) + noteLinks := pgreadmodel.GetNoteLinks(queries) + getNoteLinksHandler := app.NewGetNoteLinksHandler(authorization, pgrepoNote, noteLinks) + workspaceGraph := pgreadmodel.GetWorkspaceGraph(queries) + getWorkspaceGraphHandler := app.NewGetWorkspaceGraphHandler(authorization, workspaceGraph) + workspaceBySlug := pgreadmodel.NewWorkspaceBySlug(queries) + getWorkspaceHandler := app.NewGetWorkspaceBySlugHandler(authorization, workspaceBySlug) getWorkspaceMembersHandler := app.NewGetWorkspaceMembersHandler() - getWorkspaceTreeHandler := app.NewGetWorkspaceTreeHandler(readModel) - showTrashHandler := app.NewShowTrashHandler(readModel) - queryHandlers := &app.QueryHandlers{ + workspaceTree := pgreadmodel.NewWorkspaceTree(queries) + getWorkspaceTreeHandler := app.NewGetWorkspaceTreeHandler(authorization, workspaceTree) + showTrash := pgreadmodel.NewShowTrash(queries) + showTrashHandler := app.NewShowTrashHandler(authorization, showTrash) + appQueries := &app.Queries{ CheckWorkspaceSlugExistsHandler: checkWorkspaceSlugExistsHandler, GetNoteGraphHandler: 
getNoteGraphHandler, GetNoteHandler: getNoteHandler, @@ -147,50 +210,16 @@ func InitializeServer(ctx context.Context) (*note.Server, func(), error) { GetWorkspaceTreeHandler: getWorkspaceTreeHandler, ShowTrashHandler: showTrashHandler, } - kafka := &configConfig.Kafka - loggerAdapter := pubsub.NewWatermillLogger(logger) - commonconfigKafka := configConfig.Kafka - watermillKafkaTracer := otel.NewOTELSaramaTracer(tracerProvider) - kafkaPublisher, err := pubsub.NewKafkaPublisher(commonconfigKafka, loggerAdapter, watermillKafkaTracer) - if err != nil { - cleanup4() - cleanup3() - cleanup2() - cleanup() - return nil, nil, err - } - commandEventMarshaler := pubsub.NewIntegrationMarshaler() - integrationPubSub, err := pubsub.NewIntegrationPubSub(kafka, loggerAdapter, kafkaPublisher, watermillKafkaTracer, commandEventMarshaler) - if err != nil { - cleanup4() - cleanup3() - cleanup2() - cleanup() - return nil, nil, err - } - redis := &configConfig.Redis - redisClient, cleanup5 := pubsub.NewRedisClient(ctx, redis, logger) - workspaceEventInternalPubSub, err := pubsub.NewWorkspaceEventInternalPubSub(loggerAdapter, commandEventMarshaler, redisClient) - if err != nil { - cleanup5() - cleanup4() - cleanup3() - cleanup2() - cleanup() - return nil, nil, err - } - workspaceEventHubPubSub := pubsub.NewWorkspaceEventHubPubSub(loggerAdapter) - workspaceEvent := pubsub.NewWorkspaceEvent(workspaceEventInternalPubSub, workspaceEventHubPubSub) - provider, err := persistence.NewGooseProvider(db, logger) - if err != nil { - cleanup5() - cleanup4() - cleanup3() - cleanup2() - cleanup() - return nil, nil, err + server := &app.Server{ + Cmds: cmds, + Events: events, + Queries: appQueries, + WorkspaceEventHub: workspaceEventHub, } - persistencePg, err := persistence.NewPg(pool, provider) + configServer := &configConfig.Server + strictHandler := http.NewStrictHandler(server, configServer, workspaceEventHub) + serverInterface := http.NewHandler(strictHandler) + httpHTTP, cleanup6, err := 
http.New(ctx, engine, serverInterface, configServer, logger) if err != nil { cleanup5() cleanup4() @@ -199,19 +228,10 @@ func InitializeServer(ctx context.Context) (*note.Server, func(), error) { cleanup() return nil, nil, err } - server := &app.Server{ - CommandHandlers: commandHandlers, - IntegrationEventHandlers: integrationEventHandlers, - QueryHandlers: queryHandlers, - IntegrationPubSub: integrationPubSub, - WorkspaceEventPubSub: workspaceEvent, - Persistence: persistencePg, - } - configServer := &configConfig.Server - strictHandler := http.NewStrictHandler(server, configServer, workspaceEvent) - serverInterface := http.NewHandler(strictHandler) - httpHTTP, cleanup6, err := http.New(ctx, engine, serverInterface, configServer, logger) + serviceServer := grpc.NewServiceServer(server) + grpcGRPC, cleanup7, err := grpc.New(ctx, serviceServer, configServer, loggingLogger) if err != nil { + cleanup6() cleanup5() cleanup4() cleanup3() @@ -219,9 +239,10 @@ func InitializeServer(ctx context.Context) (*note.Server, func(), error) { cleanup() return nil, nil, err } - serviceServer := grpc.NewServiceServer(server) - grpcGRPC, cleanup7, err := grpc.New(ctx, serviceServer, configServer, loggingLogger) + jsonMarshaler := component.NewWatermillJsonMarshaler() + eventEvent, err := event.NewEvent(kafka, server, watermillKafkaTracer, loggerAdapter, jsonMarshaler, configDomainEvent) if err != nil { + cleanup7() cleanup6() cleanup5() cleanup4() @@ -230,7 +251,7 @@ func InitializeServer(ctx context.Context) (*note.Server, func(), error) { cleanup() return nil, nil, err } - integrationEvent, err := integrationevent.NewIntegrationEvent(integrationPubSub, server) + outboxOutbox, err := outbox.NewOutbox(kafkaPublisher, loggerAdapter, defaultPostgreSQLSchema, pool) if err != nil { cleanup7() cleanup6() @@ -241,7 +262,7 @@ func InitializeServer(ctx context.Context) (*note.Server, func(), error) { cleanup() return nil, nil, err } - healthHealth := health.New(persistencePg, configServer, 
workspaceEvent) + healthHealth := health.New(pg, configServer, workspaceEventHub, redisClient) meterProvider, cleanup8, err := otel.NewMeterProvider(ctx, resource) if err != nil { cleanup7() @@ -254,7 +275,7 @@ func InitializeServer(ctx context.Context) (*note.Server, func(), error) { return nil, nil, err } global := otel.ProvideGlobal(loggerProvider, meterProvider, tracerProvider) - noteServer := note.NewServer(httpHTTP, grpcGRPC, integrationEvent, healthHealth, server, logger, global) + noteServer := note.NewServer(pg, httpHTTP, grpcGRPC, eventEvent, workspaceEventHub, outboxOutbox, healthHealth, logger, global) return noteServer, func() { cleanup8() cleanup7() diff --git a/go.mod b/go.mod index 7f494932..a7de91db 100644 --- a/go.mod +++ b/go.mod @@ -9,23 +9,23 @@ require ( github.com/ThreeDotsLabs/watermill v1.5.1 github.com/ThreeDotsLabs/watermill-kafka/v3 v3.1.2 github.com/ThreeDotsLabs/watermill-redisstream v1.4.5 + github.com/ThreeDotsLabs/watermill-sql/v4 v4.1.3 github.com/alexliesenfeld/health v0.8.1 + github.com/bep/debounce v1.2.1 github.com/casbin/casbin/v3 v3.10.0 github.com/casbin/gorm-adapter/v3 v3.41.0 github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0 github.com/exaring/otelpgx v0.10.0 - github.com/getkin/kin-openapi v0.134.0 + github.com/getkin/kin-openapi v0.133.0 github.com/gin-contrib/slog v1.2.1 github.com/gin-gonic/gin v1.12.0 - github.com/go-jet/jet/v2 v2.14.1 github.com/go-playground/validator/v10 v10.30.2 github.com/goforj/wire v1.1.0 github.com/google/uuid v1.6.0 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 github.com/hellofresh/health-go/v5 v5.5.5 github.com/jackc/pgerrcode v0.0.0-20250907135507-afb5586c32a6 - github.com/jackc/pgx/v5 v5.9.1 - github.com/lib/pq v1.10.9 + github.com/jackc/pgx/v5 v5.8.0 github.com/nkonev/watermill-opentelemetry v0.1.11 github.com/oapi-codegen/gin-middleware v1.0.2 github.com/oapi-codegen/runtime v1.3.1 @@ -111,6 +111,7 @@ require ( github.com/klauspost/compress v1.18.4 // indirect 
github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/lithammer/shortuuid/v3 v3.0.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -121,8 +122,8 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v1.0.0 // indirect - github.com/oasdiff/yaml v0.0.0-20260313112342-a3ea61cb4d4c // indirect - github.com/oasdiff/yaml3 v0.0.0-20260224194419-61cd415a242b // indirect + github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect + github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect diff --git a/go.sum b/go.sum index dc9762dc..611b3220 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,8 @@ github.com/ThreeDotsLabs/watermill-kafka/v3 v3.1.2 h1:lLmrzZnl8o8U5uLVhMLSFHGSuW github.com/ThreeDotsLabs/watermill-kafka/v3 v3.1.2/go.mod h1:o1GcoF/1CSJ9JSmQzUkULvpZeO635pZe+WWrYNFlJNk= github.com/ThreeDotsLabs/watermill-redisstream v1.4.5 h1:SCETqsAYo/CRBb7H3+zWCcSqhMpDrQA4I6dCqC7UPR4= github.com/ThreeDotsLabs/watermill-redisstream v1.4.5/go.mod h1:Da3wqG1OcvHPODjuJcxSCY1O7D4loIZQpVbZ5u94xRo= +github.com/ThreeDotsLabs/watermill-sql/v4 v4.1.3 h1:d9niNUM3G9nFH2YRdJ7F+qz3IF0bS2IHq6UB2TKWHGE= +github.com/ThreeDotsLabs/watermill-sql/v4 v4.1.3/go.mod h1:Ce2GVZVnyajAh0AkwxSJXwx8ajBBveu1DI/yatan5jc= github.com/alexliesenfeld/health v0.8.1 h1:wdE3vt+cbJotiR8DGDBZPKHDFoJbAoWEfQTcqrmedUg= github.com/alexliesenfeld/health v0.8.1/go.mod h1:TfNP0f+9WQVWMQRzvMUjlws4ceXKEL3WR+6Hp95HUFc= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= @@ -49,6 +51,8 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0 
h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7D github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY= +github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= @@ -106,8 +110,6 @@ github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9 github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= -github.com/getkin/kin-openapi v0.134.0 h1:/L5+1+kfe6dXh8Ot/wqiTgUkjOIEJiC0bbYVziHB8rU= -github.com/getkin/kin-openapi v0.134.0/go.mod h1:wK6ZLG/VgoETO9pcLJ/VmAtIcl/DNlMayNTb716EUxE= github.com/gin-contrib/slog v1.2.1 h1:tQbsmllW/PNgtvHRdVlI38jLfpN4IFLS7Pb4HgTeiYw= github.com/gin-contrib/slog v1.2.1/go.mod h1:f/Ke0A3h4DUh0cQnjR2b/l+i0EmVJ+6VY6GIw3RKtxA= github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= @@ -118,8 +120,6 @@ github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= -github.com/go-jet/jet/v2 v2.14.1 
h1:wsfD9e7CGP9h46+IFNlftfncBcmVnKddikbTtapQM3M= -github.com/go-jet/jet/v2 v2.14.1/go.mod h1:dqTAECV2Mo3S2NFjbm4vJ1aDruZjhaJ1RAAR8rGUkkc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -205,8 +205,6 @@ github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7Ulw github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= -github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= -github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= @@ -281,12 +279,8 @@ github.com/oapi-codegen/runtime v1.3.1 h1:RgDY6J4OGQLbRXhG/Xpt3vSVqYpHQS7hN4m85+ github.com/oapi-codegen/runtime v1.3.1/go.mod h1:kOdeacKy7t40Rclb1je37ZLFboFxh+YLy0zaPCMibPY= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= -github.com/oasdiff/yaml v0.0.0-20260313112342-a3ea61cb4d4c h1:7ACFcSaQsrWtrH4WHHfUqE1C+f8r2uv8KGaW0jTNjus= -github.com/oasdiff/yaml v0.0.0-20260313112342-a3ea61cb4d4c/go.mod h1:JKox4Gszkxt57kj27u7rvi7IFoIULvCZHUsBTUmQM/s= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= github.com/oasdiff/yaml3 
v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= -github.com/oasdiff/yaml3 v0.0.0-20260224194419-61cd415a242b h1:vivRhVUAa9t1q0Db4ZmezBP8pWQWnXHFokZj0AOea2g= -github.com/oasdiff/yaml3 v0.0.0-20260224194419-61cd415a242b/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= @@ -303,8 +297,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pressly/goose/v3 v3.26.0 h1:KJakav68jdH0WDvoAcj8+n61WqOIaPGgH0bJWS6jpmM= -github.com/pressly/goose/v3 v3.26.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY= github.com/pressly/goose/v3 v3.27.0 h1:/D30gVTuQhu0WsNZYbJi4DMOsx1lNq+6SkLe+Wp59BM= github.com/pressly/goose/v3 v3.27.0/go.mod h1:3ZBeCXqzkgIRvrEMDkYh1guvtoJTU5oMMuDdkutoM78= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= @@ -596,8 +588,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20260226221140-a57be14db171 h1: google.golang.org/genproto/googleapis/api v0.0.0-20260226221140-a57be14db171/go.mod h1:M5krXqk4GhBKvB596udGL3UyjL4I1+cTbK0orROM9ng= google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= -google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= -google.golang.org/grpc v1.79.2/go.mod 
h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= diff --git a/internal/note/app/app.go b/internal/note/app/app.go index 24503581..92f09326 100644 --- a/internal/note/app/app.go +++ b/internal/note/app/app.go @@ -1,20 +1,12 @@ package app -import ( - "context" - "fmt" - - "golang.org/x/sync/errgroup" -) - -type CommandHandlers struct { +type Cmds struct { CreateFolderHandler *CreateFolderHandler CreateNoteHandler *CreateNoteHandler CreateWorkspaceHandler *CreateWorkspaceHandler DeleteFolderHandler *PermanentlyDeleteFolderHandler DeleteNoteHandler *PermanentlyDeleteNoteHandler DeleteWorkspaceHandler *DeleteWorkspaceHandler - GenerateDailyNoteHandler *GenerateDailyNoteHandler MoveWorkspaceItemsHandler *MoveWorkspaceItemsHandler PermanentlyDeleteWorkspaceItemsHandler *PermanentlyDeleteWorkspaceItemsHandler PublishNoteHandler *PublishNoteHandler @@ -29,7 +21,7 @@ type CommandHandlers struct { UpdateWorkspaceMembersHandler *UpdateWorkspaceMembersHandler } -type QueryHandlers struct { +type Queries struct { CheckWorkspaceSlugExistsHandler *CheckWorkspaceSlugExistsHandler GetNoteGraphHandler *GetNoteGraphHandler GetNoteHandler *GetNoteHandler @@ -41,46 +33,15 @@ type QueryHandlers struct { ShowTrashHandler *ShowTrashHandler } -type IntegrationEventHandlers struct { - DocumentCommittedHandler *DocumentCommittedHandler +type Events struct { + DocumentCommittedHandler *DocumentCommittedHandler + NotifyWorkspaceItemsUpdated *NotifyWorkspaceItemsUpdatedHandler } type Server struct { - CommandHandlers *CommandHandlers - IntegrationEventHandlers *IntegrationEventHandlers - QueryHandlers *QueryHandlers - - IntegrationPubSub *IntegrationPubSub - WorkspaceEventPubSub WorkspaceEventPubSub - Persistence Persistence -} - -func (s 
*Server) RunMigration(ctx context.Context) error { - return s.Persistence.RunMigrations(ctx) -} - -func (s *Server) Start(ctx context.Context) error { - if err := s.RunMigration(ctx); err != nil { - return fmt.Errorf("failed to run migrations: %w", err) - } - - g, ctx := errgroup.WithContext(ctx) - g.Go(func() error { - return s.WorkspaceEventPubSub.Run(ctx) - }) - g.Go(func() error { - return s.IntegrationPubSub.Run(ctx) - }) - return g.Wait() -} + Cmds *Cmds + Events *Events + Queries *Queries -func (s *Server) Stop(ctx context.Context) error { - g, _ := errgroup.WithContext(ctx) - g.Go(func() error { - return s.WorkspaceEventPubSub.Close() - }) - g.Go(func() error { - return s.IntegrationPubSub.Close() - }) - return g.Wait() + WorkspaceEventHub WorkspaceEventHub } diff --git a/internal/note/app/createfolder.go b/internal/note/app/cmd_createfolder.go similarity index 90% rename from internal/note/app/createfolder.go rename to internal/note/app/cmd_createfolder.go index 3eb11d38..a15b7a84 100644 --- a/internal/note/app/createfolder.go +++ b/internal/note/app/cmd_createfolder.go @@ -12,7 +12,7 @@ import ( type CreateFolder struct { ID uuid.UUID Name string - Icon *string + Icon string ParentID uuid.UUID WorkspaceID uuid.UUID @@ -36,7 +36,7 @@ func NewCreateFolderHandler( var ProvideCreateFolderHandler = NewCreateFolderHandler -func (h *CreateFolderHandler) Handle(ctx context.Context, cmd *CreateFolder) errs.Error { +func (h *CreateFolderHandler) Handle(ctx context.Context, cmd *CreateFolder) error { hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( ctx, cmd.UserID, @@ -52,8 +52,8 @@ func (h *CreateFolderHandler) Handle(ctx context.Context, cmd *CreateFolder) err fmt.Sprintf("user %q does not have permission to create folder in workspace %q", cmd.UserID, cmd.WorkspaceID.String()), ) } - hierarchy := domain.NewFolderHierarchy(&cmd.ParentID) - folder, err := domain.NewFolder(cmd.ID, cmd.Name, cmd.Icon, cmd.WorkspaceID, *hierarchy, cmd.UserID) + 
hierarchy := domain.NewFolderHierarchy(cmd.ParentID) + folder, err := domain.NewFolder(cmd.ID, cmd.Name, cmd.Icon, cmd.WorkspaceID, hierarchy, cmd.UserID) if err != nil { return err } diff --git a/internal/note/app/createnote.go b/internal/note/app/cmd_createnote.go similarity index 97% rename from internal/note/app/createnote.go rename to internal/note/app/cmd_createnote.go index 34b438ad..5919150a 100644 --- a/internal/note/app/createnote.go +++ b/internal/note/app/cmd_createnote.go @@ -12,7 +12,7 @@ import ( type CreateNote struct { ID uuid.UUID Name string - Icon *string + Icon string FolderID uuid.UUID UserID string @@ -38,7 +38,7 @@ func NewCreateNoteHandler( var ProvideCreateNoteHandler = NewCreateNoteHandler -func (h *CreateNoteHandler) Handle(ctx context.Context, cmd *CreateNote) errs.Error { +func (h *CreateNoteHandler) Handle(ctx context.Context, cmd *CreateNote) error { workspaceID, err := h.folderRepo.GetWorkspaceIDByID(ctx, cmd.FolderID) if err != nil { return err diff --git a/internal/note/app/createworkspace.go b/internal/note/app/cmd_createworkspace.go similarity index 54% rename from internal/note/app/createworkspace.go rename to internal/note/app/cmd_createworkspace.go index 90cba8d7..b5f03dbe 100644 --- a/internal/note/app/createworkspace.go +++ b/internal/note/app/cmd_createworkspace.go @@ -9,33 +9,36 @@ import ( ) type CreateWorkspace struct { - ID uuid.UUID - Name string - Slug string - UserID string + ID uuid.UUID + Name string + Slug string + OwnerID string } type CreateWorkspaceHandler struct { - workspaceRepo domain.WorkspaceRepo - folderRepo domain.FolderRepo - uow domain.UnitOfWork + workspaceRepo domain.WorkspaceRepo + folderRepo domain.FolderRepo + uow domain.UnitOfWork + authorizationService AuthorizationService } func NewCreateWorkspaceHandler( workspaceRepo domain.WorkspaceRepo, folderRepo domain.FolderRepo, uow domain.UnitOfWork, + authorizationService AuthorizationService, ) *CreateWorkspaceHandler { return 
&CreateWorkspaceHandler{ - workspaceRepo: workspaceRepo, - folderRepo: folderRepo, - uow: uow, + workspaceRepo: workspaceRepo, + folderRepo: folderRepo, + uow: uow, + authorizationService: authorizationService, } } var ProvideCreateWorkspaceHandler = NewCreateWorkspaceHandler -func (h *CreateWorkspaceHandler) Handle(ctx context.Context, cmd *CreateWorkspace) errs.Error { +func (h *CreateWorkspaceHandler) Handle(ctx context.Context, cmd *CreateWorkspace) error { slugExisted, err := h.workspaceRepo.CheckSlugExists(ctx, cmd.Slug) if err != nil { return err @@ -43,9 +46,11 @@ func (h *CreateWorkspaceHandler) Handle(ctx context.Context, cmd *CreateWorkspac if slugExisted { return errs.NewWorkspaceSlugAlreadyExists(cmd.Slug, nil) } - rootFolderID := uuid.New() - rootHierarchy := domain.NewFolderHierarchy(nil) - rootFolder, err := domain.NewFolder(rootFolderID, cmd.Name, nil, cmd.ID, *rootHierarchy, cmd.UserID) + rootFolderID, err := uuid.NewV7() + if err != nil { + return errs.NewInternalGenerateID(err) + } + rootFolder, err := domain.NewFolder(rootFolderID, cmd.Name, "", cmd.ID, domain.FolderHierarchy{}, cmd.OwnerID) if err != nil { return err } @@ -53,7 +58,7 @@ func (h *CreateWorkspaceHandler) Handle(ctx context.Context, cmd *CreateWorkspac if err != nil { return err } - err = h.uow.Execute(ctx, func(r domain.RepoRegistry) errs.Error { + if err := h.uow.Execute(ctx, func(r domain.RepoRegistry) error { if err := h.folderRepo.Save(ctx, rootFolder); err != nil { return err } @@ -61,6 +66,11 @@ func (h *CreateWorkspaceHandler) Handle(ctx context.Context, cmd *CreateWorkspac return err } return nil - }) + }); err != nil { + return err + } + if err := h.authorizationService.CreateWorkspaceWithOwnership(ctx, cmd.OwnerID, workspace.ID()); err != nil { + return err + } return err } diff --git a/internal/note/app/deleteworkspace.go b/internal/note/app/cmd_deleteworkspace.go similarity index 73% rename from internal/note/app/deleteworkspace.go rename to 
internal/note/app/cmd_deleteworkspace.go index 2bcc92f1..3312d5cd 100644 --- a/internal/note/app/deleteworkspace.go +++ b/internal/note/app/cmd_deleteworkspace.go @@ -17,21 +17,24 @@ type DeleteWorkspace struct { type DeleteWorkspaceHandler struct { authorizationService AuthorizationService workspaceRepo domain.WorkspaceRepo + uow domain.UnitOfWork } func NewDeleteWorkspaceHandler( authorizationService AuthorizationService, workspaceRepo domain.WorkspaceRepo, + uow domain.UnitOfWork, ) *DeleteWorkspaceHandler { return &DeleteWorkspaceHandler{ authorizationService: authorizationService, workspaceRepo: workspaceRepo, + uow: uow, } } var ProvideDeleteWorkspaceHandler = NewDeleteWorkspaceHandler -func (h *DeleteWorkspaceHandler) Handle(ctx context.Context, cmd *DeleteWorkspace) errs.Error { +func (h *DeleteWorkspaceHandler) Handle(ctx context.Context, cmd *DeleteWorkspace) error { hasPermission, err := h.authorizationService.HasWorkspacePermission( ctx, cmd.UserID, @@ -48,10 +51,13 @@ func (h *DeleteWorkspaceHandler) Handle(ctx context.Context, cmd *DeleteWorkspac ) } - workspace, err := h.workspaceRepo.GetByID(ctx, cmd.ID, true) - if err != nil { - return err - } - workspace.Delete(cmd.UserID) - return h.workspaceRepo.Save(ctx, workspace) + return h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + workspaceRepo := r.Workspace() + workspace, err := workspaceRepo.GetByID(ctx, cmd.ID, true) + if err != nil { + return err + } + workspace.Delete(cmd.UserID) + return workspaceRepo.Save(ctx, workspace) + }) } diff --git a/internal/note/app/moveworkspaceitems.go b/internal/note/app/cmd_moveworkspaceitems.go similarity index 56% rename from internal/note/app/moveworkspaceitems.go rename to internal/note/app/cmd_moveworkspaceitems.go index 1a14f5b3..13ede001 100644 --- a/internal/note/app/moveworkspaceitems.go +++ b/internal/note/app/cmd_moveworkspaceitems.go @@ -40,7 +40,9 @@ func NewMoveWorkspaceItemsHandler( var ProvideMoveWorkspaceItemsHandler = 
NewMoveWorkspaceItemsHandler -func (h *MoveWorkspaceItemsHandler) Handle(ctx context.Context, cmd *MoveWorkspaceItems) errs.Error { +// NOTE: Partially transaction? is it right +// Maybe we should in, side, a, tx... +func (h *MoveWorkspaceItemsHandler) Handle(ctx context.Context, cmd *MoveWorkspaceItems) error { hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( ctx, cmd.UserID, @@ -57,53 +59,80 @@ func (h *MoveWorkspaceItemsHandler) Handle(ctx context.Context, cmd *MoveWorkspa ) } - folderValid, err := h.folderRepo.AreAllInWorkspace(ctx, cmd.FolderIDs, cmd.WorkspaceID) - if err != nil { - return err - } - if !folderValid { - return errs.NewInvalid( - fmt.Sprintf("one or more folders do not belong to workspace %s", cmd.WorkspaceID), - ) - } - - noteValid, err := h.noteRepo.AreAllInWorkspace(ctx, cmd.NoteIDs, cmd.WorkspaceID) - if err != nil { - return err - } - if !noteValid { - return errs.NewInvalid( - fmt.Sprintf("one or more notes do not belong to workspace %s", cmd.WorkspaceID), - ) - } - - destinationFolder, err := h.folderRepo.GetByID(ctx, cmd.DestinationFolderID, false) - if err != nil { - return err - } - - if destinationFolder.WorkspaceID() != cmd.WorkspaceID { - return errs.NewInvalid( - fmt.Sprintf("destination folder %s does not belong to workspace %s", cmd.DestinationFolderID, cmd.WorkspaceID), - ) - } - var folders []*domain.Folder var notes []*domain.Note - err = h.uow.Execute(ctx, func(r domain.RepoRegistry) errs.Error { + err = h.uow.Execute(ctx, func(r domain.RepoRegistry) error { folderRepo := r.Folder() noteRepo := r.Note() + { + folderValid, err := folderRepo.AreAllInWorkspace(ctx, cmd.FolderIDs, cmd.WorkspaceID) + if err != nil { + return err + } + if !folderValid { + return errs.NewInvalid( + fmt.Sprintf("one or more folders do not belong to workspace %s", cmd.WorkspaceID), + ) + } + } + + { + noteValid, err := noteRepo.AreAllInWorkspace(ctx, cmd.NoteIDs, cmd.WorkspaceID) + if err != nil { + return err + } + if 
!noteValid { + return errs.NewInvalid( + fmt.Sprintf("one or more notes do not belong to workspace %s", cmd.WorkspaceID), + ) + } + } + + { + destinationFolder, err := folderRepo.GetByID(ctx, cmd.DestinationFolderID, false) + if err != nil { + return err + } + + if destinationFolder.WorkspaceID() != cmd.WorkspaceID { + return errs.NewInvalid( + fmt.Sprintf("destination folder %s does not belong to workspace %s", cmd.DestinationFolderID, cmd.WorkspaceID), + ) + } + } + + { + parentIDs, err := folderRepo.GetParentIDs(ctx, cmd.DestinationFolderID, true) + if err != nil { + return err + } + parentIDsSet := make(map[uuid.UUID]struct{}) + for _, id := range parentIDs { + parentIDsSet[id] = struct{}{} + } + for _, folderID := range cmd.FolderIDs { + if _, exists := parentIDsSet[folderID]; exists { + return errs.NewInvalid( + fmt.Sprintf("cannot move folder %s into its own subfolder %s", folderID, cmd.DestinationFolderID), + ) + } + } + } + if len(cmd.FolderIDs) == 0 && len(cmd.NoteIDs) == 0 { return nil } if len(cmd.FolderIDs) > 0 { folders, err = folderRepo.GetMany(ctx, - domain.NewFolderRepoGetManyParamsByIDs(cmd.FolderIDs). - WithWorkspaceID(cmd.WorkspaceID). - WithForUpdate()) + //exhaustruct:ignore + &domain.FolderRepoGetManyParams{ + WorkspaceID: cmd.WorkspaceID, + IDs: cmd.FolderIDs, + ForUpdate: true, + }) if err != nil { return err } @@ -115,8 +144,12 @@ func (h *MoveWorkspaceItemsHandler) Handle(ctx context.Context, cmd *MoveWorkspa } } notes, err = noteRepo.GetMany(ctx, - domain.NewNoteRepoGetManyParamsByIDs(cmd.NoteIDs). 
- WithForUpdate(), + //exhaustruct:ignore + &domain.NoteRepoGetManyParams{ + IDs: cmd.NoteIDs, + WorkspaceID: cmd.WorkspaceID, + ForUpdate: true, + }, ) if err != nil { return err @@ -132,6 +165,5 @@ func (h *MoveWorkspaceItemsHandler) Handle(ctx context.Context, cmd *MoveWorkspa if err != nil { return err } - return nil } diff --git a/internal/note/app/permanentlydeletefolder.go b/internal/note/app/cmd_permanentlydeletefolder.go similarity index 76% rename from internal/note/app/permanentlydeletefolder.go rename to internal/note/app/cmd_permanentlydeletefolder.go index 0ef50c0e..e6671619 100644 --- a/internal/note/app/permanentlydeletefolder.go +++ b/internal/note/app/cmd_permanentlydeletefolder.go @@ -17,21 +17,24 @@ type PermanentlyDeleteFolder struct { type PermanentlyDeleteFolderHandler struct { authorizationService AuthorizationService folderRepo domain.FolderRepo + uow domain.UnitOfWork } func PermanentlyNewDeleteFolderHandler( authorizationService AuthorizationService, folderRepo domain.FolderRepo, + uow domain.UnitOfWork, ) *PermanentlyDeleteFolderHandler { return &PermanentlyDeleteFolderHandler{ authorizationService: authorizationService, folderRepo: folderRepo, + uow: uow, } } var ProvidePermanentlyDeleteFolderHandler = PermanentlyNewDeleteFolderHandler -func (h *PermanentlyDeleteFolderHandler) Handle(ctx context.Context, cmd *PermanentlyDeleteFolder) errs.Error { +func (h *PermanentlyDeleteFolderHandler) Handle(ctx context.Context, cmd *PermanentlyDeleteFolder) error { workspaceID, err := h.folderRepo.GetWorkspaceIDByID(ctx, cmd.ID) if err != nil { return err @@ -52,5 +55,13 @@ func (h *PermanentlyDeleteFolderHandler) Handle(ctx context.Context, cmd *Perman ) } - return h.folderRepo.PermanentlyDeleteByID(ctx, cmd.ID) + return h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + folderRepo := r.Folder() + folder, err := folderRepo.GetByID(ctx, cmd.ID, true) + if err != nil { + return err + } + folder.Deleted() + return folderRepo.Save(ctx, folder) + 
}) } diff --git a/internal/note/app/deletenote.go b/internal/note/app/cmd_permanentlydeletenote.go similarity index 77% rename from internal/note/app/deletenote.go rename to internal/note/app/cmd_permanentlydeletenote.go index 6c12fcac..8ce76b5a 100644 --- a/internal/note/app/deletenote.go +++ b/internal/note/app/cmd_permanentlydeletenote.go @@ -17,21 +17,24 @@ type PermanentlyDeleteNote struct { type PermanentlyDeleteNoteHandler struct { authorizationService AuthorizationService noteRepo domain.NoteRepo + uow domain.UnitOfWork } func PermanentlyNewDeleteNoteHandler( authorizationService AuthorizationService, noteRepo domain.NoteRepo, + uow domain.UnitOfWork, ) *PermanentlyDeleteNoteHandler { return &PermanentlyDeleteNoteHandler{ authorizationService: authorizationService, noteRepo: noteRepo, + uow: uow, } } var ProvidePermanentlyDeleteNoteHandler = PermanentlyNewDeleteNoteHandler -func (h *PermanentlyDeleteNoteHandler) Handle(ctx context.Context, cmd *PermanentlyDeleteNote) errs.Error { +func (h *PermanentlyDeleteNoteHandler) Handle(ctx context.Context, cmd *PermanentlyDeleteNote) error { workspaceID, err := h.noteRepo.GetWorkspaceIDByID(ctx, cmd.ID) if err != nil { return err @@ -52,7 +55,13 @@ func (h *PermanentlyDeleteNoteHandler) Handle(ctx context.Context, cmd *Permanen ) } - return h.noteRepo.PermanentlyDeleteByID(ctx, cmd.ID) + return h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + noteRepo := r.Note() + note, err := noteRepo.GetByID(ctx, cmd.ID, true) + if err != nil { + return err + } + note.Deleted() + return noteRepo.Save(ctx, note) + }) } - -var ErrCodeDeleteNoteForbidden = "DeleteNote_1" diff --git a/internal/note/app/permanentlydeleteworkspaceitems.go b/internal/note/app/cmd_permanentlydeleteworkspaceitems.go similarity index 62% rename from internal/note/app/permanentlydeleteworkspaceitems.go rename to internal/note/app/cmd_permanentlydeleteworkspaceitems.go index 70171e80..15b53550 100644 --- 
a/internal/note/app/permanentlydeleteworkspaceitems.go +++ b/internal/note/app/cmd_permanentlydeleteworkspaceitems.go @@ -18,19 +18,16 @@ type PermanentlyDeleteWorkspaceItems struct { type PermanentlyDeleteWorkspaceItemsHandler struct { authorizationService AuthorizationService - noteRepo domain.NoteRepo - folderRepo domain.FolderRepo + uow domain.UnitOfWork } func NewPermanentlyDeleteWorkspaceItemsHandler( authorizationService AuthorizationService, - noteRepo domain.NoteRepo, - folderRepo domain.FolderRepo, + uow domain.UnitOfWork, ) *PermanentlyDeleteWorkspaceItemsHandler { return &PermanentlyDeleteWorkspaceItemsHandler{ authorizationService: authorizationService, - noteRepo: noteRepo, - folderRepo: folderRepo, + uow: uow, } } @@ -53,17 +50,29 @@ func (h *PermanentlyDeleteWorkspaceItemsHandler) Handle(ctx context.Context, cmd ) } - if len(cmd.NoteIDs) > 0 { - if err := h.noteRepo.PermanentlyDeleteByIDs(ctx, cmd.NoteIDs); err != nil { - return err - } - } + // TODO: first, need to fix the param back, awful - if len(cmd.FolderIDs) > 0 { - if err := h.folderRepo.PermanentlyDeleteByIDs(ctx, cmd.FolderIDs); err != nil { - return err - } - } + // return h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + // folderRepo := r.Folder() + // folder, err := folderRepo.GetMany(ctx, domain.NewFolderRepoGetManyParamsByIDs(cmd.FolderIDs).WithTrashed()) + // if err != nil { + // return err + // } + // folder.Deleted() + // return folderRepo.Save(ctx, folder) + // }) + + // if len(cmd.NoteIDs) > 0 { + // if err := h.noteRepo.PermanentlyDeleteByIDs(ctx, cmd.NoteIDs); err != nil { + // return err + // } + // } + // + // if len(cmd.FolderIDs) > 0 { + // if err := h.folderRepo.PermanentlyDeleteByIDs(ctx, cmd.FolderIDs); err != nil { + // return err + // } + // } return nil } diff --git a/internal/note/app/publishnote.go b/internal/note/app/cmd_publishnote.go similarity index 93% rename from internal/note/app/publishnote.go rename to internal/note/app/cmd_publishnote.go index 
97d5a4af..2563d867 100644 --- a/internal/note/app/publishnote.go +++ b/internal/note/app/cmd_publishnote.go @@ -5,7 +5,6 @@ import ( "github.com/google/uuid" "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" ) type PublishNote struct { @@ -22,7 +21,7 @@ func NewPublishNoteHandler(noteRepo domain.NoteRepo) *PublishNoteHandler { var ProvidePublishNoteHandler = NewPublishNoteHandler -func (h *PublishNoteHandler) Handle(ctx context.Context, cmd *PublishNote) errs.Error { +func (h *PublishNoteHandler) Handle(ctx context.Context, cmd *PublishNote) error { // WARN: Handler is incomplete - domain.Note has no Publish() method. // TODO: domain.Note has no Publish() method. Add Publish() to domain.Note and a // published field, then call note.Publish() here before Save. diff --git a/internal/note/app/publishworkspace.go b/internal/note/app/cmd_publishworkspace.go similarity index 100% rename from internal/note/app/publishworkspace.go rename to internal/note/app/cmd_publishworkspace.go diff --git a/internal/note/app/renamefolder.go b/internal/note/app/cmd_renamefolder.go similarity index 75% rename from internal/note/app/renamefolder.go rename to internal/note/app/cmd_renamefolder.go index 44f43bb7..2d093c8d 100644 --- a/internal/note/app/renamefolder.go +++ b/internal/note/app/cmd_renamefolder.go @@ -18,21 +18,24 @@ type RenameFolder struct { type RenameFolderHandler struct { authorizationService AuthorizationService folderRepo domain.FolderRepo + uow domain.UnitOfWork } func NewRenameFolderHandler( authorizationService AuthorizationService, folderRepo domain.FolderRepo, + uow domain.UnitOfWork, ) *RenameFolderHandler { return &RenameFolderHandler{ authorizationService: authorizationService, folderRepo: folderRepo, + uow: uow, } } var ProvideRenameFolderHandler = NewRenameFolderHandler -func (h *RenameFolderHandler) Handle(ctx context.Context, cmd *RenameFolder) errs.Error { +func (h *RenameFolderHandler) Handle(ctx 
context.Context, cmd *RenameFolder) error { workspaceID, err := h.folderRepo.GetWorkspaceIDByID(ctx, cmd.ID) if err != nil { return err @@ -53,10 +56,13 @@ func (h *RenameFolderHandler) Handle(ctx context.Context, cmd *RenameFolder) err ) } - folder, err := h.folderRepo.GetByID(ctx, cmd.ID, true) - if err != nil { - return err - } - folder.Rename(cmd.Name, cmd.UserID) - return h.folderRepo.Save(ctx, folder) + return h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + folderRepo := r.Folder() + folder, err := folderRepo.GetByID(ctx, cmd.ID, true) + if err != nil { + return err + } + folder.Rename(cmd.Name, cmd.UserID) + return folderRepo.Save(ctx, folder) + }) } diff --git a/internal/note/app/renamenote.go b/internal/note/app/cmd_renamenote.go similarity index 76% rename from internal/note/app/renamenote.go rename to internal/note/app/cmd_renamenote.go index c4317c1e..0e242699 100644 --- a/internal/note/app/renamenote.go +++ b/internal/note/app/cmd_renamenote.go @@ -18,21 +18,24 @@ type RenameNote struct { type RenameNoteHandler struct { authorizationService AuthorizationService noterepo domain.NoteRepo + uow domain.UnitOfWork } func NewRenameNoteHandler( authorizationService AuthorizationService, noterepo domain.NoteRepo, + uow domain.UnitOfWork, ) *RenameNoteHandler { return &RenameNoteHandler{ authorizationService: authorizationService, noterepo: noterepo, + uow: uow, } } var ProvideRenameNoteHandler = NewRenameNoteHandler -func (h *RenameNoteHandler) Handle(ctx context.Context, cmd *RenameNote) errs.Error { +func (h *RenameNoteHandler) Handle(ctx context.Context, cmd *RenameNote) error { workspaceID, err := h.noterepo.GetWorkspaceIDByID(ctx, cmd.ID) if err != nil { return err @@ -53,10 +56,13 @@ func (h *RenameNoteHandler) Handle(ctx context.Context, cmd *RenameNote) errs.Er ) } - note, err := h.noterepo.GetByID(ctx, cmd.ID, true) - if err != nil { - return err - } - note.Rename(cmd.Name, cmd.UserID) - return h.noterepo.Save(ctx, note) + return 
h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + noteRepo := r.Note() + note, err := noteRepo.GetByID(ctx, cmd.ID, true) + if err != nil { + return err + } + note.Rename(cmd.Name, cmd.UserID) + return noteRepo.Save(ctx, note) + }) } diff --git a/internal/note/app/renameworkspace.go b/internal/note/app/cmd_renameworkspace.go similarity index 73% rename from internal/note/app/renameworkspace.go rename to internal/note/app/cmd_renameworkspace.go index 0a39d6e5..dcef2248 100644 --- a/internal/note/app/renameworkspace.go +++ b/internal/note/app/cmd_renameworkspace.go @@ -18,21 +18,24 @@ type RenameWorkspace struct { type RenameWorkspaceHandler struct { authorizationService AuthorizationService workspacerepo domain.WorkspaceRepo + uow domain.UnitOfWork } func NewRenameWorkspaceHandler( authorizationService AuthorizationService, workspacerepo domain.WorkspaceRepo, + uow domain.UnitOfWork, ) *RenameWorkspaceHandler { return &RenameWorkspaceHandler{ authorizationService: authorizationService, workspacerepo: workspacerepo, + uow: uow, } } var ProvideRenameWorkspaceHandler = NewRenameWorkspaceHandler -func (h *RenameWorkspaceHandler) Handle(ctx context.Context, cmd *RenameWorkspace) errs.Error { +func (h *RenameWorkspaceHandler) Handle(ctx context.Context, cmd *RenameWorkspace) error { hasPermission, err := h.authorizationService.HasWorkspacePermission( ctx, cmd.UserID, @@ -49,10 +52,13 @@ func (h *RenameWorkspaceHandler) Handle(ctx context.Context, cmd *RenameWorkspac ) } - workspace, err := h.workspacerepo.GetByID(ctx, cmd.ID, true) - if err != nil { - return err - } - workspace.Rename(cmd.Name, cmd.UserID) - return h.workspacerepo.Save(ctx, workspace) + return h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + workspaceRepo := r.Workspace() + workspace, err := workspaceRepo.GetByID(ctx, cmd.ID, true) + if err != nil { + return err + } + workspace.Rename(cmd.Name, cmd.UserID) + return workspaceRepo.Save(ctx, workspace) + }) } diff --git 
a/internal/note/app/cmd_restoretrashworkspaceitems.go b/internal/note/app/cmd_restoretrashworkspaceitems.go new file mode 100644 index 00000000..5915ce21 --- /dev/null +++ b/internal/note/app/cmd_restoretrashworkspaceitems.go @@ -0,0 +1,149 @@ +package app + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/errs" +) + +// TODO: This should carefully recheck +// Transaction? + +type RestoreTrashedWorkspaceItems struct { + WorkspaceID uuid.UUID + UserID string + NoteIDs []uuid.UUID + FolderIDs []uuid.UUID +} + +type RestoreTrashedWorkspaceItemsHandler struct { + authorizationService AuthorizationService + noteRepo domain.NoteRepo + folderRepo domain.FolderRepo + trashService *domain.TrashService + uow domain.UnitOfWork +} + +func NewRestoreTrashedWorkspaceItemsHandler( + authorizationService AuthorizationService, + noteRepo domain.NoteRepo, + folderRepo domain.FolderRepo, + trashService *domain.TrashService, + uow domain.UnitOfWork, +) *RestoreTrashedWorkspaceItemsHandler { + return &RestoreTrashedWorkspaceItemsHandler{ + authorizationService: authorizationService, + noteRepo: noteRepo, + folderRepo: folderRepo, + trashService: trashService, + uow: uow, + } +} + +var ProvideRestoreTrashedWorkspaceItemsHandler = NewRestoreTrashedWorkspaceItemsHandler + +func (h *RestoreTrashedWorkspaceItemsHandler) Handle(ctx context.Context, cmd *RestoreTrashedWorkspaceItems) error { + hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( + ctx, + cmd.UserID, + cmd.WorkspaceID, + WorkspaceItemPermissionDelete, + ) + if err != nil { + return err + } + + if !hasPermission { + return errs.NewForbidden( + fmt.Sprintf("user %s does not have permission to restore items in workspace %s", cmd.UserID, cmd.WorkspaceID), + ) + } + + return h.uow.Execute(ctx, func(r domain.RepoRegistry) error { + noteRepo := r.Note() + folderRepo := r.Folder() + + trashedNotes, err 
:= noteRepo.GetMany(ctx, + //exhaustruct:ignore + &domain.NoteRepoGetManyParams{ + WorkspaceID: cmd.WorkspaceID, + TrashOnly: true, + }, + ) + if err != nil { + return err + } + + trashedFolders, err := folderRepo.GetMany(ctx, + //exhaustruct:ignore + &domain.FolderRepoGetManyParams{ + WorkspaceID: cmd.WorkspaceID, + TrashOnly: true, + }, + ) + if err != nil { + return err + } + + trashedNotePtrs := trashedNotes + trashedFolderPtrs := trashedFolders + + if len(cmd.NoteIDs) > 0 { + notes, err := noteRepo.GetMany(ctx, + //exhaustruct:ignore + &domain.NoteRepoGetManyParams{ + IDs: cmd.NoteIDs, + TrashOnly: true, + ForUpdate: true, + }, + ) + if err != nil { + return err + } + + notePtrs := notes + if err := h.trashService.RestoreNotes(notePtrs, cmd.UserID); err != nil { + return err + } + for _, note := range notePtrs { + if err := noteRepo.Save(ctx, note); err != nil { + return err + } + } + } + + if len(cmd.FolderIDs) > 0 { + folders, err := folderRepo.GetMany(ctx, + //exhaustruct:ignore + &domain.FolderRepoGetManyParams{ + IDs: cmd.FolderIDs, + ForUpdate: true, + }, + ) + if err != nil { + return err + } + + if err := h.trashService.RestoreFolders(&trashedNotePtrs, &trashedFolderPtrs, folders, cmd.UserID); err != nil { + return err + } + + for _, folder := range trashedFolderPtrs { + if err := folderRepo.Save(ctx, folder); err != nil { + return err + } + } + + for _, note := range trashedNotePtrs { + if err := noteRepo.Save(ctx, note); err != nil { + return err + } + } + } + + return nil + }) +} diff --git a/internal/note/app/trashworkspaceitems.go b/internal/note/app/cmd_trashworkspaceitems.go similarity index 79% rename from internal/note/app/trashworkspaceitems.go rename to internal/note/app/cmd_trashworkspaceitems.go index 3aeeb843..1399dad4 100644 --- a/internal/note/app/trashworkspaceitems.go +++ b/internal/note/app/cmd_trashworkspaceitems.go @@ -36,7 +36,7 @@ func NewTrashWorkspaceItemsHandler( var ProvideTrashWorkspaceItemsHandler = 
NewTrashWorkspaceItemsHandler -func (h *TrashWorkspaceItemsHandler) Handle(ctx context.Context, cmd *TrashWorkspaceItems) errs.Error { +func (h *TrashWorkspaceItemsHandler) Handle(ctx context.Context, cmd *TrashWorkspaceItems) error { hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( ctx, cmd.UserID, @@ -53,10 +53,8 @@ func (h *TrashWorkspaceItemsHandler) Handle(ctx context.Context, cmd *TrashWorks ) } - var workspaceEvents []domain.Event - // TODO: Why it getting 4 times?? - err = h.uow.Execute(ctx, func(r domain.RepoRegistry) errs.Error { + err = h.uow.Execute(ctx, func(r domain.RepoRegistry) error { noteRepo := r.Note() folderRepo := r.Folder() @@ -65,14 +63,21 @@ func (h *TrashWorkspaceItemsHandler) Handle(ctx context.Context, cmd *TrashWorks } workspaceNotes, err := noteRepo.GetMany(ctx, - domain.NewNoteRepoGetManyParamsByWorkspaceID(cmd.WorkspaceID), + //exhaustruct:ignore + &domain.NoteRepoGetManyParams{ + WorkspaceID: cmd.WorkspaceID, + }, ) if err != nil { return err } workspaceFolders, err := folderRepo.GetMany(ctx, - domain.NewFolderRepoGetManyParamsByWorkspaceID(cmd.WorkspaceID), + //exhaustruct:ignore + &domain.FolderRepoGetManyParams{ + WorkspaceID: cmd.WorkspaceID, + ForUpdate: true, + }, ) if err != nil { return err @@ -84,8 +89,11 @@ func (h *TrashWorkspaceItemsHandler) Handle(ctx context.Context, cmd *TrashWorks var notes []*domain.Note if len(cmd.NoteIDs) > 0 { notes, err = noteRepo.GetMany(ctx, - domain.NewNoteRepoGetManyParamsByIDs(cmd.NoteIDs). - WithForUpdate(), + //exhaustruct:ignore + &domain.NoteRepoGetManyParams{ + IDs: cmd.NoteIDs, + ForUpdate: true, + }, ) if err != nil { return err @@ -99,8 +107,11 @@ func (h *TrashWorkspaceItemsHandler) Handle(ctx context.Context, cmd *TrashWorks var folders []*domain.Folder if len(cmd.FolderIDs) > 0 { folders, err = folderRepo.GetMany(ctx, - domain.NewFolderRepoGetManyParamsByIDs(cmd.FolderIDs). 
- WithForUpdate(), + //exhaustruct:ignore + &domain.FolderRepoGetManyParams{ + IDs: cmd.FolderIDs, + ForUpdate: true, + }, ) if err != nil { return err @@ -116,18 +127,12 @@ func (h *TrashWorkspaceItemsHandler) Handle(ctx context.Context, cmd *TrashWorks if err := noteRepo.SaveMany(ctx, workspaceNotePtrs); err != nil { return err } - for _, note := range workspaceNotePtrs { - workspaceEvents = append(workspaceEvents, note.PopEvents()...) - } } if len(workspaceFolderPtrs) > 0 { if err := folderRepo.SaveMany(ctx, workspaceFolderPtrs); err != nil { return err } - for _, folder := range workspaceFolderPtrs { - workspaceEvents = append(workspaceEvents, folder.PopEvents()...) - } } return nil diff --git a/internal/note/app/unpublishnote.go b/internal/note/app/cmd_unpublishnote.go similarity index 94% rename from internal/note/app/unpublishnote.go rename to internal/note/app/cmd_unpublishnote.go index 87cffe22..df3b903d 100644 --- a/internal/note/app/unpublishnote.go +++ b/internal/note/app/cmd_unpublishnote.go @@ -5,7 +5,6 @@ import ( "github.com/google/uuid" "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" ) type UnpublishNote struct { @@ -22,7 +21,7 @@ func NewUnpublishNoteHandler(noteRepo domain.NoteRepo) *UnpublishNoteHandler { var ProvideUnpublishNoteHandler = NewUnpublishNoteHandler -func (h *UnpublishNoteHandler) Handle(ctx context.Context, cmd *UnpublishNote) errs.Error { +func (h *UnpublishNoteHandler) Handle(ctx context.Context, cmd *UnpublishNote) error { // WARN: Handler is incomplete - domain.Note has no Unpublish() method. // TODO: domain.Note has no Unpublish() method. Add Unpublish() to domain.Note and a // published field, then call note.Unpublish() here before Save. 
diff --git a/internal/note/app/unpublishworkspace.go b/internal/note/app/cmd_unpublishworkspace.go similarity index 93% rename from internal/note/app/unpublishworkspace.go rename to internal/note/app/cmd_unpublishworkspace.go index 9003c116..ba1e1044 100644 --- a/internal/note/app/unpublishworkspace.go +++ b/internal/note/app/cmd_unpublishworkspace.go @@ -4,7 +4,6 @@ import ( "context" "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" ) type UnpublishWorkspace struct { @@ -21,7 +20,7 @@ func NewUnpublishWorkspaceHandler(workspaceRepo domain.WorkspaceRepo) *Unpublish var ProvideUnpublishWorkspaceHandler = NewUnpublishWorkspaceHandler -func (h *UnpublishWorkspaceHandler) Handle(ctx context.Context, cmd *UnpublishWorkspace) errs.Error { +func (h *UnpublishWorkspaceHandler) Handle(ctx context.Context, cmd *UnpublishWorkspace) error { // WARN: Handler is incomplete - domain.Workspace has no Unpublish() method. // TODO: domain.Workspace has no Unpublish() method. Add a published field and // Unpublish() method to domain.Workspace, then call workspace.Unpublish() here before Save. 
diff --git a/internal/note/app/updateworkspacemembers.go b/internal/note/app/cmd_updateworkspacemembers.go similarity index 77% rename from internal/note/app/updateworkspacemembers.go rename to internal/note/app/cmd_updateworkspacemembers.go index 6a9f2321..bc2a02d9 100644 --- a/internal/note/app/updateworkspacemembers.go +++ b/internal/note/app/cmd_updateworkspacemembers.go @@ -4,7 +4,6 @@ import ( "context" "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" ) type WorkspaceMemberUpdate struct { @@ -17,15 +16,21 @@ type UpdateWorkspaceMembers struct { Members []WorkspaceMemberUpdate } -type UpdateWorkspaceMembersHandler struct{} +type UpdateWorkspaceMembersHandler struct { + integrationPublisher IntegrationPublisher +} -func NewUpdateWorkspaceMembersHandler() *UpdateWorkspaceMembersHandler { - return &UpdateWorkspaceMembersHandler{} +func NewUpdateWorkspaceMembersHandler( + integrationPublisher IntegrationPublisher, +) *UpdateWorkspaceMembersHandler { + return &UpdateWorkspaceMembersHandler{ + integrationPublisher: integrationPublisher, + } } var ProvideUpdateWorkspaceMembersHandler = NewUpdateWorkspaceMembersHandler -func (h *UpdateWorkspaceMembersHandler) Handle(ctx context.Context, cmd *UpdateWorkspaceMembers) errs.Error { +func (h *UpdateWorkspaceMembersHandler) Handle(ctx context.Context, cmd *UpdateWorkspaceMembers) error { // WARN: Unimplemented stub - no domain model for WorkspaceMember exists. // TODO: There is no domain model for WorkspaceMember. Implement a WorkspaceMember // domain entity and corresponding repo (WorkspaceMemberRepo) to manage member roles. 
diff --git a/internal/note/app/documentcommitted.go b/internal/note/app/e_documentcommitted.go similarity index 78% rename from internal/note/app/documentcommitted.go rename to internal/note/app/e_documentcommitted.go index 6a5e6d01..d2465e86 100644 --- a/internal/note/app/documentcommitted.go +++ b/internal/note/app/e_documentcommitted.go @@ -4,13 +4,16 @@ import ( "context" "log/slog" + "github.com/google/uuid" "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/pkg/api/share" ) type DocumentCommitted struct { - share.DocumentCommittedEvent - UserID string + Content any + ID uuid.UUID + OutgoingLinkIDs []uuid.UUID + Tags []string + UserID string } type DocumentCommittedHandler struct { @@ -31,7 +34,7 @@ func NewDocumentCommittedHandler( var ProvideDocumentCommittedHandler = NewDocumentCommittedHandler func (h *DocumentCommittedHandler) Handle(ctx context.Context, event *DocumentCommitted) error { - note, err := h.noteRepo.GetByID(ctx, event.Id, false) + note, err := h.noteRepo.GetByID(ctx, event.ID, false) if err != nil { return err } @@ -40,7 +43,7 @@ func (h *DocumentCommittedHandler) Handle(ctx context.Context, event *DocumentCo return err } note.SetTags(event.Tags, event.UserID) - note.SetOutgoingLinks(event.OutgoingLinkIds, event.UserID) + note.SetOutgoingLinks(event.OutgoingLinkIDs, event.UserID) slog.InfoContext( ctx, "Document committed event handled", diff --git a/internal/note/app/e_notecreatedintegration.go b/internal/note/app/e_notecreatedintegration.go new file mode 100644 index 00000000..66834028 --- /dev/null +++ b/internal/note/app/e_notecreatedintegration.go @@ -0,0 +1,32 @@ +package app + +import ( + "context" + "fmt" + + "github.com/notopia-uit/notopia/internal/note/domain" +) + +type NoteCreatedDomainToIntegrationEventHandler struct { + integrationPublisher IntegrationPublisher +} + +func NewNoteCreatedDomainToIntegrationEventHandler( + integrationPublisher IntegrationPublisher, +) 
*NoteCreatedDomainToIntegrationEventHandler { + return &NoteCreatedDomainToIntegrationEventHandler{ + integrationPublisher: integrationPublisher, + } +} + +func (h *NoteCreatedDomainToIntegrationEventHandler) Handle(ctx context.Context, event *domain.NoteCreatedEvent) error { + integrationEvent := IntegrationEventNoteCreated{ + ID: event.AggregateID, + Icon: event.Icon, + Name: event.Name, + } + if err := h.integrationPublisher.Publish(ctx, integrationEvent); err != nil { + return fmt.Errorf("failed to publish the converted note created event to the integration publisher: %w", err) + } + return nil +} diff --git a/internal/note/app/e_notifyworkspaceitemsupdated.go b/internal/note/app/e_notifyworkspaceitemsupdated.go new file mode 100644 index 00000000..e44f911f --- /dev/null +++ b/internal/note/app/e_notifyworkspaceitemsupdated.go @@ -0,0 +1,96 @@ +package app + +import ( + "context" + "errors" + "fmt" + "log/slog" + "sync" + "time" + + "github.com/bep/debounce" + "github.com/google/uuid" + "github.com/notopia-uit/notopia/pkg/api/note" +) + +type NotifyWorkspaceItemsUpdatedType uint8 + +const ( + NotifyWorkspaceItemsUpdatedTypeFolder NotifyWorkspaceItemsUpdatedType = iota + NotifyWorkspaceItemsUpdatedTypeNote +) + +type NotifyWorkspaceItemsUpdated struct { + UserID string + WorkspaceID uuid.UUID + WorkspaceItemID uuid.UUID + Type NotifyWorkspaceItemsUpdatedType +} + +// Because workspace items includes note and folder, so, we share the same debounce +type NotifyWorkspaceItemsUpdatedHandler struct { + debouncers sync.Map + debounceDuration time.Duration + workspaceEventPublisher WorkspaceEventPublisher +} + +func NewNotifyWorkspaceItemsUpdatedHandler( + workspaceEventPublisher WorkspaceEventPublisher, +) *NotifyWorkspaceItemsUpdatedHandler { + return &NotifyWorkspaceItemsUpdatedHandler{ + debouncers: sync.Map{}, + debounceDuration: 1 * time.Second, + workspaceEventPublisher: workspaceEventPublisher, + } +} + +var ProvideNotifyWorkspaceItemsUpdatedHandler = 
NewNotifyWorkspaceItemsUpdatedHandler + +func (h *NotifyWorkspaceItemsUpdatedHandler) Handle(params *NotifyWorkspaceItemsUpdated) error { + val, _ := h.debouncers.LoadOrStore(params.WorkspaceID, debounce.New(h.debounceDuration)) + debouncer, ok := val.(func(func())) + if !ok { + return fmt.Errorf("failed to assert debouncer for workspaceID: %s", params.WorkspaceID.String()) + } + + debouncer(func() { + publishCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + err := h.publishWorkspaceUpdate(publishCtx, params.WorkspaceID, params.UserID) + if err != nil { + slog.Error( + "failed to publish workspace update", + slog.String("workspaceID", params.WorkspaceID.String()), + slog.Any("error", err), + ) + } + + h.debouncers.Delete(params.WorkspaceID) + }) + return nil +} + +func (h *NotifyWorkspaceItemsUpdatedHandler) publishWorkspaceUpdate( + ctx context.Context, + workspaceID uuid.UUID, + userID string, +) error { + id, err := uuid.NewV7() + if err != nil { + return errors.New("failed to generate event ID") + } + event := &WorkspaceEventWorkspaceItemsUpdated{ + workspaceEvent[note.WorkspaceItemsUpdatedEventEvent]{ + Id: id, + Event: note.WorkspaceItemsUpdatedEventEventWorkspaceItemsUpdatedEvent, + Data: note.WorkspaceItemsUpdatedEventData{ + WorkspaceId: &workspaceID, + }, + }, + } + if err := h.workspaceEventPublisher.Publish(ctx, workspaceID, userID, event); err != nil { + return fmt.Errorf("failed to publish workspace update event: %w", err) + } + return nil +} diff --git a/internal/note/app/generatedailynote.go b/internal/note/app/generatedailynote.go deleted file mode 100644 index d3ccd926..00000000 --- a/internal/note/app/generatedailynote.go +++ /dev/null @@ -1,51 +0,0 @@ -package app - -import ( - "context" - - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type GenerateDailyNote struct { - NoteID uuid.UUID - WorkspaceID uuid.UUID 
- UserID string -} - -type GenerateDailyNoteHandler struct { - noteRepo domain.NoteRepo - folderRepo domain.FolderRepo - workspaceRepo domain.WorkspaceRepo -} - -func NewGenerateDailyNoteHandler( - noteRepo domain.NoteRepo, - folderRepo domain.FolderRepo, - workspaceRepo domain.WorkspaceRepo, -) *GenerateDailyNoteHandler { - return &GenerateDailyNoteHandler{ - noteRepo: noteRepo, - folderRepo: folderRepo, - workspaceRepo: workspaceRepo, - } -} - -var ProvideGenerateDailyNoteHandler = NewGenerateDailyNoteHandler - -func (h *GenerateDailyNoteHandler) Handle(ctx context.Context, cmd *GenerateDailyNote) (*uuid.UUID, errs.Error) { - // WARN: Unimplemented stub - returns nil, nil without any logic. - // TODO: No domain method for generating a daily note. Implement logic to: - // 1. Find or create a "Daily Notes" folder in the workspace root - // - Use folderRepo.GetByWorkspaceID() to find existing, or NewFolder if not found - // 2. Find or create today's note in that folder (named e.g. "2026-03-20") - // - Compare note names to find today's date, or use createdAt timestamp - // 3. 
Return the note ID for the Content-Location response header - // Expected HTTP response: 201 Created with Content-Location header pointing to the new note - // Consider: - // - Using time.Now().Format("2006-01-02") for note naming - // - Publishing NoteCreatedEvent if new note is created - // - Returning cmd.NoteID if pre-generated, or generated ID if created - return nil, nil -} diff --git a/internal/note/app/getnotegraph.go b/internal/note/app/getnotegraph.go deleted file mode 100644 index 555ee63e..00000000 --- a/internal/note/app/getnotegraph.go +++ /dev/null @@ -1,36 +0,0 @@ -package app - -import ( - "context" - "math" - - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type GetNoteGraph struct { - ID uuid.UUID - Depth int -} - -type GetNoteGraphReadModel interface { - GetNoteGraph(ctx context.Context, q *GetNoteGraph) (*Graph, errs.Error) -} - -type GetNoteGraphHandler struct { - readModel GetNoteGraphReadModel -} - -func NewGetNoteGraphHandler(readModel GetNoteGraphReadModel) *GetNoteGraphHandler { - return &GetNoteGraphHandler{readModel: readModel} -} - -var ProvideGetNoteGraphHandler = NewGetNoteGraphHandler - -func (h *GetNoteGraphHandler) Handle(ctx context.Context, query *GetNoteGraph) (*Graph, errs.Error) { - // TODO: Auth - if query.Depth <= 0 { - query.Depth = math.MaxInt - } - return h.readModel.GetNoteGraph(ctx, query) -} diff --git a/internal/note/app/getnotelinks.go b/internal/note/app/getnotelinks.go deleted file mode 100644 index baeea4dd..00000000 --- a/internal/note/app/getnotelinks.go +++ /dev/null @@ -1,33 +0,0 @@ -package app - -import ( - "context" - - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type GetNoteLinks struct { - ID uuid.UUID - OutgoingLinks bool - Backlinks bool -} - -type GetNoteLinksReadModel interface { - GetNoteLinks(ctx context.Context, q *GetNoteLinks) (*NoteLinkResult, errs.Error) -} - -type GetNoteLinksHandler struct { - readModel 
GetNoteLinksReadModel -} - -func NewGetNoteLinksHandler(readModel GetNoteLinksReadModel) *GetNoteLinksHandler { - return &GetNoteLinksHandler{readModel: readModel} -} - -var ProvideGetNoteLinksHandler = NewGetNoteLinksHandler - -func (h *GetNoteLinksHandler) Handle(ctx context.Context, query *GetNoteLinks) (*NoteLinkResult, errs.Error) { - // TODO: Authorize - return h.readModel.GetNoteLinks(ctx, query) -} diff --git a/internal/note/app/getworkspacebyslug.go b/internal/note/app/getworkspacebyslug.go deleted file mode 100644 index 735a82c8..00000000 --- a/internal/note/app/getworkspacebyslug.go +++ /dev/null @@ -1,30 +0,0 @@ -package app - -import ( - "context" - - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type GetWorkspaceBySlug struct { - Slug string -} - -type GetWorkspaceBySlugReadModel interface { - GetWorkspaceBySlug(ctx context.Context, q *GetWorkspaceBySlug) (*Workspace, errs.Error) -} - -type GetWorkspaceHandler struct { - readModel GetWorkspaceBySlugReadModel -} - -func NewGetWorkspaceBySlugHandler(readModel GetWorkspaceBySlugReadModel) *GetWorkspaceHandler { - return &GetWorkspaceHandler{readModel: readModel} -} - -var ProvideGetWorkspaceBySlugHandler = NewGetWorkspaceBySlugHandler - -func (h *GetWorkspaceHandler) Handle(ctx context.Context, query *GetWorkspaceBySlug) (*Workspace, errs.Error) { - // TODO: Authorize - return h.readModel.GetWorkspaceBySlug(ctx, query) -} diff --git a/internal/note/app/getworkspacegraph.go b/internal/note/app/getworkspacegraph.go deleted file mode 100644 index 17d6983b..00000000 --- a/internal/note/app/getworkspacegraph.go +++ /dev/null @@ -1,32 +0,0 @@ -package app - -import ( - "context" - - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type GetWorkspaceGraph struct { - ID uuid.UUID - IgnoreOrphans bool -} - -type GetWorkspaceGraphReadModel interface { - GetWorkspaceGraph(ctx context.Context, q *GetWorkspaceGraph) (*Graph, errs.Error) -} - -type 
GetWorkspaceGraphHandler struct { - readModel GetWorkspaceGraphReadModel -} - -func NewGetWorkspaceGraphHandler(readModel GetWorkspaceGraphReadModel) *GetWorkspaceGraphHandler { - return &GetWorkspaceGraphHandler{readModel: readModel} -} - -var ProvideGetWorkspaceGraphHandler = NewGetWorkspaceGraphHandler - -func (h *GetWorkspaceGraphHandler) Handle(ctx context.Context, query *GetWorkspaceGraph) (*Graph, errs.Error) { - // TODO: Authorize - return h.readModel.GetWorkspaceGraph(ctx, query) -} diff --git a/internal/note/app/getworkspacetree.go b/internal/note/app/getworkspacetree.go deleted file mode 100644 index 48ceb7fd..00000000 --- a/internal/note/app/getworkspacetree.go +++ /dev/null @@ -1,34 +0,0 @@ -package app - -import ( - "context" - - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type GetWorkspaceTree struct { - WorkspaceID uuid.UUID - RootFolderID *uuid.UUID - IncludeTrashed bool - Depth *uint -} - -type GetWorkspaceTreeReadModel interface { - GetWorkspaceTree(ctx context.Context, q *GetWorkspaceTree) (*WorkspaceTreeFolder, errs.Error) -} - -type GetWorkspaceTreeHandler struct { - readModel GetWorkspaceTreeReadModel -} - -func NewGetWorkspaceTreeHandler(readModel GetWorkspaceTreeReadModel) *GetWorkspaceTreeHandler { - return &GetWorkspaceTreeHandler{readModel: readModel} -} - -var ProvideGetWorkspaceTreeHandler = NewGetWorkspaceTreeHandler - -func (h *GetWorkspaceTreeHandler) Handle(ctx context.Context, query *GetWorkspaceTree) (*WorkspaceTreeFolder, errs.Error) { - // TODO: Authorize - return h.readModel.GetWorkspaceTree(ctx, query) -} diff --git a/internal/note/app/integrationevent.go b/internal/note/app/integrationevent.go new file mode 100644 index 00000000..32cde7d7 --- /dev/null +++ b/internal/note/app/integrationevent.go @@ -0,0 +1,45 @@ +package app + +import ( + "context" + "time" + + "github.com/google/uuid" +) + +// TODO: Hey quite miss the user ID right?, but it should need when other service need it 
ye... + +type IntegrationPublisher interface { + Publish(ctx context.Context, event ...IntegrationEvent) error +} + +type IntegrationEvent interface { + isIntegrationEvent() +} + +type IntegrationEventNoteCreated struct { + ID uuid.UUID + Name string + Icon string +} + +func (e IntegrationEventNoteCreated) isIntegrationEvent() {} + +type IntegrationEventNoteDeleted struct { + ID uuid.UUID +} + +func (e IntegrationEventNoteDeleted) isIntegrationEvent() {} + +type IntegrationEventNoteUpdated struct { + ID uuid.UUID + Name string + Icon string + Tags []string + Size uint64 + FolderID uuid.UUID + OutgoingLinks uuid.UUIDs + UpdatedAt time.Time +} + +func (e IntegrationEventNoteUpdated) isIntegrationEvent() {} diff --git a/internal/note/app/integrationpubsub.go b/internal/note/app/integrationpubsub.go deleted file mode 100644 index fc836445..00000000 --- a/internal/note/app/integrationpubsub.go +++ /dev/null @@ -1,46 +0,0 @@ -package app - -import ( - "context" - - "github.com/ThreeDotsLabs/watermill/components/cqrs" - "github.com/ThreeDotsLabs/watermill/message" -) - -type IntegrationPubSub struct { - eventBus *cqrs.EventBus - eventProcessor *cqrs.EventProcessor - router *message.Router -} - -func NewIntegrationPubSub( - eventBus *cqrs.EventBus, - eventProcessor *cqrs.EventProcessor, - router *message.Router, -) *IntegrationPubSub { - return &IntegrationPubSub{ - eventBus: eventBus, - eventProcessor: eventProcessor, - router: router, - } -} - -func (p *IntegrationPubSub) EventBus() *cqrs.EventBus { - return p.eventBus -} - -func (p *IntegrationPubSub) EventProcessor() *cqrs.EventProcessor { - return p.eventProcessor -} - -func (p *IntegrationPubSub) Router() *message.Router { - return p.router -} - -func (p *IntegrationPubSub) Run(ctx context.Context) error { - return p.router.Run(ctx) -} - -func (p *IntegrationPubSub) Close() error { - return p.router.Close() -} diff --git a/internal/note/app/internalpubsub.go b/internal/note/app/internalpubsub.go deleted file mode 
100644 index 4879f7a4..00000000 --- a/internal/note/app/internalpubsub.go +++ /dev/null @@ -1 +0,0 @@ -package app diff --git a/internal/note/app/persistence.go b/internal/note/app/persistence.go deleted file mode 100644 index ecf05c48..00000000 --- a/internal/note/app/persistence.go +++ /dev/null @@ -1,9 +0,0 @@ -package app - -import "context" - -type Persistence interface { - IsMigrationDone(ctx context.Context) (bool, error) - Ping(ctx context.Context) error - RunMigrations(ctx context.Context) error -} diff --git a/internal/note/app/checkworkspaceexists.go b/internal/note/app/q_checkworkspaceexists.go similarity index 84% rename from internal/note/app/checkworkspaceexists.go rename to internal/note/app/q_checkworkspaceexists.go index c937af6d..51cb3232 100644 --- a/internal/note/app/checkworkspaceexists.go +++ b/internal/note/app/q_checkworkspaceexists.go @@ -2,8 +2,6 @@ package app import ( "context" - - "github.com/notopia-uit/notopia/internal/note/errs" ) type CheckWorkspaceSlugExists struct { @@ -11,7 +9,7 @@ type CheckWorkspaceSlugExists struct { } type CheckWorkspaceSlugExistsReadModel interface { - CheckWorkspaceSlugExists(ctx context.Context, q *CheckWorkspaceSlugExists) (*CheckWorkspaceSlugExistsResult, errs.Error) + CheckWorkspaceSlugExists(ctx context.Context, q *CheckWorkspaceSlugExists) (*CheckWorkspaceSlugExistsResult, error) } type CheckWorkspaceSlugExistsHandler struct { @@ -24,6 +22,6 @@ func NewCheckWorkspaceSlugExistsHandler(readModel CheckWorkspaceSlugExistsReadMo var ProvideCheckWorkspaceSlugExistsHandler = NewCheckWorkspaceSlugExistsHandler -func (h *CheckWorkspaceSlugExistsHandler) Handle(ctx context.Context, query *CheckWorkspaceSlugExists) (*CheckWorkspaceSlugExistsResult, errs.Error) { +func (h *CheckWorkspaceSlugExistsHandler) Handle(ctx context.Context, query *CheckWorkspaceSlugExists) (*CheckWorkspaceSlugExistsResult, error) { return h.readModel.CheckWorkspaceSlugExists(ctx, query) } diff --git a/internal/note/app/getnote.go 
b/internal/note/app/q_getnote.go similarity index 93% rename from internal/note/app/getnote.go rename to internal/note/app/q_getnote.go index 1789b21a..35d58e20 100644 --- a/internal/note/app/getnote.go +++ b/internal/note/app/q_getnote.go @@ -17,7 +17,7 @@ type GetNote struct { } type GetNoteReadModel interface { - GetNote(ctx context.Context, q *GetNote) (*Note, errs.Error) + GetNote(ctx context.Context, q *GetNote) (*Note, error) } type GetNoteHandler struct { @@ -40,7 +40,7 @@ func NewGetNoteHandler( var ProvideGetNoteHandler = NewGetNoteHandler -func (h *GetNoteHandler) Handle(ctx context.Context, query *GetNote) (*Note, errs.Error) { +func (h *GetNoteHandler) Handle(ctx context.Context, query *GetNote) (*Note, error) { workspaceID, err := h.noteRepo.GetWorkspaceIDByID(ctx, query.ID) if err != nil { return nil, err diff --git a/internal/note/app/q_getnotegraph.go b/internal/note/app/q_getnotegraph.go new file mode 100644 index 00000000..ef9f5dfe --- /dev/null +++ b/internal/note/app/q_getnotegraph.go @@ -0,0 +1,67 @@ +package app + +import ( + "context" + "fmt" + "math" + + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/errs" +) + +type GetNoteGraph struct { + ID uuid.UUID + Depth int + + UserID string +} + +type GetNoteGraphReadModel interface { + GetNoteGraph(ctx context.Context, q *GetNoteGraph) (*Graph, error) +} + +type GetNoteGraphHandler struct { + authorizationService AuthorizationService + noteRepo domain.NoteRepo + readModel GetNoteGraphReadModel +} + +func NewGetNoteGraphHandler( + authorizationService AuthorizationService, + noteRepo domain.NoteRepo, + readModel GetNoteGraphReadModel, +) *GetNoteGraphHandler { + return &GetNoteGraphHandler{ + authorizationService: authorizationService, + noteRepo: noteRepo, + readModel: readModel, + } +} + +var ProvideGetNoteGraphHandler = NewGetNoteGraphHandler + +func (h *GetNoteGraphHandler) Handle(ctx context.Context, query 
*GetNoteGraph) (*Graph, error) { + workspaceID, err := h.noteRepo.GetWorkspaceIDByID(ctx, query.ID) + if err != nil { + return nil, err + } + hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( + ctx, + query.UserID, + workspaceID, + WorkspaceItemPermissionRead, + ) + if err != nil { + return nil, err + } + if !hasPermission { + return nil, errs.NewForbidden( + fmt.Sprintf("user %s does not have permission to read note graph %s", query.UserID, query.ID), + ) + } + if query.Depth <= 0 { + query.Depth = math.MaxInt + } + return h.readModel.GetNoteGraph(ctx, query) +} diff --git a/internal/note/app/q_getnotelinks.go b/internal/note/app/q_getnotelinks.go new file mode 100644 index 00000000..73ebcf1e --- /dev/null +++ b/internal/note/app/q_getnotelinks.go @@ -0,0 +1,64 @@ +package app + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/errs" +) + +type GetNoteLinks struct { + ID uuid.UUID + OutgoingLinks bool + Backlinks bool + + UserID string +} + +type GetNoteLinksReadModel interface { + GetNoteLinks(ctx context.Context, q *GetNoteLinks) (*NoteLinkResult, error) +} + +type GetNoteLinksHandler struct { + authorizationService AuthorizationService + noteRepo domain.NoteRepo + readModel GetNoteLinksReadModel +} + +func NewGetNoteLinksHandler( + authorizationService AuthorizationService, + noteRepo domain.NoteRepo, + readModel GetNoteLinksReadModel, +) *GetNoteLinksHandler { + return &GetNoteLinksHandler{ + authorizationService: authorizationService, + noteRepo: noteRepo, + readModel: readModel, + } +} + +var ProvideGetNoteLinksHandler = NewGetNoteLinksHandler + +func (h *GetNoteLinksHandler) Handle(ctx context.Context, query *GetNoteLinks) (*NoteLinkResult, error) { + workspaceID, err := h.noteRepo.GetWorkspaceIDByID(ctx, query.ID) + if err != nil { + return nil, err + } + hasPermission, err := 
h.authorizationService.HasWorkspaceItemPermission( + ctx, + query.UserID, + workspaceID, + WorkspaceItemPermissionRead, + ) + if err != nil { + return nil, err + } + if !hasPermission { + return nil, errs.NewForbidden( + fmt.Sprintf("user %s does not have permission to read note links %s", query.UserID, query.ID), + ) + } + return h.readModel.GetNoteLinks(ctx, query) +} diff --git a/internal/note/app/q_getworkspacebyslug.go b/internal/note/app/q_getworkspacebyslug.go new file mode 100644 index 00000000..73de9fc3 --- /dev/null +++ b/internal/note/app/q_getworkspacebyslug.go @@ -0,0 +1,60 @@ +package app + +import ( + "context" + "fmt" + + "github.com/notopia-uit/notopia/internal/note/errs" +) + +type GetWorkspaceBySlug struct { + Slug string + + UserID string +} + +type WorkspaceBySlugReadModel interface { + GetWorkspaceBySlug(ctx context.Context, q *GetWorkspaceBySlug) (*Workspace, error) +} + +type GetWorkspaceHandler struct { + authorizationService AuthorizationService + readModel WorkspaceBySlugReadModel +} + +func NewGetWorkspaceBySlugHandler( + authorizationService AuthorizationService, + readModel WorkspaceBySlugReadModel, +) *GetWorkspaceHandler { + return &GetWorkspaceHandler{ + authorizationService: authorizationService, + readModel: readModel, + } +} + +var ProvideGetWorkspaceBySlugHandler = NewGetWorkspaceBySlugHandler + +func (h *GetWorkspaceHandler) Handle(ctx context.Context, query *GetWorkspaceBySlug) (*Workspace, error) { + workspace, err := h.readModel.GetWorkspaceBySlug(ctx, query) + if err != nil { + return nil, err + } + if workspace == nil { + return nil, nil + } + hasPermission, err := h.authorizationService.HasWorkspacePermission( + ctx, + query.UserID, + workspace.ID, + WorkspacePermissionRead, + ) + if err != nil { + return nil, err + } + if !hasPermission { + return nil, errs.NewForbidden( + fmt.Sprintf("user %s does not have permission to read workspace %s", query.UserID, workspace.ID), + ) + } + return workspace, nil +} diff --git 
a/internal/note/app/q_getworkspacegraph.go b/internal/note/app/q_getworkspacegraph.go new file mode 100644 index 00000000..f5030fbc --- /dev/null +++ b/internal/note/app/q_getworkspacegraph.go @@ -0,0 +1,55 @@ +package app + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/errs" +) + +type GetWorkspaceGraph struct { + ID uuid.UUID + IgnoreOrphans bool + + UserID string +} + +type GetWorkspaceGraphReadModel interface { + GetWorkspaceGraph(ctx context.Context, q *GetWorkspaceGraph) (*Graph, error) +} + +type GetWorkspaceGraphHandler struct { + authorizationService AuthorizationService + readModel GetWorkspaceGraphReadModel +} + +func NewGetWorkspaceGraphHandler( + authorizationService AuthorizationService, + readModel GetWorkspaceGraphReadModel, +) *GetWorkspaceGraphHandler { + return &GetWorkspaceGraphHandler{ + authorizationService: authorizationService, + readModel: readModel, + } +} + +var ProvideGetWorkspaceGraphHandler = NewGetWorkspaceGraphHandler + +func (h *GetWorkspaceGraphHandler) Handle(ctx context.Context, query *GetWorkspaceGraph) (*Graph, error) { + hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( + ctx, + query.UserID, + query.ID, + WorkspaceItemPermissionRead, + ) + if err != nil { + return nil, err + } + if !hasPermission { + return nil, errs.NewForbidden( + fmt.Sprintf("user %s does not have permission to read workspace graph %s", query.UserID, query.ID), + ) + } + return h.readModel.GetWorkspaceGraph(ctx, query) +} diff --git a/internal/note/app/getworkspacemembers.go b/internal/note/app/q_getworkspacemembers.go similarity index 78% rename from internal/note/app/getworkspacemembers.go rename to internal/note/app/q_getworkspacemembers.go index d4ddcd3c..59b19c99 100644 --- a/internal/note/app/getworkspacemembers.go +++ b/internal/note/app/q_getworkspacemembers.go @@ -4,7 +4,6 @@ import ( "context" "github.com/google/uuid" - 
"github.com/notopia-uit/notopia/internal/note/errs" ) // TODO: Kev @@ -20,6 +19,6 @@ func NewGetWorkspaceMembersHandler() *GetWorkspaceMembersHandler { var ProvideGetWorkspaceMembersHandler = NewGetWorkspaceMembersHandler -func (h *GetWorkspaceMembersHandler) Handle(ctx context.Context, query *GetWorkspaceMembers) ([]*WorkspaceMember, errs.Error) { +func (h *GetWorkspaceMembersHandler) Handle(ctx context.Context, query *GetWorkspaceMembers) ([]*WorkspaceMember, error) { return nil, nil } diff --git a/internal/note/app/q_getworkspacetree.go b/internal/note/app/q_getworkspacetree.go new file mode 100644 index 00000000..a1ae4276 --- /dev/null +++ b/internal/note/app/q_getworkspacetree.go @@ -0,0 +1,57 @@ +package app + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/errs" +) + +type GetWorkspaceTree struct { + WorkspaceID uuid.UUID + RootFolderID uuid.UUID + IncludeTrashed bool + Depth uint + + UserID string +} + +type GetWorkspaceTreeReadModel interface { + GetWorkspaceTree(ctx context.Context, q *GetWorkspaceTree) (*WorkspaceTreeFolder, error) +} + +type GetWorkspaceTreeHandler struct { + authorizationService AuthorizationService + readModel GetWorkspaceTreeReadModel +} + +func NewGetWorkspaceTreeHandler( + authorizationService AuthorizationService, + readModel GetWorkspaceTreeReadModel, +) *GetWorkspaceTreeHandler { + return &GetWorkspaceTreeHandler{ + authorizationService: authorizationService, + readModel: readModel, + } +} + +var ProvideGetWorkspaceTreeHandler = NewGetWorkspaceTreeHandler + +func (h *GetWorkspaceTreeHandler) Handle(ctx context.Context, query *GetWorkspaceTree) (*WorkspaceTreeFolder, error) { + hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( + ctx, + query.UserID, + query.WorkspaceID, + WorkspaceItemPermissionRead, + ) + if err != nil { + return nil, err + } + if !hasPermission { + return nil, errs.NewForbidden( + fmt.Sprintf("user %s does not have permission to 
read workspace tree %s", query.UserID, query.WorkspaceID), + ) + } + return h.readModel.GetWorkspaceTree(ctx, query) +} diff --git a/internal/note/app/readmodel.go b/internal/note/app/q_model.go similarity index 91% rename from internal/note/app/readmodel.go rename to internal/note/app/q_model.go index 27046855..82b55cd5 100644 --- a/internal/note/app/readmodel.go +++ b/internal/note/app/q_model.go @@ -33,7 +33,7 @@ type Trashed struct { type Note struct { ID uuid.UUID Name string - Icon *string + Icon string Tags []string Size int32 FolderID uuid.UUID @@ -46,7 +46,7 @@ type Note struct { type Folder struct { ID uuid.UUID Name string - Icon *string + Icon string ParentID uuid.UUID WorkspaceID uuid.UUID Trashed *Trashed @@ -64,7 +64,7 @@ type GraphNode struct { ID string Name string Type GraphNodeType - Weight *float64 + Weight float64 } type GraphLink struct { @@ -80,7 +80,7 @@ type Graph struct { type NoteLink struct { ID uuid.UUID Name string - Icon *string + Icon string } type NoteLinkResult struct { @@ -104,26 +104,27 @@ var ( type WorkspaceMember struct { ID string - Username *string + Username string Role WorkspaceRole } type WorkspaceTreeNote struct { ID uuid.UUID Name string - Icon *string + Icon string UpdatedAt time.Time } type WorkspaceTreeFolder struct { ID uuid.UUID Name string - Icon *string + Icon string Notes []*WorkspaceTreeNote Children []*WorkspaceTreeFolder UpdatedAt time.Time } +// if we have time, change to uint8 iota type TrashedBy string const ( @@ -134,14 +135,14 @@ const ( type TrashedFolder struct { ID uuid.UUID Name string - Icon *string + Icon string Trashed Trashed } type TrashedNote struct { ID uuid.UUID Name string - Icon *string + Icon string Trashed Trashed } diff --git a/internal/note/app/q_showtrash.go b/internal/note/app/q_showtrash.go new file mode 100644 index 00000000..3c870f97 --- /dev/null +++ b/internal/note/app/q_showtrash.go @@ -0,0 +1,54 @@ +package app + +import ( + "context" + "fmt" + + "github.com/google/uuid" + 
"github.com/notopia-uit/notopia/internal/note/errs" +) + +type ShowTrash struct { + WorkspaceID uuid.UUID + + UserID string +} + +type ShowTrashReadModel interface { + ShowTrash(ctx context.Context, q *ShowTrash) (*Trash, error) +} + +type ShowTrashHandler struct { + authorizationService AuthorizationService + readModel ShowTrashReadModel +} + +func NewShowTrashHandler( + authorizationService AuthorizationService, + readModel ShowTrashReadModel, +) *ShowTrashHandler { + return &ShowTrashHandler{ + authorizationService: authorizationService, + readModel: readModel, + } +} + +var ProvideShowTrashHandler = NewShowTrashHandler + +func (h *ShowTrashHandler) Handle(ctx context.Context, query *ShowTrash) (*Trash, error) { + hasPermission, err := h.authorizationService.HasWorkspaceItemPermission( + ctx, + query.UserID, + query.WorkspaceID, + WorkspaceItemPermissionRead, + ) + if err != nil { + return nil, err + } + if !hasPermission { + return nil, errs.NewForbidden( + fmt.Sprintf("user %s does not have permission to read trash in workspace %s", query.UserID, query.WorkspaceID), + ) + } + return h.readModel.ShowTrash(ctx, query) +} diff --git a/internal/note/app/restoretrashworkspaceitems.go b/internal/note/app/restoretrashworkspaceitems.go deleted file mode 100644 index 37ec29d7..00000000 --- a/internal/note/app/restoretrashworkspaceitems.go +++ /dev/null @@ -1,105 +0,0 @@ -package app - -import ( - "context" - - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type RestoreTrashedWorkspaceItems struct { - WorkspaceID uuid.UUID - UserID string - NoteIDs []uuid.UUID - FolderIDs []uuid.UUID -} - -type RestoreTrashedWorkspaceItemsHandler struct { - noteRepo domain.NoteRepo - folderRepo domain.FolderRepo - trashService *domain.TrashService -} - -func NewRestoreTrashedWorkspaceItemsHandler( - noteRepo domain.NoteRepo, - folderRepo domain.FolderRepo, - trashService *domain.TrashService, 
-) *RestoreTrashedWorkspaceItemsHandler { - return &RestoreTrashedWorkspaceItemsHandler{ - noteRepo: noteRepo, - folderRepo: folderRepo, - trashService: trashService, - } -} - -var ProvideRestoreTrashedWorkspaceItemsHandler = NewRestoreTrashedWorkspaceItemsHandler - -func (h *RestoreTrashedWorkspaceItemsHandler) Handle(ctx context.Context, cmd *RestoreTrashedWorkspaceItems) errs.Error { - trashedNotes, err := h.noteRepo.GetMany(ctx, - domain.NewNoteRepoGetManyParamsByWorkspaceID(cmd.WorkspaceID). - WithIsTrashed(true), - ) - if err != nil { - return err - } - - trashedFolders, err := h.folderRepo.GetMany(ctx, - domain.NewFolderRepoGetManyParamsByWorkspaceID(cmd.WorkspaceID). - WithTrashed(), - ) - if err != nil { - return err - } - - trashedNotePtrs := trashedNotes - trashedFolderPtrs := trashedFolders - - if len(cmd.NoteIDs) > 0 { - notes, err := h.noteRepo.GetMany(ctx, - domain.NewNoteRepoGetManyParamsByIDs(cmd.NoteIDs). - WithForUpdate(), - ) - if err != nil { - return err - } - - notePtrs := notes - if err := h.trashService.RestoreNotes(notePtrs, cmd.UserID); err != nil { - return err - } - for _, note := range notePtrs { - if err := h.noteRepo.Save(ctx, note); err != nil { - return err - } - } - } - - if len(cmd.FolderIDs) > 0 { - folders, err := h.folderRepo.GetMany(ctx, - domain.NewFolderRepoGetManyParamsByIDs(cmd.FolderIDs). 
- WithForUpdate(), - ) - if err != nil { - return err - } - - if err := h.trashService.RestoreFolders(&trashedNotePtrs, &trashedFolderPtrs, folders, cmd.UserID); err != nil { - return err - } - - for _, folder := range trashedFolderPtrs { - if err := h.folderRepo.Save(ctx, folder); err != nil { - return err - } - } - - for _, note := range trashedNotePtrs { - if err := h.noteRepo.Save(ctx, note); err != nil { - return err - } - } - } - - return nil -} diff --git a/internal/note/app/showtrash.go b/internal/note/app/showtrash.go deleted file mode 100644 index 2fd876fd..00000000 --- a/internal/note/app/showtrash.go +++ /dev/null @@ -1,30 +0,0 @@ -package app - -import ( - "context" - - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" -) - -type ShowTrash struct { - WorkspaceID uuid.UUID -} - -type ShowTrashReadModel interface { - ShowTrash(ctx context.Context, q *ShowTrash) (*Trash, errs.Error) -} - -type ShowTrashHandler struct { - readModel ShowTrashReadModel -} - -func NewShowTrashHandler(readModel ShowTrashReadModel) *ShowTrashHandler { - return &ShowTrashHandler{readModel: readModel} -} - -var ProvideShowTrashHandler = NewShowTrashHandler - -func (h *ShowTrashHandler) Handle(ctx context.Context, query *ShowTrash) (*Trash, errs.Error) { - return h.readModel.ShowTrash(ctx, query) -} diff --git a/internal/note/app/authorizationservice.go b/internal/note/app/svc_authorization.go similarity index 86% rename from internal/note/app/authorizationservice.go rename to internal/note/app/svc_authorization.go index 301157eb..87d74c71 100644 --- a/internal/note/app/authorizationservice.go +++ b/internal/note/app/svc_authorization.go @@ -4,7 +4,6 @@ import ( "context" "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" ) type WorkspacePermission string @@ -42,39 +41,38 @@ type AuthorizationService interface { userID string, workspaceID uuid.UUID, permission WorkspacePermission, - ) (bool, errs.Error) + ) (bool, error) 
HasWorkspaceItemPermission( ctx context.Context, userID string, workspaceID uuid.UUID, permission WorkspaceItemPermission, - ) (bool, errs.Error) + ) (bool, error) HasWorkspaceNotePermission( ctx context.Context, userID string, workspaceID uuid.UUID, permission WorkspaceItemPermission, - ) (bool, errs.Error) + ) (bool, error) HasWorkspaceFolderPermission( ctx context.Context, userID string, workspaceID uuid.UUID, permission WorkspaceItemPermission, - ) (bool, errs.Error) + ) (bool, error) CreateWorkspaceWithOwnership( ctx context.Context, - userID string, + ownerID string, workspaceID uuid.UUID, - ownerID uuid.UUID, - ) errs.Error + ) error GetWorkspaceMembers( ctx context.Context, userID string, workspaceID uuid.UUID, - ) ([]*WorkspaceMemberInfo, errs.Error) + ) ([]*WorkspaceMemberInfo, error) } diff --git a/internal/note/app/wire.go b/internal/note/app/wire.go index b143c344..ec96f1fd 100644 --- a/internal/note/app/wire.go +++ b/internal/note/app/wire.go @@ -11,7 +11,6 @@ var ProviderSetCommand = wire.NewSet( ProvidePermanentlyDeleteFolderHandler, ProvidePermanentlyDeleteNoteHandler, ProvideDeleteWorkspaceHandler, - ProvideGenerateDailyNoteHandler, ProvideGetNoteHandler, ProvideMoveWorkspaceItemsHandler, ProvidePermanentlyDeleteWorkspaceItemsHandler, @@ -25,7 +24,13 @@ var ProviderSetCommand = wire.NewSet( ProvideUnpublishNoteHandler, ProvideUnpublishWorkspaceHandler, ProvideUpdateWorkspaceMembersHandler, - wire.Struct(new(CommandHandlers), "*"), + wire.Struct(new(Cmds), "*"), +) + +var ProviderSetEvent = wire.NewSet( + ProvideDocumentCommittedHandler, + ProvideNotifyWorkspaceItemsUpdatedHandler, + wire.Struct(new(Events), "*"), ) var ProviderSetQuery = wire.NewSet( @@ -37,17 +42,12 @@ var ProviderSetQuery = wire.NewSet( ProvideGetWorkspaceMembersHandler, ProvideGetWorkspaceTreeHandler, ProvideShowTrashHandler, - wire.Struct(new(QueryHandlers), "*"), -) - -var ProviderSetIntegrationEvent = wire.NewSet( - ProvideDocumentCommittedHandler, - 
wire.Struct(new(IntegrationEventHandlers), "*"), + wire.Struct(new(Queries), "*"), ) var ProviderSet = wire.NewSet( ProviderSetCommand, - ProviderSetIntegrationEvent, + ProviderSetEvent, ProviderSetQuery, wire.Struct(new(Server), "*"), ) diff --git a/internal/note/app/workspaceevent.go b/internal/note/app/workspaceevent.go index cf105aae..90d95dee 100644 --- a/internal/note/app/workspaceevent.go +++ b/internal/note/app/workspaceevent.go @@ -2,33 +2,32 @@ package app import ( "context" - "reflect" "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" "github.com/notopia-uit/notopia/pkg/api/note" ) -type WorkspaceEventPubSub interface { +type WorkspaceEventPublisher interface { Publish( ctx context.Context, workspaceID uuid.UUID, userID string, events ...WorkspaceEvent, - ) errs.Error + ) error +} +type WorkspaceEventSubscriber interface { Subscribe( ctx context.Context, workspaceID uuid.UUID, userID string, - ) (<-chan WorkspaceEvent, errs.Error) - - Run(ctx context.Context) error - - Close() error + ) (<-chan WorkspaceEvent, error) +} - Check(ctx context.Context) error +// Or, we just marshall the event to a string json string, instead marshall and unmarshall +type WorkspaceEventHub interface { + WorkspaceEventPublisher + WorkspaceEventSubscriber } type WorkspaceEvent interface { @@ -49,11 +48,7 @@ func (e workspaceEvent[E]) isWorkspaceEvent() {} func (e workspaceEvent[E]) GetID() uuid.UUID { return e.Id } func (e workspaceEvent[E]) GetEvent() string { return string(e.Event) } -type WorkspaceEventWorkspaceMembersUpdated struct { - workspaceEvent[note.WorkspaceMembersUpdatedEventEvent] -} - -type WorkspaceEventWorkspaceItemsChanged struct { +type WorkspaceEventWorkspaceItemsUpdated struct { workspaceEvent[note.WorkspaceItemsUpdatedEventEvent] } @@ -69,91 +64,21 @@ type WorkspaceEventWorkspaceDeleted struct { workspaceEvent[note.WorkspaceDeletedEventEvent] } -func 
FromDomainEventToWorkspaceEvent(event domain.Event) (WorkspaceEvent, bool) { - switch e := event.(type) { - case *domain.FolderCreatedEvent, - *domain.FolderDeletedEvent, - *domain.FolderUpdatedEvent, - *domain.FolderMovedEvent, - *domain.FolderTrashedEvent, - *domain.FolderRestoredEvent, - *domain.FolderPermanentlyDeletedEvent, - *domain.NoteCreatedEvent, - *domain.NoteDeletedEvent, - *domain.NoteUpdatedEvent, - *domain.NoteMovedEvent, - *domain.NoteTrashedEvent, - *domain.NoteRestoredEvent, - *domain.NotePermanentlyDeletedEvent: - return &WorkspaceEventWorkspaceItemsChanged{ - workspaceEvent: workspaceEvent[note.WorkspaceItemsUpdatedEventEvent]{ - Id: e.GetID(), - Event: note.WorkspaceItemsUpdatedEventEventWorkspaceItemsUpdatedEvent, - Data: note.WorkspaceItemsUpdatedEventData{ - WorkspaceId: (*note.PropertiesId)(new(e.GetAggregateID())), - }, - }, - }, true - case *domain.WorkspaceUpdatedEvent: - return &WorkspaceEventWorkspaceUpdated{ - workspaceEvent: workspaceEvent[note.WorkspaceUpdatedEventEvent]{ - Id: e.GetID(), - Event: note.WorkspaceUpdatedEventEventWorkspaceUpdatedEvent, - Data: note.Workspace{ - Id: (*note.PropertiesId)(new(e.GetAggregateID())), - Name: e.Name, - Slug: e.Slug, - }, - }, - }, true - case *domain.WorkspaceDeletedEvent: - return &WorkspaceEventWorkspaceDeleted{ - workspaceEvent: workspaceEvent[note.WorkspaceDeletedEventEvent]{ - Id: e.GetID(), - Event: note.WorkspaceDeletedEventEventWorkspaceDeletedEvent, - Data: note.WorkspaceDeletedEventData{ - Id: (*note.PropertiesId)(new(e.GetAggregateID())), - }, - }, - }, true - default: - return nil, false - } -} - -var workspaceEventTypeRegistry = make(map[string]reflect.Type) - -func init() { - registerWorkspaceEventType( - //exhaustruct:ignore - &WorkspaceEventWorkspaceMembersUpdated{}, - ) - registerWorkspaceEventType( +func NewEmptyWorkspaceEventFromType(t string) (WorkspaceEvent, bool) { + switch t { + case string(note.WorkspaceMembersUpdatedEventEventWorkspaceMembersUpdatedEvent): 
//exhaustruct:ignore - &WorkspaceEventWorkspaceItemsChanged{}, - ) - registerWorkspaceEventType( + return &WorkspaceEventMembersUpdated{}, true + case string(note.WorkspaceItemsUpdatedEventEventWorkspaceItemsUpdatedEvent): //exhaustruct:ignore - &WorkspaceEventMembersUpdated{}, - ) - registerWorkspaceEventType( + return &WorkspaceEventWorkspaceItemsUpdated{}, true + case string(note.WorkspaceUpdatedEventEventWorkspaceUpdatedEvent): //exhaustruct:ignore - &WorkspaceEventWorkspaceUpdated{}, - ) - registerWorkspaceEventType( + return &WorkspaceEventWorkspaceUpdated{}, true + case string(note.WorkspaceDeletedEventEventWorkspaceDeletedEvent): //exhaustruct:ignore - &WorkspaceEventWorkspaceDeleted{}, - ) -} - -func registerWorkspaceEventType(event WorkspaceEvent) { - eventType := reflect.TypeOf(event).Elem().Name() - workspaceEventTypeRegistry[eventType] = reflect.TypeOf(event).Elem() -} - -func NewEmptyWorkspaceEventFromType(t string) (WorkspaceEvent, bool) { - if t, ok := workspaceEventTypeRegistry[t]; ok { - return reflect.New(t).Interface().(WorkspaceEvent), true + return &WorkspaceEventWorkspaceDeleted{}, true + default: + return nil, false } - return nil, false } diff --git a/internal/note/component/domaineventtopic.go b/internal/note/component/domaineventtopic.go new file mode 100644 index 00000000..4f79dbd0 --- /dev/null +++ b/internal/note/component/domaineventtopic.go @@ -0,0 +1,47 @@ +package component + +import "github.com/notopia-uit/notopia/internal/note/domain" + +const DomainEventTopicPrefix = "events.internal.note." 
+ +func DomainEventToTopic(event domain.Event) (string, bool) { + eType := domain.GetEventType(event) + switch eType { + case domain.EventTypeFolderCreated: + return DomainEventTopicPrefix + "folder.created", true + case domain.EventTypeFolderDeleted: + return DomainEventTopicPrefix + "folder.deleted", true + case domain.EventTypeFolderUpdated: + return DomainEventTopicPrefix + "folder.updated", true + case domain.EventTypeFolderMoved: + return DomainEventTopicPrefix + "folder.moved", true + case domain.EventTypeFolderTrashed: + return DomainEventTopicPrefix + "folder.trashed", true + case domain.EventTypeFolderRestored: + return DomainEventTopicPrefix + "folder.restored", true + case domain.EventTypeFolderPermanentlyDeleted: + return DomainEventTopicPrefix + "folder.permanently_deleted", true + case domain.EventTypeNoteCreated: + return DomainEventTopicPrefix + "note.created", true + case domain.EventTypeNoteDeleted: + return DomainEventTopicPrefix + "note.deleted", true + case domain.EventTypeNoteUpdated: + return DomainEventTopicPrefix + "note.updated", true + case domain.EventTypeNoteMoved: + return DomainEventTopicPrefix + "note.moved", true + case domain.EventTypeNoteTrashed: + return DomainEventTopicPrefix + "note.trashed", true + case domain.EventTypeNoteRestored: + return DomainEventTopicPrefix + "note.restored", true + case domain.EventTypeNotePermanentlyDeleted: + return DomainEventTopicPrefix + "note.permanently_deleted", true + case domain.EventTypeWorkspaceUpdated: + return DomainEventTopicPrefix + "workspace.updated", true + case domain.EventTypeWorkspaceDeleted: + return DomainEventTopicPrefix + "workspace.deleted", true + case domain.EventTypeUnspecified: + return "", false + default: + return "", false + } +} diff --git a/internal/note/component/validator.go b/internal/note/component/validator.go index 042367f1..4c37c2a8 100644 --- a/internal/note/component/validator.go +++ b/internal/note/component/validator.go @@ -1,4 +1,4 @@ -package components 
+package component import ( "github.com/go-playground/validator/v10" diff --git a/internal/note/component/watermill.go b/internal/note/component/watermill.go new file mode 100644 index 00000000..4ce612b0 --- /dev/null +++ b/internal/note/component/watermill.go @@ -0,0 +1,20 @@ +package component + +import ( + "log/slog" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill/components/cqrs" +) + +func NewWatermillJsonMarshaler() *cqrs.JSONMarshaler { + return &cqrs.JSONMarshaler{} +} + +var ProvideWatermillJsonMarshaler = NewWatermillJsonMarshaler + +func NewWatermillLogger(logger *slog.Logger) watermill.LoggerAdapter { + return watermill.NewSlogLogger(logger) +} + +var ProvideWatermillLogger = NewWatermillLogger diff --git a/internal/note/component/wire.go b/internal/note/component/wire.go index 35cb34c9..d32f2284 100644 --- a/internal/note/component/wire.go +++ b/internal/note/component/wire.go @@ -1,4 +1,4 @@ -package components +package component import ( "github.com/goforj/wire" @@ -6,4 +6,6 @@ import ( var ProviderSet = wire.NewSet( ProvideValidate, + ProvideWatermillJsonMarshaler, + ProvideWatermillLogger, ) diff --git a/internal/note/config/config.go b/internal/note/config/config.go index df05e56b..d38bb933 100644 --- a/internal/note/config/config.go +++ b/internal/note/config/config.go @@ -21,8 +21,42 @@ type Services struct { Authorization commonconfig.Service `json:"authorization" mapstructure:"authorization" validate:"required" yaml:"authorization"` } +type DomainEvent struct { + MessageMetadataUserIDKey string `json:"messageMetadataUserIdKey" mapstructure:"message_metadata_user_id_key" validate:"required" yaml:"message_metadata_user_id_key"` + MessageWorkspaceIDKey string `json:"messageMetadataWorkspaceIdKey" mapstructure:"message_metadata_workspace_id_key" validate:"required" yaml:"message_metadata_workspace_id_key"` + MessageMetadataAggregateIDKey string `json:"messageMetadataAggregateIdKey" 
mapstructure:"message_metadata_aggregate_id_key" validate:"required" yaml:"message_metadata_aggregate_id_key"` + OutboxTableName string `json:"outboxTableName" mapstructure:"outbox_table_name" validate:"required" yaml:"outbox_table_name"` +} + +func setViperAdvancedDomainEventDefault(viper *viper.Viper) { + viper.SetDefault("advanced.domain_event.message_metadata_user_id_key", "user_id") + viper.SetDefault("advanced.domain_event.message_metadata_workspace_id_key", "workspace_id") + viper.SetDefault("advanced.domain_event.message_metadata_aggregate_id_key", "aggregate_id") + viper.SetDefault("advanced.domain_event.outbox_table_name", "eventsToForward") +} + +type WorkspaceEvent struct { + MessageMetadataWorkspaceIDKey string `json:"messageMetadataWorkspaceIdKey" mapstructure:"message_metadata_workspace_id_key" validate:"required" yaml:"message_metadata_workspace_id_key"` + MessageMetadataUserIDKey string `json:"messageMetadataUserIdKey" mapstructure:"message_metadata_user_id_key" validate:"required" yaml:"message_metadata_user_id_key"` + MessageMetadataEventTypeKey string `json:"messageMetadataEventTypeKey" mapstructure:"message_metadata_event_type_key" validate:"required" yaml:"message_metadata_event_type_key"` + MessageGeneralTopic string `json:"messageGeneralTopic" mapstructure:"message_general_topic" validate:"required" yaml:"message_general_topic"` +} + +func setViperAdvancedWorkspaceEventDefault(viper *viper.Viper) { + viper.SetDefault("advanced.workspace_event.message_metadata_workspace_id_key", "workspace_id") + viper.SetDefault("advanced.workspace_event.message_metadata_user_id_key", "user_id") + viper.SetDefault("advanced.workspace_event.message_metadata_event_type_key", "event_type") + viper.SetDefault("advanced.workspace_event.message_general_topic", "events:workspaces") +} + type Advanced struct { - OutboxTableName string `json:"outboxTableName" mapstructure:"outbox_table_name" validate:"required" yaml:"outbox_table_name"` + DomainEvent DomainEvent 
`json:"domainEvent" mapstructure:"domain_event" validate:"omitempty" yaml:"domain_event"` + WorkspaceEvent WorkspaceEvent `json:"workspaceEvent" mapstructure:"workspace_event" validate:"omitempty" yaml:"workspace_event"` +} + +func setViperAdvancedDefault(viper *viper.Viper) { + setViperAdvancedDomainEventDefault(viper) + setViperAdvancedWorkspaceEventDefault(viper) } type Config struct { @@ -49,7 +83,7 @@ func New( viper.SetDefault("server.http.port", 8081) viper.SetDefault("server.grpc.port", 18081) viper.SetDefault("server.health.port", 28081) - viper.SetDefault("advanced.outbox_table_name", "eventsToForward") + setViperAdvancedDefault(viper) commonconfig.LogViperSetDefault(viper, "log") commonconfig.KafkaViperSetDefault(viper, "kafka", "note-service") commonconfig.SQLViperSetDefault(viper, "database") diff --git a/internal/note/config/wire.go b/internal/note/config/wire.go index 3528960f..aa9ff68a 100644 --- a/internal/note/config/wire.go +++ b/internal/note/config/wire.go @@ -9,12 +9,18 @@ var ProviderSet = wire.NewSet( Provide, wire.FieldsOf( new(*Config), + "Advanced", "Database", "General", - "Log", - "Server", "Kafka", + "Log", "Redis", + "Server", "Services", ), + wire.FieldsOf( + new(*Advanced), + "DomainEvent", + "WorkspaceEvent", + ), ) diff --git a/internal/note/controller/event/documentcommitted.go b/internal/note/controller/event/documentcommitted.go new file mode 100644 index 00000000..406694eb --- /dev/null +++ b/internal/note/controller/event/documentcommitted.go @@ -0,0 +1,19 @@ +package event + +import ( + "context" + + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/pkg/api/share" +) + +func (e *Event) documentCommittedHandler(ctx context.Context, event *share.DocumentCommittedEvent) error { + ev := &app.DocumentCommitted{ + ID: event.Id, + Content: event.Content, + Tags: event.Tags, + OutgoingLinkIDs: event.OutgoingLinkIds, + UserID: event.UserId, + } + return 
e.app.Events.DocumentCommittedHandler.Handle(ctx, ev) +} diff --git a/internal/note/controller/event/event.go b/internal/note/controller/event/event.go index 3d8ca22c..93df9f09 100644 --- a/internal/note/controller/event/event.go +++ b/internal/note/controller/event/event.go @@ -1,34 +1,174 @@ package event import ( + "context" + "fmt" + "time" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka" "github.com/ThreeDotsLabs/watermill/components/cqrs" "github.com/ThreeDotsLabs/watermill/message" + "github.com/ThreeDotsLabs/watermill/message/router/middleware" "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/component" + "github.com/notopia-uit/notopia/internal/note/config" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/pkg/api/share" + commonconfig "github.com/notopia-uit/notopia/pkg/common/config" ) -type PubSub struct { - commandBus *cqrs.CommandBus - commandProcessor *cqrs.CommandProcessor - eventBus *cqrs.EventBus - eventProcessor *cqrs.EventProcessor - router *message.Router - app *app.Server +// This include integration (from share package) and domain event, setup for event processor +// If not use with event processor but via subscriber directly, not need to declare this +func eventToTopic(event any) (string, bool) { + switch e := event.(type) { + case domain.Event: + return component.DomainEventToTopic(e) + case *share.DocumentCommittedEvent: + return "events.integration.document.document.committed", true + } + return "", false +} + +type Event struct { + subcriber message.Subscriber + eventProcessor *cqrs.EventProcessor + router *message.Router + app *app.Server + + domainEventCfg *config.DomainEvent } -func NewPubSub( - commandBus *cqrs.CommandBus, - commandProcessor *cqrs.CommandProcessor, - eventBus *cqrs.EventBus, - eventProcessor *cqrs.EventProcessor, - router *message.Router, +func NewEvent( + cfg 
*commonconfig.Kafka, app *app.Server, -) *PubSub { - return &PubSub{ - commandBus: commandBus, - commandProcessor: commandProcessor, - eventBus: eventBus, - eventProcessor: eventProcessor, - router: router, - app: app, + tracer kafka.SaramaTracer, + logger watermill.LoggerAdapter, + marshaler *cqrs.JSONMarshaler, + domainEventCfg *config.DomainEvent, +) (*Event, error) { + subcriber, err := kafka.NewSubscriber( + kafka.SubscriberConfig{ + Brokers: cfg.Brokers, + ConsumerGroup: cfg.ConsumerGroup, + Tracer: tracer, + }, + logger, + ) + if err != nil { + return nil, fmt.Errorf("failed to create event controller subscriber: %w", err) + } + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, fmt.Errorf("failed to create event controller router: %w", err) + } + retryMiddleware := middleware.Retry{ + MaxRetries: 3, + InitialInterval: 500 * time.Millisecond, + MaxInterval: 10 * time.Second, + Multiplier: 2, + Logger: logger, + } + router.AddMiddleware( + middleware.CorrelationID, + middleware.Recoverer, + retryMiddleware.Middleware, + ) + eventProcessor, err := cqrs.NewEventProcessorWithConfig( + router, + cqrs.EventProcessorConfig{ + GenerateSubscribeTopic: func(params cqrs.EventProcessorGenerateSubscribeTopicParams) (string, error) { + if topic, ok := eventToTopic(params.EventHandler.NewEvent()); ok { // Watermill doesn't pass the new event for me??, so I create a new event again + return topic, nil + } + return "", fmt.Errorf("unknown event name %s", params.EventName) + }, + SubscriberConstructor: func(params cqrs.EventProcessorSubscriberConstructorParams) (message.Subscriber, error) { + return kafka.NewSubscriber( + kafka.SubscriberConfig{ + Brokers: cfg.Brokers, + ConsumerGroup: cfg.ConsumerGroup + "." 
+ params.HandlerName, + Tracer: tracer, + }, + logger, + ) + }, + Marshaler: marshaler, + Logger: logger, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create event controller event processor: %w", err) + } + event := &Event{ + subcriber: subcriber, + eventProcessor: eventProcessor, + router: router, + app: app, + domainEventCfg: domainEventCfg, + } + if err := event.setup(); err != nil { + return nil, fmt.Errorf("failed to setup event controller: %w", err) + } + return event, nil +} + +var ProvideEvent = NewEvent + +func (e *Event) setup() error { + if _, err := e.eventProcessor.AddHandler(cqrs.NewEventHandler( + "DocumentCommittedHandler", + e.documentCommittedHandler, + )); err != nil { + return fmt.Errorf("failed to add event handler: %w", err) + } + + // NOTE: because watermill doesn't support kafka regex (IBM/sarama) + // So, we will need to for loop all topic we have, (for note, and folder) + workspaceItemUpdatedFolderTopics := []string{ + component.DomainEventTopicPrefix + "folder.created", + component.DomainEventTopicPrefix + "folder.deleted", + component.DomainEventTopicPrefix + "folder.updated", + component.DomainEventTopicPrefix + "folder.moved", + component.DomainEventTopicPrefix + "folder.trashed", + component.DomainEventTopicPrefix + "folder.restored", + component.DomainEventTopicPrefix + "folder.permanently_deleted", + } + for _, topic := range workspaceItemUpdatedFolderTopics { + e.router.AddConsumerHandler( + fmt.Sprintf("WorkspaceItemsUpdatedHandler.%s", topic), + topic, + e.subcriber, + e.notifyWorkspaceItemsUpdatedFolderHandler, + ) + } + workspaceItemUpdatedNoteTopics := []string{ + component.DomainEventTopicPrefix + "note.created", + component.DomainEventTopicPrefix + "note.deleted", + component.DomainEventTopicPrefix + "note.updated", + component.DomainEventTopicPrefix + "note.moved", + component.DomainEventTopicPrefix + "note.trashed", + component.DomainEventTopicPrefix + "note.restored", + component.DomainEventTopicPrefix 
+ "note.permanently_deleted", } + for _, topic := range workspaceItemUpdatedNoteTopics { + e.router.AddConsumerHandler( + fmt.Sprintf("WorkspaceItemsUpdatedHandler.%s", topic), + topic, + e.subcriber, + e.notifyWorkspaceItemsUpdatedNoteHandler, + ) + } + return nil +} + +func (e *Event) Run(ctx context.Context) error { + if err := e.router.Run(ctx); err != nil { + return fmt.Errorf("failed to run event controller router: %w", err) + } + return nil +} + +func (e *Event) IsRunning() bool { + return e.router.IsRunning() } diff --git a/internal/note/controller/event/notifyworkspaceitemsupdated.go b/internal/note/controller/event/notifyworkspaceitemsupdated.go new file mode 100644 index 00000000..fb537739 --- /dev/null +++ b/internal/note/controller/event/notifyworkspaceitemsupdated.go @@ -0,0 +1,74 @@ +package event + +import ( + "errors" + "fmt" + + "github.com/ThreeDotsLabs/watermill/message" + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/app" +) + +func (e *Event) notifyWorkspaceItemsUpdatedNoteHandler(msg *message.Message) error { + noteIDStr := msg.Metadata.Get(e.domainEventCfg.MessageMetadataAggregateIDKey) + if noteIDStr == "" { + return errors.New("missing note id in message metadata in notifyWorkspaceItemsUpdatedNoteHandler") + } + noteID, err := uuid.Parse(noteIDStr) + if err != nil { + return errors.New("invalid note id in message metadata in notifyWorkspaceItemsUpdatedNoteHandler") + } + workspaceIDStr := msg.Metadata.Get(e.domainEventCfg.MessageWorkspaceIDKey) + if workspaceIDStr == "" { + return errors.New("missing workspace id in message metadata in notifyWorkspaceItemsUpdatedNoteHandler") + } + workspaceID, err := uuid.Parse(workspaceIDStr) + if err != nil { + return errors.New("invalid workspace id in message metadata in notifyWorkspaceItemsUpdatedNoteHandler") + } + userID := msg.Metadata.Get(e.domainEventCfg.MessageMetadataUserIDKey) + if userID == "" { + return errors.New("missing user id in message metadata in 
notifyWorkspaceItemsUpdatedNoteHandler") + } + if err := e.app.Events.NotifyWorkspaceItemsUpdated.Handle(&app.NotifyWorkspaceItemsUpdated{ + WorkspaceItemID: noteID, + WorkspaceID: workspaceID, + UserID: userID, + Type: app.NotifyWorkspaceItemsUpdatedTypeNote, + }); err != nil { + return fmt.Errorf("failed to handle NotifyWorkspaceItemsUpdated in notifyWorkspaceItemsUpdatedNoteHandler: %w", err) + } + return nil +} + +func (e *Event) notifyWorkspaceItemsUpdatedFolderHandler(msg *message.Message) error { + folderIDStr := msg.Metadata.Get(e.domainEventCfg.MessageMetadataAggregateIDKey) + if folderIDStr == "" { + return errors.New("missing folder id in message metadata in notifyWorkspaceItemsUpdatedFolderHandler") + } + folderID, err := uuid.Parse(folderIDStr) + if err != nil { + return errors.New("invalid folder id in message metadata in notifyWorkspaceItemsUpdatedFolderHandler") + } + workspaceIDStr := msg.Metadata.Get(e.domainEventCfg.MessageWorkspaceIDKey) + if workspaceIDStr == "" { + return errors.New("missing workspace id in message metadata in notifyWorkspaceItemsUpdatedFolderHandler") + } + workspaceID, err := uuid.Parse(workspaceIDStr) + if err != nil { + return errors.New("invalid workspace id in message metadata in notifyWorkspaceItemsUpdatedFolderHandler") + } + userID := msg.Metadata.Get(e.domainEventCfg.MessageMetadataUserIDKey) + if userID == "" { + return errors.New("missing user id in message metadata in notifyWorkspaceItemsUpdatedFolderHandler") + } + if err := e.app.Events.NotifyWorkspaceItemsUpdated.Handle(&app.NotifyWorkspaceItemsUpdated{ + WorkspaceItemID: folderID, + WorkspaceID: workspaceID, + UserID: userID, + Type: app.NotifyWorkspaceItemsUpdatedTypeFolder, + }); err != nil { + return fmt.Errorf("failed to handle NotifyWorkspaceItemsUpdated in notifyWorkspaceItemsUpdatedFolderHandler: %w", err) + } + return nil +} diff --git a/internal/note/controller/integrationevent/wire.go b/internal/note/controller/event/wire.go similarity index 58% 
rename from internal/note/controller/integrationevent/wire.go rename to internal/note/controller/event/wire.go index 0e7f4baa..0a1870b9 100644 --- a/internal/note/controller/integrationevent/wire.go +++ b/internal/note/controller/event/wire.go @@ -1,9 +1,9 @@ -package integrationevent +package event import ( "github.com/goforj/wire" ) var ProviderSet = wire.NewSet( - ProvideIntegrationEvent, + ProvideEvent, ) diff --git a/internal/note/controller/health/health.go b/internal/note/controller/health/health.go index 9c85782e..56d718df 100644 --- a/internal/note/controller/health/health.go +++ b/internal/note/controller/health/health.go @@ -10,6 +10,8 @@ import ( httpCheck "github.com/hellofresh/health-go/v5/checks/http" "github.com/notopia-uit/notopia/internal/note/app" "github.com/notopia-uit/notopia/internal/note/config" + "github.com/notopia-uit/notopia/internal/note/infra/persistence" + "github.com/notopia-uit/notopia/internal/note/infra/workspaceevent" ) type Health struct { @@ -17,9 +19,10 @@ type Health struct { } func New( - persistence app.Persistence, + persistence *persistence.Pg, serverCfg *config.Server, - workspaceEventPubSub app.WorkspaceEventPubSub, + workspaceEventHub app.WorkspaceEventHub, + redisClient *workspaceevent.RedisClient, ) *Health { startupChecker := health.NewChecker( health.WithCheck( @@ -70,13 +73,14 @@ func New( }), }, ), + // TODO: this have to check kafka, not the pub sub health.WithPeriodicCheck( 15*time.Second, 3*time.Second, health.Check{ - Name: "workspaceEventPubSub", + Name: "workspaceEventHub redis connection", Check: func(ctx context.Context) error { - return workspaceEventPubSub.Check(ctx) + return redisClient.Ping(ctx).Err() }, }, ), diff --git a/internal/note/controller/http/folder.go b/internal/note/controller/http/folder.go index c8b26a44..94a3c140 100644 --- a/internal/note/controller/http/folder.go +++ b/internal/note/controller/http/folder.go @@ -25,16 +25,20 @@ func (h *StrictHandler) CreateFolder( if err != nil { 
return nil, errs.NewInternalGenerateID(err) } + var icon string + if body.Icon != nil { + icon = *body.Icon + } cmd := &app.CreateFolder{ ID: id, Name: body.Name, - Icon: body.Icon, + Icon: icon, ParentID: *body.ParentId, WorkspaceID: *body.WorkspaceId, UserID: user.ID, } - if err := h.App.CommandHandlers.CreateFolderHandler.Handle(ctx, cmd); err != nil { + if err := h.App.Cmds.CreateFolderHandler.Handle(ctx, cmd); err != nil { return nil, err } @@ -59,7 +63,7 @@ func (h *StrictHandler) PermanentlyDeleteFolder( UserID: user.ID, } - if err := h.App.CommandHandlers.DeleteFolderHandler.Handle(ctx, cmd); err != nil { + if err := h.App.Cmds.DeleteFolderHandler.Handle(ctx, cmd); err != nil { return nil, err } return note.PermanentlyDeleteFolder204Response{}, nil @@ -82,7 +86,7 @@ func (h *StrictHandler) RenameFolder( UserID: user.ID, } - if err := h.App.CommandHandlers.RenameFolderHandler.Handle(ctx, cmd); err != nil { + if err := h.App.Cmds.RenameFolderHandler.Handle(ctx, cmd); err != nil { return nil, err } diff --git a/internal/note/controller/http/http.go b/internal/note/controller/http/http.go index 9ebb37f6..6d32fd84 100644 --- a/internal/note/controller/http/http.go +++ b/internal/note/controller/http/http.go @@ -24,9 +24,9 @@ type ( ) type StrictHandler struct { - App *app.Server - BaseURL *url.URL - WorkspaceEventPubSub app.WorkspaceEventPubSub + App *app.Server + BaseURL *url.URL + WorkspaceEventHub app.WorkspaceEventHub } var _ IStrictHandler = (*StrictHandler)(nil) @@ -34,7 +34,7 @@ var _ IStrictHandler = (*StrictHandler)(nil) func NewStrictHandler( app *app.Server, cfg *config.Server, - workspaceEventPubSub app.WorkspaceEventPubSub, + workspaceEventHub app.WorkspaceEventHub, ) *StrictHandler { return &StrictHandler{ App: app, @@ -42,7 +42,7 @@ func NewStrictHandler( Scheme: "http", Host: cfg.HTTP.Address(), }, - WorkspaceEventPubSub: workspaceEventPubSub, + WorkspaceEventHub: workspaceEventHub, } } diff --git a/internal/note/controller/http/map.go 
b/internal/note/controller/http/map.go index 173b502b..e3d99f70 100644 --- a/internal/note/controller/http/map.go +++ b/internal/note/controller/http/map.go @@ -1,90 +1,138 @@ package http import ( + "fmt" + + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/authorization/errs" "github.com/notopia-uit/notopia/internal/note/app" "github.com/notopia-uit/notopia/pkg/api/note" ) -func toNote(n app.Note) note.Note { - id := n.ID - folderID := n.FolderID - updatedAt := n.UpdatedAt - tags := n.Tags - if tags == nil { - tags = []string{} +func toNote(n app.Note) (note.Note, error) { + var icon *string + if n.Icon != "" { + icon = &n.Icon + } + + var tags *[]string + if len(n.Tags) > 0 { + tags = &n.Tags } + var trashed *note.NoteTrashed if n.Trashed != nil { + trashedBy, err := toTrashedBy(n.Trashed.TrashedBy) + if err != nil { + return note.Note{}, fmt.Errorf("invalid trashed by: %v", err) + } trashed = ¬e.NoteTrashed{ - TrashedBy: toTrashedBy(n.Trashed.TrashedBy), + TrashedBy: trashedBy, TrashedAt: n.Trashed.TrashedAt, } } + return note.Note{ - Id: &id, + Id: &n.ID, Name: n.Name, - Icon: n.Icon, - Tags: &tags, - FolderId: &folderID, - UpdatedAt: &updatedAt, + Icon: icon, + Tags: tags, + FolderId: &n.FolderID, + UpdatedAt: &n.UpdatedAt, Trashed: trashed, - } + }, nil } -func toFolder(f app.Folder) note.Folder { - id := f.ID - parentID := f.ParentID - workspaceID := f.WorkspaceID - updatedAt := f.UpdatedAt +func toFolder(f app.Folder) (note.Folder, error) { + var icon *string + if f.Icon != "" { + icon = &f.Icon + } + + var parentID *uuid.UUID + if f.ParentID != uuid.Nil { + parentID = &f.ParentID + } + var trashed *note.FolderTrashed if f.Trashed != nil { + trashedBy, err := toTrashedBy(f.Trashed.TrashedBy) + if err != nil { + return note.Folder{}, fmt.Errorf("invalid trashed by: %v", err) + } trashed = ¬e.FolderTrashed{ - TrashedBy: toTrashedBy(f.Trashed.TrashedBy), + TrashedBy: trashedBy, TrashedAt: f.Trashed.TrashedAt, } } + return note.Folder{ - Id: 
&id, + Id: &f.ID, Name: f.Name, - Icon: f.Icon, - ParentId: &parentID, - WorkspaceId: &workspaceID, - UpdatedAt: &updatedAt, + Icon: icon, + ParentId: parentID, + WorkspaceId: &f.WorkspaceID, + UpdatedAt: &f.UpdatedAt, Trashed: trashed, - } + }, nil } func toWorkspace(w app.Workspace) note.Workspace { - id := w.ID return note.Workspace{ - Id: &id, + Id: &w.ID, Name: w.Name, Slug: w.Slug, } } -func toWorkspaceMember(m *app.WorkspaceMember) note.WorkspaceMember { +func toWorkspaceRole(r app.WorkspaceRole) (note.WorkspaceRole, error) { + switch r { + case app.WorkspaceRoleOwner: + return note.Owner, nil + case app.WorkspaceRoleEditor: + return note.Editor, nil + case app.WorkspaceRoleViewer: + return note.Viewer, nil + default: + return note.Viewer, errs.NewInternal(fmt.Sprintf("invalid workspace role: %v", r), nil) + } +} + +func toWorkspaceMember(m *app.WorkspaceMember) (note.WorkspaceMember, error) { + var username *string + if m.Username != "" { + username = &m.Username + } + role, err := toWorkspaceRole(m.Role) + if err != nil { + return note.WorkspaceMember{}, err + } + return note.WorkspaceMember{ Id: m.ID, - Role: note.WorkspaceRole(m.Role), - Username: m.Username, - } + Role: role, + Username: username, + }, nil } func toWorkspaceTreeNote(n *app.WorkspaceTreeNote) note.WorkspaceTreeNote { - id := n.ID - updatedAt := n.UpdatedAt + var icon *string + if n.Icon != "" { + icon = &n.Icon + } return note.WorkspaceTreeNote{ - Id: &id, + Id: &n.ID, Name: n.Name, - Icon: n.Icon, - UpdatedAt: &updatedAt, + Icon: icon, + UpdatedAt: &n.UpdatedAt, } } func toWorkspaceTreeFolder(f *app.WorkspaceTreeFolder) note.WorkspaceTreeFolder { - id := f.ID - updatedAt := f.UpdatedAt + var icon *string + if f.Icon != "" { + icon = &f.Icon + } notes := make([]note.WorkspaceTreeNote, len(f.Notes)) for i, n := range f.Notes { notes[i] = toWorkspaceTreeNote(n) @@ -94,43 +142,54 @@ func toWorkspaceTreeFolder(f *app.WorkspaceTreeFolder) note.WorkspaceTreeFolder children[i] = 
toWorkspaceTreeFolder(c) } return note.WorkspaceTreeFolder{ - Id: &id, + Id: &f.ID, Name: f.Name, - Icon: f.Icon, + Icon: icon, Notes: notes, Children: children, - UpdatedAt: &updatedAt, + UpdatedAt: &f.UpdatedAt, } } -func toTrashedFolder(f *app.TrashedFolder) note.TrashedFolder { +func toTrashedFolder(f *app.TrashedFolder) (note.TrashedFolder, error) { + trashedBy, err := toTrashedBy(f.Trashed.TrashedBy) + if err != nil { + return note.TrashedFolder{}, err + } return note.TrashedFolder{ Id: f.ID, - Name: new(f.Name), + Name: &f.Name, Trashed: note.Trashed{ - TrashedBy: toTrashedBy(f.Trashed.TrashedBy), + TrashedBy: trashedBy, TrashedAt: f.Trashed.TrashedAt, }, - } + }, nil } -func toTrashedNote(n *app.TrashedNote) note.TrashedNote { +func toTrashedNote(n *app.TrashedNote) (note.TrashedNote, error) { + trashedBy, err := toTrashedBy(n.Trashed.TrashedBy) + if err != nil { + return note.TrashedNote{}, err + } return note.TrashedNote{ Id: n.ID, - Name: new(n.Name), + Name: &n.Name, Trashed: note.Trashed{ - TrashedBy: toTrashedBy(n.Trashed.TrashedBy), + TrashedBy: trashedBy, TrashedAt: n.Trashed.TrashedAt, }, - } + }, nil } func toNoteLink(n *app.NoteLink) note.NoteLink { - id := n.ID + var icon *string + if n.Icon != "" { + icon = &n.Icon + } return note.NoteLink{ - Id: &id, + Id: &n.ID, Name: n.Name, - Icon: n.Icon, + Icon: icon, } } @@ -140,8 +199,8 @@ func toGraph(g *app.Graph) note.Graph { nodes[i].Id = n.ID nodes[i].Name = n.Name nodes[i].Type = note.GraphNodesType(n.Type) - if n.Weight != nil { - w := float32(*n.Weight) + if n.Weight != 0 { + w := float32(n.Weight) nodes[i].Weight = &w } } @@ -156,19 +215,27 @@ func toGraph(g *app.Graph) note.Graph { } } -func toShowTrash(t *app.Trash) note.ShowTrash200JSONResponse { +func toShowTrash(t *app.Trash) (note.ShowTrash200JSONResponse, error) { notes := make([]note.TrashedNote, len(t.Notes)) for i, n := range t.Notes { - notes[i] = toTrashedNote(n) + trashedNote, err := toTrashedNote(n) + if err != nil { + return 
note.ShowTrash200JSONResponse{}, fmt.Errorf("invalid trashed note: %v", err) + } + notes[i] = trashedNote } folders := make([]note.TrashedFolder, len(t.Folders)) for i, f := range t.Folders { - folders[i] = toTrashedFolder(f) + trashedFolder, err := toTrashedFolder(f) + if err != nil { + return note.ShowTrash200JSONResponse{}, fmt.Errorf("invalid trashed folder: %v", err) + } + folders[i] = trashedFolder } return note.ShowTrash200JSONResponse{ Notes: notes, Folders: folders, - } + }, nil } func toGetNoteLinks(r *app.NoteLinkResult) note.GetNoteLinks200JSONResponse { @@ -186,13 +253,13 @@ func toGetNoteLinks(r *app.NoteLinkResult) note.GetNoteLinks200JSONResponse { } } -func toTrashedBy(t app.TrashedBy) note.TrashedBy { +func toTrashedBy(t app.TrashedBy) (note.TrashedBy, error) { switch t { case app.TrashedByParent: - return note.Parent + return note.Parent, nil case app.TrashedByPurpose: - return note.Purpose + return note.Purpose, nil default: - panic("invalid trashed by") + return note.TrashedBy(""), errs.NewInternal(fmt.Sprintf("invalid trashed by: %v", t), nil) } } diff --git a/internal/note/controller/http/note.go b/internal/note/controller/http/note.go index b2cf41ee..18ab8fc1 100644 --- a/internal/note/controller/http/note.go +++ b/internal/note/controller/http/note.go @@ -23,15 +23,19 @@ func (h *StrictHandler) CreateNote( if err != nil { return nil, errs.NewInternalGenerateID(err) } + var icon string + if request.Body.Icon != nil { + icon = *request.Body.Icon + } cmd := &app.CreateNote{ ID: id, Name: request.Body.Name, - Icon: request.Body.Icon, + Icon: icon, FolderID: *request.Body.FolderId, UserID: user.ID, } - err = h.App.CommandHandlers.CreateNoteHandler.Handle(ctx, cmd) + err = h.App.Cmds.CreateNoteHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -56,7 +60,7 @@ func (h *StrictHandler) PermanentlyDeleteNote( ID: request.NoteId, UserID: user.ID, } - err := h.App.CommandHandlers.DeleteNoteHandler.Handle(ctx, cmd) + err := 
h.App.Cmds.DeleteNoteHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -80,12 +84,15 @@ func (h *StrictHandler) GetNote( UserID: user.ID, } - result, err := h.App.QueryHandlers.GetNoteHandler.Handle(ctx, query) + result, err := h.App.Queries.GetNoteHandler.Handle(ctx, query) if err != nil { return nil, err } - dto := toNote(*result) + dto, err := toNote(*result) + if err != nil { + return nil, err + } return note.GetNote200JSONResponse(dto), nil } @@ -93,17 +100,22 @@ func (h *StrictHandler) GetNoteGraph( ctx context.Context, request note.GetNoteGraphRequestObject, ) (note.GetNoteGraphResponseObject, error) { + user, ok := commonhttp.UserFromContext(ctx) + if !ok { + return nil, errs.NewUnauthorized() + } var depth int if request.Params.Depth != nil { depth = *request.Params.Depth } query := &app.GetNoteGraph{ - ID: request.NoteId, - Depth: depth, + ID: request.NoteId, + Depth: depth, + UserID: user.ID, } - result, err := h.App.QueryHandlers.GetNoteGraphHandler.Handle(ctx, query) + result, err := h.App.Queries.GetNoteGraphHandler.Handle(ctx, query) if err != nil { return nil, err } @@ -116,14 +128,19 @@ func (h *StrictHandler) GetNoteLinks( ctx context.Context, request note.GetNoteLinksRequestObject, ) (note.GetNoteLinksResponseObject, error) { + user, ok := commonhttp.UserFromContext(ctx) + if !ok { + return nil, errs.NewUnauthorized() + } outgoingLinks := request.Params.OutgoingLinks != nil && *request.Params.OutgoingLinks backlinks := request.Params.Backlinks != nil && *request.Params.Backlinks query := &app.GetNoteLinks{ ID: request.NoteId, OutgoingLinks: outgoingLinks, Backlinks: backlinks, + UserID: user.ID, } - result, err := h.App.QueryHandlers.GetNoteLinksHandler.Handle(ctx, query) + result, err := h.App.Queries.GetNoteLinksHandler.Handle(ctx, query) if err != nil { return nil, err } @@ -153,7 +170,7 @@ func (h *StrictHandler) RenameNote( Name: request.Body.Name, UserID: user.ID, } - err := h.App.CommandHandlers.RenameNoteHandler.Handle(ctx, 
cmd) + err := h.App.Cmds.RenameNoteHandler.Handle(ctx, cmd) if err != nil { return nil, err } diff --git a/internal/note/controller/http/workspace.go b/internal/note/controller/http/workspace.go index f332cfd8..c569f18c 100644 --- a/internal/note/controller/http/workspace.go +++ b/internal/note/controller/http/workspace.go @@ -27,12 +27,12 @@ func (h *StrictHandler) CreateWorkspace( return nil, errs.NewInternalGenerateID(err) } cmd := &app.CreateWorkspace{ - ID: id, - Name: request.Body.Name, - Slug: request.Body.Slug, - UserID: user.ID, + ID: id, + Name: request.Body.Name, + Slug: request.Body.Slug, + OwnerID: user.ID, } - err = h.App.CommandHandlers.CreateWorkspaceHandler.Handle(ctx, cmd) + err = h.App.Cmds.CreateWorkspaceHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -57,7 +57,7 @@ func (h *StrictHandler) DeleteWorkspace( ID: request.WorkspaceId, UserID: user.ID, } - err := h.App.CommandHandlers.DeleteWorkspaceHandler.Handle(ctx, cmd) + err := h.App.Cmds.DeleteWorkspaceHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -69,10 +69,15 @@ func (h *StrictHandler) GetWorkspace( ctx context.Context, request note.GetWorkspaceRequestObject, ) (note.GetWorkspaceResponseObject, error) { + user, ok := commonhttp.UserFromContext(ctx) + if !ok { + return nil, errs.NewUnauthorized() + } query := &app.GetWorkspaceBySlug{ - Slug: request.WorkspaceSlug, + Slug: request.WorkspaceSlug, + UserID: user.ID, } - result, err := h.App.QueryHandlers.GetWorkspaceHandler.Handle(ctx, query) + result, err := h.App.Queries.GetWorkspaceHandler.Handle(ctx, query) if err != nil { return nil, err } @@ -88,7 +93,7 @@ func (h *StrictHandler) CheckWorkspaceSlugExists( query := &app.CheckWorkspaceSlugExists{ Slug: request.WorkspaceSlug, } - result, err := h.App.QueryHandlers.CheckWorkspaceSlugExistsHandler.Handle(ctx, query) + result, err := h.App.Queries.CheckWorkspaceSlugExistsHandler.Handle(ctx, query) if err != nil { return nil, err } @@ -111,7 +116,7 @@ func (h 
*StrictHandler) GetWorkspaceEvents( return nil, errs.NewUnauthorized() } - eventCh, err := h.WorkspaceEventPubSub.Subscribe(ctx, request.WorkspaceId, user.ID) + eventCh, err := h.WorkspaceEventHub.Subscribe(ctx, request.WorkspaceId, user.ID) if err != nil { return nil, errs.NewInternal("failed to subscribe to workspace events", err) } @@ -129,6 +134,10 @@ func (h *StrictHandler) GetWorkspaceGraph( ctx context.Context, request note.GetWorkspaceGraphRequestObject, ) (note.GetWorkspaceGraphResponseObject, error) { + user, ok := commonhttp.UserFromContext(ctx) + if !ok { + return nil, errs.NewUnauthorized() + } ignoreOrphans := false if request.Params.IncludeOrphans != nil { ignoreOrphans = !*request.Params.IncludeOrphans @@ -136,8 +145,9 @@ func (h *StrictHandler) GetWorkspaceGraph( query := &app.GetWorkspaceGraph{ ID: request.WorkspaceId, IgnoreOrphans: ignoreOrphans, + UserID: user.ID, } - result, err := h.App.QueryHandlers.GetWorkspaceGraphHandler.Handle(ctx, query) + result, err := h.App.Queries.GetWorkspaceGraphHandler.Handle(ctx, query) if err != nil { return nil, err } @@ -191,7 +201,7 @@ func (h *StrictHandler) MoveWorkspaceItems( FolderIDs: folderIDs, DestinationFolderID: destFolderID, } - err := h.App.CommandHandlers.MoveWorkspaceItemsHandler.Handle(ctx, cmd) + err := h.App.Cmds.MoveWorkspaceItemsHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -220,7 +230,7 @@ func (h *StrictHandler) RenameWorkspace( Name: request.Body.Name, UserID: user.ID, } - err := h.App.CommandHandlers.RenameWorkspaceHandler.Handle(ctx, cmd) + err := h.App.Cmds.RenameWorkspaceHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -253,7 +263,7 @@ func (h *StrictHandler) RestoreTrashedWorkspaceItems( NoteIDs: noteIDs, FolderIDs: folderIDs, } - err := h.App.CommandHandlers.RestoreTrashedWorkspaceItemsHandler.Handle(ctx, cmd) + err := h.App.Cmds.RestoreTrashedWorkspaceItemsHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -265,15 +275,23 @@ func (h 
*StrictHandler) ShowTrash( ctx context.Context, request note.ShowTrashRequestObject, ) (note.ShowTrashResponseObject, error) { + user, ok := commonhttp.UserFromContext(ctx) + if !ok { + return nil, errs.NewUnauthorized() + } query := &app.ShowTrash{ WorkspaceID: request.WorkspaceId, + UserID: user.ID, } - result, err := h.App.QueryHandlers.ShowTrashHandler.Handle(ctx, query) + result, err := h.App.Queries.ShowTrashHandler.Handle(ctx, query) if err != nil { return nil, err } - dto := toShowTrash(result) + dto, err := toShowTrash(result) + if err != nil { + return nil, err + } return note.ShowTrash200JSONResponse(dto), nil } @@ -292,7 +310,7 @@ func (h *StrictHandler) TrashWorkspaceItems( NoteIDs: *request.Body.NoteIds, FolderIDs: *request.Body.FolderIds, } - err := h.App.CommandHandlers.TrashWorkspaceItemsHandler.Handle(ctx, cmd) + err := h.App.Cmds.TrashWorkspaceItemsHandler.Handle(ctx, cmd) if err != nil { return nil, err } @@ -304,19 +322,29 @@ func (h *StrictHandler) GetWorkspaceTree( ctx context.Context, request note.GetWorkspaceTreeRequestObject, ) (note.GetWorkspaceTreeResponseObject, error) { - var depth *uint + user, ok := commonhttp.UserFromContext(ctx) + if !ok { + return nil, errs.NewUnauthorized() + } + var depth uint if request.Params.Depth != nil && *request.Params.Depth > 0 { - depth = new(uint(*request.Params.Depth)) + depth = uint(*request.Params.Depth) + } + + var rootFolderID uuid.UUID + if request.Params.RootFolderId != nil { + rootFolderID = *request.Params.RootFolderId } query := &app.GetWorkspaceTree{ WorkspaceID: request.WorkspaceId, - RootFolderID: request.Params.RootFolderId, + RootFolderID: rootFolderID, IncludeTrashed: request.Params.IncludeTrashed != nil && *request.Params.IncludeTrashed, Depth: depth, + UserID: user.ID, } - result, err := h.App.QueryHandlers.GetWorkspaceTreeHandler.Handle(ctx, query) + result, err := h.App.Queries.GetWorkspaceTreeHandler.Handle(ctx, query) if err != nil { return nil, err } @@ -357,7 +385,7 @@ func (h 
*StrictHandler) PermanentlyDeleteWorkspaceItems( NoteIDs: noteIDs, FolderIDs: folderIDs, } - err := h.App.CommandHandlers.PermanentlyDeleteWorkspaceItemsHandler.Handle(ctx, cmd) + err := h.App.Cmds.PermanentlyDeleteWorkspaceItemsHandler.Handle(ctx, cmd) if err != nil { return nil, err } diff --git a/internal/note/controller/integrationevent/integrationevent.go b/internal/note/controller/integrationevent/integrationevent.go deleted file mode 100644 index e086c5c3..00000000 --- a/internal/note/controller/integrationevent/integrationevent.go +++ /dev/null @@ -1,35 +0,0 @@ -package integrationevent - -import ( - "fmt" - - "github.com/ThreeDotsLabs/watermill/components/cqrs" - "github.com/notopia-uit/notopia/internal/note/app" -) - -type IntegrationEvent struct { - pubSub *app.IntegrationPubSub - app *app.Server -} - -func NewIntegrationEvent( - integrationPubSub *app.IntegrationPubSub, - app *app.Server, -) (*IntegrationEvent, error) { - err := integrationPubSub.EventProcessor().AddHandlers( - cqrs.NewEventHandler( - "DocumentCommittedHandler", - app.IntegrationEventHandlers.DocumentCommittedHandler.Handle, - ), - ) - if err != nil { - return nil, fmt.Errorf("failed to add event handlers to integration event processor: %w", err) - } - - return &IntegrationEvent{ - pubSub: integrationPubSub, - app: app, - }, nil -} - -var ProvideIntegrationEvent = NewIntegrationEvent diff --git a/internal/note/controller/wire.go b/internal/note/controller/wire.go index e7d6189d..8c41fd86 100644 --- a/internal/note/controller/wire.go +++ b/internal/note/controller/wire.go @@ -2,14 +2,14 @@ package controller import ( "github.com/goforj/wire" + "github.com/notopia-uit/notopia/internal/note/controller/event" "github.com/notopia-uit/notopia/internal/note/controller/grpc" "github.com/notopia-uit/notopia/internal/note/controller/health" "github.com/notopia-uit/notopia/internal/note/controller/http" - "github.com/notopia-uit/notopia/internal/note/controller/integrationevent" ) var ProviderSet 
= wire.NewSet( - integrationevent.ProviderSet, + event.ProviderSet, grpc.ProviderSet, http.ProviderSet, health.ProviderSet, diff --git a/internal/note/domain/common.go b/internal/note/domain/common.go index 33b637a8..363004d0 100644 --- a/internal/note/domain/common.go +++ b/internal/note/domain/common.go @@ -2,18 +2,38 @@ package domain import "time" -type TrashedBy string +type TrashedBy uint8 -var ( - TrashedByUnspecified TrashedBy = "unspecified" - TrashedByPurpose TrashedBy = "purpose" - TrashedByParent TrashedBy = "parent" +const ( + TrashedByUnspecified TrashedBy = iota + TrashedByPurpose + TrashedByParent ) +func (t TrashedBy) IsValid() bool { + switch t { + case TrashedByUnspecified, TrashedByPurpose, TrashedByParent: + return true + default: + return false + } +} + func (t TrashedBy) String() string { - return string(t) + switch t { + case TrashedByUnspecified: + return "unspecified" + case TrashedByPurpose: + return "purpose" + case TrashedByParent: + return "parent" + default: + return "unknown" + } } +// NOTE: if need, we can have ParseTrashedBy, but for now mostly we map from outside into + type Trashed struct { by TrashedBy at time.Time @@ -25,3 +45,7 @@ func NewTrashed(by TrashedBy, at time.Time) *Trashed { at: at, } } + +func (t *Trashed) By() TrashedBy { return t.by } + +func (t *Trashed) At() time.Time { return t.at } diff --git a/internal/note/domain/event.go b/internal/note/domain/event.go index 1a21046d..00f8881b 100644 --- a/internal/note/domain/event.go +++ b/internal/note/domain/event.go @@ -39,6 +39,7 @@ type Event interface { GetID() uuid.UUID GetOccurredAt() time.Time GetAggregateID() uuid.UUID + GetUserID() string } type BaseEvent struct { @@ -56,8 +57,10 @@ func (e *BaseEvent) GetOccurredAt() time.Time { return e.OccurredAt } func (e *BaseEvent) GetAggregateID() uuid.UUID { return e.AggregateID } -func NewBaseEvent(aggregateID uuid.UUID, userID string) *BaseEvent { - return &BaseEvent{ +func (e *BaseEvent) GetUserID() string { return 
e.UserID } + +func NewBaseEvent(aggregateID uuid.UUID, userID string) BaseEvent { + return BaseEvent{ ID: uuid.New(), OccurredAt: time.Now(), AggregateID: aggregateID, @@ -109,8 +112,8 @@ func GetEventType(e Event) EventType { type FolderCreatedEvent struct { BaseEvent - Name string `json:"name"` - Icon *string `json:"icon"` + Name string `json:"name"` + Icon string `json:"icon"` } type FolderDeletedEvent struct { @@ -119,14 +122,14 @@ type FolderDeletedEvent struct { type FolderUpdatedEvent struct { BaseEvent - Name string `json:"name"` - Icon *string `json:"icon"` + Name string `json:"name"` + Icon string `json:"icon"` } type NoteCreatedEvent struct { BaseEvent - Name string `json:"name"` - Icon *string `json:"icon"` + Name string `json:"name"` + Icon string `json:"icon"` } type FolderMovedEvent struct { @@ -153,7 +156,7 @@ type NoteDeletedEvent struct { type NoteUpdatedEvent struct { BaseEvent Name string `json:"name"` - Icon *string `json:"icon"` + Icon string `json:"icon"` Tags []string `json:"tags"` Size uint64 `json:"size"` FolderID uuid.UUID `json:"folderId"` diff --git a/internal/note/domain/folder.go b/internal/note/domain/folder.go index b77acb29..c6ef4657 100644 --- a/internal/note/domain/folder.go +++ b/internal/note/domain/folder.go @@ -10,10 +10,11 @@ import ( type Folder struct { id uuid.UUID name string - icon *string + icon string workspaceID uuid.UUID folderHierarchy FolderHierarchy trashed *Trashed + deleted bool events []Event } @@ -21,11 +22,11 @@ type Folder struct { func NewFolder( id uuid.UUID, name string, - icon *string, + icon string, workspaceID uuid.UUID, folderHierarchy FolderHierarchy, userID string, -) (*Folder, errs.Error) { +) (*Folder, error) { if name == "" { return nil, errs.EmptyFolderName } @@ -36,12 +37,13 @@ func NewFolder( workspaceID: workspaceID, folderHierarchy: folderHierarchy, trashed: nil, + deleted: false, events: []Event{}, } - folder.AddEvent( + folder.addEvent( &FolderCreatedEvent{ - BaseEvent: 
*NewBaseEvent(folder.id, userID), + BaseEvent: NewBaseEvent(folder.id, userID), Name: folder.name, Icon: folder.icon, }, @@ -52,10 +54,11 @@ func NewFolder( func UnmarshalFolder( id uuid.UUID, name string, - icon *string, + icon string, workspaceID uuid.UUID, folderHierarchy FolderHierarchy, trashed *Trashed, + deleted bool, ) *Folder { return &Folder{ id: id, @@ -64,6 +67,7 @@ func UnmarshalFolder( workspaceID: workspaceID, folderHierarchy: folderHierarchy, trashed: trashed, + deleted: deleted, events: []Event{}, } @@ -79,21 +83,21 @@ func (f *Folder) Name() string { func (f *Folder) Rename(name string, userID string) { f.name = name - f.AddEvent(&FolderUpdatedEvent{ - BaseEvent: *NewBaseEvent(f.id, userID), + f.addEvent(&FolderUpdatedEvent{ + BaseEvent: NewBaseEvent(f.id, userID), Name: f.name, Icon: f.icon, }) } -func (f *Folder) Icon() *string { +func (f *Folder) Icon() string { return f.icon } func (f *Folder) SetIcon(icon string, userID string) { - f.icon = &icon - f.AddEvent(&FolderUpdatedEvent{ - BaseEvent: *NewBaseEvent(f.id, userID), + f.icon = icon + f.addEvent(&FolderUpdatedEvent{ + BaseEvent: NewBaseEvent(f.id, userID), Name: f.name, Icon: f.icon, }) @@ -107,7 +111,7 @@ func (f *Folder) FolderHierarchy() FolderHierarchy { return f.folderHierarchy } -func (f *Folder) ParentID() *uuid.UUID { +func (f *Folder) ParentID() uuid.UUID { return f.folderHierarchy.ParentID() } @@ -116,11 +120,10 @@ func (f *Folder) IsRoot() bool { } func (f *Folder) MoveToFolder(folderID uuid.UUID, userID string) { - hierarchy := NewFolderHierarchy(&folderID) - f.folderHierarchy = *hierarchy - f.AddEvent( + f.folderHierarchy = NewFolderHierarchy(folderID) + f.addEvent( &FolderMovedEvent{ - BaseEvent: *NewBaseEvent(f.id, userID), + BaseEvent: NewBaseEvent(f.id, userID), ParentID: folderID, }, ) @@ -130,46 +133,44 @@ func (f *Folder) IsTrashed() bool { return f.trashed != nil } -func (f *Folder) TrashedBy() *TrashedBy { - if f.trashed == nil { - return nil - } - return 
&f.trashed.by +func (f *Folder) TrashedBy() TrashedBy { + return f.trashed.By() } -func (f *Folder) TrashedByString() *string { - if f.trashed == nil { - return nil - } - return new(f.trashed.by.String()) -} - -func (f *Folder) TrashedAt() *time.Time { - if f.trashed == nil { - return nil - } - return &f.trashed.at +func (f *Folder) TrashedAt() time.Time { + return f.trashed.At() } -func (f *Folder) Trash(trashedBy TrashedBy, userID string) errs.Error { +func (f *Folder) Trash(trashedBy TrashedBy, userID string) error { if f.trashed != nil { return errs.NewFolderAlreadyTrashed(f.id) } f.trashed = NewTrashed(trashedBy, time.Now()) - f.AddEvent(&FolderTrashedEvent{ - BaseEvent: *NewBaseEvent(f.id, userID), + f.addEvent(&FolderTrashedEvent{ + BaseEvent: NewBaseEvent(f.id, userID), }) return nil } func (f *Folder) Restore(userID string) { f.trashed = nil - f.AddEvent(&FolderRestoredEvent{ - BaseEvent: *NewBaseEvent(f.id, userID), + f.addEvent(&FolderRestoredEvent{ + BaseEvent: NewBaseEvent(f.id, userID), + }) +} + +func (f *Folder) Deleted() bool { + return f.deleted +} + +func (f *Folder) PermanentlyDelete(userID string) { + f.deleted = true + f.addEvent(&FolderPermanentlyDeletedEvent{ + BaseEvent: NewBaseEvent(f.id, userID), }) } -func (f *Folder) AddEvent(event Event) { +func (f *Folder) addEvent(event Event) { f.events = append(f.events, event) } @@ -180,19 +181,19 @@ func (f *Folder) PopEvents() []Event { } type FolderHierarchy struct { - parentID *uuid.UUID + parentID uuid.UUID } -func NewFolderHierarchy(parentID *uuid.UUID) *FolderHierarchy { - return &FolderHierarchy{ +func NewFolderHierarchy(parentID uuid.UUID) FolderHierarchy { + return FolderHierarchy{ parentID: parentID, } } -func (fh *FolderHierarchy) ParentID() *uuid.UUID { +func (fh *FolderHierarchy) ParentID() uuid.UUID { return fh.parentID } func (fh *FolderHierarchy) IsRoot() bool { - return fh.parentID == nil + return fh.parentID == uuid.Nil } diff --git a/internal/note/domain/folderrepo.go 
b/internal/note/domain/folderrepo.go index ef72b996..d5f9a882 100644 --- a/internal/note/domain/folderrepo.go +++ b/internal/note/domain/folderrepo.go @@ -4,84 +4,22 @@ import ( "context" "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" ) type FolderRepo interface { - GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*Folder, errs.Error) - GetMany(ctx context.Context, params *FolderRepoGetManyParams) ([]*Folder, errs.Error) - GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, errs.Error) - Save(ctx context.Context, folder *Folder) errs.Error - SaveMany(ctx context.Context, folders []*Folder) errs.Error - AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, errs.Error) - PermanentlyDeleteByID(ctx context.Context, id uuid.UUID) errs.Error - PermanentlyDeleteByIDs(ctx context.Context, ids uuid.UUIDs) errs.Error + GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*Folder, error) + GetMany(ctx context.Context, params *FolderRepoGetManyParams) ([]*Folder, error) + GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, error) + Save(ctx context.Context, folder *Folder) error + SaveMany(ctx context.Context, folders []*Folder) error + AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, error) + GetParentIDs(ctx context.Context, id uuid.UUID, forUpdate bool) ([]uuid.UUID, error) } type FolderRepoGetManyParams struct { - workspaceID *uuid.UUID - ids []uuid.UUID - trashedBy *TrashedBy - isTrashed bool - forUpdate bool -} - -func NewFolderRepoGetManyParamsByIDs(ids []uuid.UUID) *FolderRepoGetManyParams { - //exhaustruct:ignore - return &FolderRepoGetManyParams{ - ids: ids, - } -} - -func NewFolderRepoGetManyParamsByWorkspaceID(workspaceID uuid.UUID) *FolderRepoGetManyParams { - //exhaustruct:ignore - return &FolderRepoGetManyParams{ - workspaceID: &workspaceID, - } -} - -func (p *FolderRepoGetManyParams) WithWorkspaceID(workspaceID 
uuid.UUID) *FolderRepoGetManyParams { - p.workspaceID = &workspaceID - return p -} - -func (p *FolderRepoGetManyParams) WithIDs(ids []uuid.UUID) *FolderRepoGetManyParams { - p.ids = ids - return p -} - -func (p *FolderRepoGetManyParams) WithTrashed() *FolderRepoGetManyParams { - p.isTrashed = true - return p -} - -func (p *FolderRepoGetManyParams) WithTrashedBy(trashedBy TrashedBy) *FolderRepoGetManyParams { - p.trashedBy = &trashedBy - p.isTrashed = true - return p -} - -func (p *FolderRepoGetManyParams) WithForUpdate() *FolderRepoGetManyParams { - p.forUpdate = true - return p -} - -func (p *FolderRepoGetManyParams) WorkspaceID() *uuid.UUID { - return p.workspaceID -} - -func (p *FolderRepoGetManyParams) IDs() []uuid.UUID { - return p.ids -} - -func (p *FolderRepoGetManyParams) IsTrashed() bool { - return p.isTrashed -} - -func (p *FolderRepoGetManyParams) TrashedBy() *TrashedBy { - return p.trashedBy -} - -func (p *FolderRepoGetManyParams) ForUpdate() bool { - return p.forUpdate + WorkspaceID uuid.UUID + IDs []uuid.UUID + TrashedBy TrashedBy + TrashOnly bool + ForUpdate bool } diff --git a/internal/note/domain/note.go b/internal/note/domain/note.go index 14dd2b19..ef7f41e4 100644 --- a/internal/note/domain/note.go +++ b/internal/note/domain/note.go @@ -10,12 +10,13 @@ import ( type Note struct { id uuid.UUID name string - icon *string + icon string tags []string size uint64 folderID uuid.UUID outgoingLinks uuid.UUIDs trashed *Trashed + deleted bool events []Event } @@ -23,7 +24,7 @@ type Note struct { func NewNote( id uuid.UUID, name string, - icon *string, + icon string, folderID uuid.UUID, ) *Note { if name == "" { @@ -38,6 +39,7 @@ func NewNote( size: 0, outgoingLinks: []uuid.UUID{}, trashed: nil, + deleted: false, events: []Event{}, } @@ -46,12 +48,13 @@ func NewNote( func UnmarshalNote( id uuid.UUID, name string, - icon *string, + icon string, tags []string, size uint64, folderID uuid.UUID, outgoingLinks uuid.UUIDs, trashed *Trashed, + deleted bool, ) *Note 
{ return &Note{ id: id, @@ -62,23 +65,20 @@ func UnmarshalNote( folderID: folderID, outgoingLinks: outgoingLinks, trashed: trashed, + deleted: deleted, events: []Event{}, } } -func (n *Note) ID() uuid.UUID { - return n.id -} +func (n *Note) ID() uuid.UUID { return n.id } -func (n *Note) Name() string { - return n.name -} +func (n *Note) Name() string { return n.name } func (n *Note) Rename(name string, userID string) { n.name = name - n.AddEvent(&NoteUpdatedEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.addEvent(&NoteUpdatedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), Name: n.name, Icon: n.icon, Tags: n.tags, @@ -88,14 +88,12 @@ func (n *Note) Rename(name string, userID string) { }) } -func (n *Note) Icon() *string { - return n.icon -} +func (n *Note) Icon() string { return n.icon } func (n *Note) SetIcon(icon string, userID string) { - n.icon = &icon - n.AddEvent(&NoteUpdatedEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.icon = icon + n.addEvent(&NoteUpdatedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), Name: n.name, Icon: n.icon, Tags: n.tags, @@ -105,14 +103,12 @@ func (n *Note) SetIcon(icon string, userID string) { }) } -func (n *Note) Tags() []string { - return n.tags -} +func (n *Note) Tags() []string { return n.tags } func (n *Note) SetTags(tags []string, userID string) { n.tags = tags - n.AddEvent(&NoteUpdatedEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.addEvent(&NoteUpdatedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), Name: n.name, Icon: n.icon, Tags: n.tags, @@ -128,8 +124,8 @@ func (n *Note) Size() uint64 { func (n *Note) SetSize(size uint64, userID string) { n.size = size - n.AddEvent(&NoteUpdatedEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.addEvent(&NoteUpdatedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), Name: n.name, Icon: n.icon, Tags: n.tags, @@ -139,26 +135,22 @@ func (n *Note) SetSize(size uint64, userID string) { }) } -func (n *Note) FolderID() uuid.UUID { - return n.folderID -} +func (n *Note) FolderID() 
uuid.UUID { return n.folderID } func (n *Note) MoveToFolder(folderID uuid.UUID, userID string) { n.folderID = folderID - n.AddEvent(&NoteMovedEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.addEvent(&NoteMovedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), FolderID: n.folderID, }) } -func (n *Note) OutgoingLinks() uuid.UUIDs { - return n.outgoingLinks -} +func (n *Note) OutgoingLinks() uuid.UUIDs { return n.outgoingLinks } func (n *Note) SetOutgoingLinks(outgoingLinks uuid.UUIDs, userID string) { n.outgoingLinks = outgoingLinks - n.AddEvent(&NoteUpdatedEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.addEvent(&NoteUpdatedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), Name: n.name, Icon: n.icon, Tags: n.tags, @@ -172,46 +164,44 @@ func (n *Note) IsTrashed() bool { return n.trashed != nil } -func (n *Note) TrashedBy() *TrashedBy { - if n.trashed == nil { - return nil - } - return &n.trashed.by -} - -func (n *Note) TrashedByString() *string { - if n.trashed == nil { - return nil - } - return new(n.trashed.by.String()) +func (n *Note) TrashedBy() TrashedBy { + return n.trashed.By() } -func (n *Note) TrashedAt() *time.Time { - if n.trashed == nil { - return nil - } - return &n.trashed.at +func (n *Note) TrashedAt() time.Time { + return n.trashed.At() } -func (n *Note) Trash(trashedBy TrashedBy, userID string) errs.Error { +func (n *Note) Trash(trashedBy TrashedBy, userID string) error { if n.trashed != nil { return errs.NewNoteAlreadyTrashed(n.id) } n.trashed = NewTrashed(trashedBy, time.Now()) - n.AddEvent(&NoteTrashedEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.addEvent(&NoteTrashedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), }) return nil } func (n *Note) Restore(userID string) { n.trashed = nil - n.AddEvent(&NoteRestoredEvent{ - BaseEvent: *NewBaseEvent(n.id, userID), + n.addEvent(&NoteRestoredEvent{ + BaseEvent: NewBaseEvent(n.id, userID), + }) +} + +func (n *Note) Deleted() bool { + return n.deleted +} + +func (n *Note) Delete(userID string) { 
+ n.deleted = true + n.addEvent(&NoteDeletedEvent{ + BaseEvent: NewBaseEvent(n.id, userID), }) } -func (n *Note) AddEvent(event Event) { +func (n *Note) addEvent(event Event) { n.events = append(n.events, event) } diff --git a/internal/note/domain/noterepo.go b/internal/note/domain/noterepo.go index c808962f..1887d506 100644 --- a/internal/note/domain/noterepo.go +++ b/internal/note/domain/noterepo.go @@ -4,85 +4,21 @@ import ( "context" "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" ) -type NoteRepoGetManyParams struct { - workspaceID *uuid.UUID - ids []uuid.UUID - trashedBy *TrashedBy - isTrashed *bool - forUpdate bool -} - -func NewNoteRepoGetManyParamsByIDs(ids []uuid.UUID) *NoteRepoGetManyParams { - //exhaustruct:ignore - return &NoteRepoGetManyParams{ - ids: ids, - } -} - -func NewNoteRepoGetManyParamsByWorkspaceID(workspaceID uuid.UUID) *NoteRepoGetManyParams { - //exhaustruct:ignore - return &NoteRepoGetManyParams{ - workspaceID: &workspaceID, - } -} - -func (p *NoteRepoGetManyParams) WithWorkspaceID(workspaceID uuid.UUID) *NoteRepoGetManyParams { - p.workspaceID = &workspaceID - return p -} - -func (p *NoteRepoGetManyParams) WithIDs(ids []uuid.UUID) *NoteRepoGetManyParams { - p.ids = ids - return p -} - -func (p *NoteRepoGetManyParams) WithTrashedBy(trashedBy TrashedBy) *NoteRepoGetManyParams { - p.trashedBy = &trashedBy - p.isTrashed = new(bool) - *p.isTrashed = true - return p -} - -func (p *NoteRepoGetManyParams) WithIsTrashed(isTrashed bool) *NoteRepoGetManyParams { - p.isTrashed = &isTrashed - return p -} - -func (p *NoteRepoGetManyParams) WithForUpdate() *NoteRepoGetManyParams { - p.forUpdate = true - return p -} - -func (p *NoteRepoGetManyParams) WorkspaceID() *uuid.UUID { - return p.workspaceID -} - -func (p *NoteRepoGetManyParams) IDs() []uuid.UUID { - return p.ids -} - -func (p *NoteRepoGetManyParams) TrashedBy() *TrashedBy { - return p.trashedBy -} - -func (p *NoteRepoGetManyParams) IsTrashed() *bool { - return 
p.isTrashed -} - -func (p *NoteRepoGetManyParams) ForUpdate() bool { - return p.forUpdate +type NoteRepo interface { + GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*Note, error) + GetMany(ctx context.Context, params *NoteRepoGetManyParams) ([]*Note, error) + GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, error) + Save(ctx context.Context, note *Note) error + SaveMany(ctx context.Context, notes []*Note) error + AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, error) } -type NoteRepo interface { - GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*Note, errs.Error) - GetMany(ctx context.Context, params *NoteRepoGetManyParams) ([]*Note, errs.Error) - GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, errs.Error) - Save(ctx context.Context, note *Note) errs.Error - SaveMany(ctx context.Context, notes []*Note) errs.Error - AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, errs.Error) - PermanentlyDeleteByID(ctx context.Context, id uuid.UUID) errs.Error - PermanentlyDeleteByIDs(ctx context.Context, ids uuid.UUIDs) errs.Error +type NoteRepoGetManyParams struct { + WorkspaceID uuid.UUID + IDs []uuid.UUID + TrashedBy TrashedBy + TrashOnly bool + ForUpdate bool } diff --git a/internal/note/domain/noteservice.go b/internal/note/domain/noteservice.go index a14669fe..79337cea 100644 --- a/internal/note/domain/noteservice.go +++ b/internal/note/domain/noteservice.go @@ -14,7 +14,7 @@ func NewNoteService() *NoteService { var ProvideNoteService = NewNoteService -func (s *NoteService) UpdateNoteSizeBasedOnContent(note *Note, content any, userID string) errs.Error { +func (s *NoteService) UpdateNoteSizeBasedOnContent(note *Note, content any, userID string) error { b, err := json.Marshal(content) if err != nil { return errs.NewNoteFailToMarshalDocumentContent(note.ID(), content, err) diff --git a/internal/note/domain/trashservice.go 
b/internal/note/domain/trashservice.go index 02d6ecf6..55e95f25 100644 --- a/internal/note/domain/trashservice.go +++ b/internal/note/domain/trashservice.go @@ -2,7 +2,6 @@ package domain import ( "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" ) type TrashService struct{} @@ -13,7 +12,7 @@ func NewTrashService() *TrashService { var ProvideTrashService = NewTrashService -func (s *TrashService) TrashNotes(notes []*Note, userID string) errs.Error { +func (s *TrashService) TrashNotes(notes []*Note, userID string) error { for i := range notes { if err := notes[i].Trash(TrashedByPurpose, userID); err != nil { return err @@ -27,7 +26,7 @@ func (s *TrashService) TrashFolders( workspaceFolders *[]*Folder, targetFolders []*Folder, userID string, -) errs.Error { +) error { for i := range targetFolders { if err := targetFolders[i].Trash(TrashedByPurpose, userID); err != nil { return err @@ -45,10 +44,10 @@ func (s *TrashService) cascadeTrashChildren( workspaceFolders *[]*Folder, folderID uuid.UUID, userID string, -) errs.Error { +) error { for i := range *workspaceFolders { folder := (*workspaceFolders)[i] - if folder.ParentID() != nil && *folder.ParentID() == folderID && !folder.IsTrashed() { + if folder.ParentID() != uuid.Nil && folder.ParentID() == folderID && !folder.IsTrashed() { if err := folder.Trash(TrashedByParent, userID); err != nil { return err } @@ -71,7 +70,7 @@ func (s *TrashService) cascadeTrashChildren( return nil } -func (s *TrashService) RestoreNotes(notes []*Note, userID string) errs.Error { +func (s *TrashService) RestoreNotes(notes []*Note, userID string) error { for i := range notes { notes[i].Restore(userID) } @@ -83,7 +82,7 @@ func (s *TrashService) RestoreFolders( trashedFolders *[]*Folder, targetFolders []*Folder, userID string, -) errs.Error { +) error { for i := range targetFolders { targetFolders[i].Restore(userID) @@ -99,12 +98,12 @@ func (s *TrashService) cascadeRestoreChildrenByParent( trashedFolders *[]*Folder, 
folderID uuid.UUID, userID string, -) errs.Error { +) error { for i := range *trashedFolders { folder := (*trashedFolders)[i] - if folder.ParentID() != nil && *folder.ParentID() == folderID && folder.IsTrashed() { + if folder.ParentID() != uuid.Nil && folder.ParentID() == folderID && folder.IsTrashed() { trashedBy := folder.TrashedBy() - if trashedBy != nil && *trashedBy == TrashedByParent { + if trashedBy != TrashedByUnspecified && trashedBy == TrashedByParent { folder.Restore(userID) if err := s.cascadeRestoreChildrenByParent(trashedNotes, trashedFolders, folder.ID(), userID); err != nil { @@ -118,7 +117,7 @@ func (s *TrashService) cascadeRestoreChildrenByParent( note := (*trashedNotes)[i] if note.FolderID() == folderID && note.IsTrashed() { trashedBy := note.TrashedBy() - if trashedBy != nil && *trashedBy == TrashedByParent { + if trashedBy != TrashedByUnspecified && trashedBy == TrashedByParent { note.Restore(userID) } } diff --git a/internal/note/domain/uow.go b/internal/note/domain/uow.go index d479352a..cdf6b5f3 100644 --- a/internal/note/domain/uow.go +++ b/internal/note/domain/uow.go @@ -2,8 +2,6 @@ package domain import ( "context" - - "github.com/notopia-uit/notopia/internal/note/errs" ) type RepoRegistry interface { @@ -13,5 +11,5 @@ type RepoRegistry interface { } type UnitOfWork interface { - Execute(ctx context.Context, fn func(repoRegistry RepoRegistry) errs.Error) errs.Error + Execute(ctx context.Context, fn func(repoRegistry RepoRegistry) error) error } diff --git a/internal/note/domain/workspace.go b/internal/note/domain/workspace.go index db8bb2c4..024b9874 100644 --- a/internal/note/domain/workspace.go +++ b/internal/note/domain/workspace.go @@ -22,7 +22,7 @@ func NewWorkspace( name string, slug string, rootFolderID uuid.UUID, -) (*Workspace, errs.Error) { +) (*Workspace, error) { if name == "" { return nil, errs.EmptyFolderName } @@ -50,8 +50,8 @@ func (w *Workspace) Name() string { func (w *Workspace) Rename(name string, userID string) { 
w.name = name - w.AddEvent(&WorkspaceUpdatedEvent{ - BaseEvent: *NewBaseEvent(w.id, userID), + w.addEvent(&WorkspaceUpdatedEvent{ + BaseEvent: NewBaseEvent(w.id, userID), Name: w.name, Slug: w.slug, }) @@ -71,12 +71,12 @@ func (w *Workspace) DeletedAt() *time.Time { func (w *Workspace) Delete(userID string) { w.deletedAt = new(time.Now()) - w.AddEvent(&WorkspaceDeletedEvent{ - BaseEvent: *NewBaseEvent(w.id, userID), + w.addEvent(&WorkspaceDeletedEvent{ + BaseEvent: NewBaseEvent(w.id, userID), }) } -func (w *Workspace) AddEvent(event Event) { +func (w *Workspace) addEvent(event Event) { w.event = append(w.event, event) } diff --git a/internal/note/domain/workspacerepo.go b/internal/note/domain/workspacerepo.go index ca0f9f0d..9e7bb41d 100644 --- a/internal/note/domain/workspacerepo.go +++ b/internal/note/domain/workspacerepo.go @@ -4,13 +4,12 @@ import ( "context" "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/errs" ) type WorkspaceRepo interface { - GetBySlug(ctx context.Context, slug string, forUpdate bool) (*Workspace, errs.Error) - GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*Workspace, errs.Error) - GetIDBySlug(ctx context.Context, slug string) (*uuid.UUID, errs.Error) - CheckSlugExists(ctx context.Context, slug string) (bool, errs.Error) - Save(ctx context.Context, workspace *Workspace) errs.Error + GetBySlug(ctx context.Context, slug string, forUpdate bool) (*Workspace, error) + GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*Workspace, error) + GetIDBySlug(ctx context.Context, slug string) (*uuid.UUID, error) + CheckSlugExists(ctx context.Context, slug string) (bool, error) + Save(ctx context.Context, workspace *Workspace) error } diff --git a/internal/note/infra/common/kafkapublisher.go b/internal/note/infra/common/kafkapublisher.go new file mode 100644 index 00000000..802e2c8e --- /dev/null +++ b/internal/note/infra/common/kafkapublisher.go @@ -0,0 +1,40 @@ +package common + +import ( + 
"github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka" + "github.com/notopia-uit/notopia/internal/note/infra/integrationpublisher" + "github.com/notopia-uit/notopia/internal/note/infra/outbox" + commonconfig "github.com/notopia-uit/notopia/pkg/common/config" +) + +type KafkaPublisher struct { + *kafka.Publisher +} + +func NewKafkaPublisher( + cfg *commonconfig.Kafka, + logger watermill.LoggerAdapter, + tracer kafka.SaramaTracer, +) (*KafkaPublisher, error) { + publisher, err := kafka.NewPublisher( + kafka.PublisherConfig{ + Brokers: cfg.Brokers, + Tracer: tracer, + }, + logger, + ) + if err != nil { + return nil, err + } + return &KafkaPublisher{ + Publisher: publisher, + }, nil +} + +var ProvideKafkaPublisher = NewKafkaPublisher + +var ( + _ outbox.Publisher = (*KafkaPublisher)(nil) + _ integrationpublisher.Publisher = (*KafkaPublisher)(nil) +) diff --git a/internal/note/infra/common/wire.go b/internal/note/infra/common/wire.go new file mode 100644 index 00000000..07584029 --- /dev/null +++ b/internal/note/infra/common/wire.go @@ -0,0 +1,17 @@ +package common + +import ( + "github.com/goforj/wire" + "github.com/notopia-uit/notopia/internal/note/infra/integrationpublisher" + "github.com/notopia-uit/notopia/internal/note/infra/outbox" +) + +var KafkaPublisherProviderSet = wire.NewSet( + ProvideKafkaPublisher, + wire.Bind(new(outbox.Publisher), new(*KafkaPublisher)), + wire.Bind(new(integrationpublisher.Publisher), new(*KafkaPublisher)), +) + +var ProviderSet = wire.NewSet( + KafkaPublisherProviderSet, +) diff --git a/internal/note/infra/integrationpublisher/integrationpublisher.go b/internal/note/infra/integrationpublisher/integrationpublisher.go new file mode 100644 index 00000000..d21db142 --- /dev/null +++ b/internal/note/infra/integrationpublisher/integrationpublisher.go @@ -0,0 +1,97 @@ +package integrationpublisher + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/ThreeDotsLabs/watermill" + 
"github.com/ThreeDotsLabs/watermill/message" + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/pkg/api/share" +) + +func transformIntegrationEvent(event app.IntegrationEvent) (any, bool) { + switch e := event.(type) { + case app.IntegrationEventNoteCreated: + var icon *string + if e.Icon != "" { + icon = &e.Icon + } + return &share.NoteCreatedEvent{ + Id: e.ID, + Icon: icon, + Name: e.Name, + }, true + case app.IntegrationEventNoteDeleted: + return &share.NoteDeletedEvent{ + Id: e.ID, + }, true + case app.IntegrationEventNoteUpdated: + var icon *string + if e.Icon != "" { + icon = &e.Icon + } + return &share.NoteUpdatedEvent{ + Id: e.ID, + Name: e.Name, + Icon: icon, + Tags: e.Tags, + FolderId: e.FolderID, + UpdatedAt: e.UpdatedAt, + }, true + } + return nil, false +} + +func getIntegrationEventTopic(event app.IntegrationEvent) (string, bool) { + switch event.(type) { + case app.IntegrationEventNoteCreated: + return "events.integration.note.note.created", true + case app.IntegrationEventNoteDeleted: + return "events.integration.note.note.deleted", true + case app.IntegrationEventNoteUpdated: + return "events.integration.note.note.updated", true + } + return "", false +} + +type Publisher message.Publisher + +type IntegrationPublisher struct { + publisher message.Publisher +} + +var _ app.IntegrationPublisher = (*IntegrationPublisher)(nil) + +func NewIntegrationPublisher( + publisher Publisher, +) (*IntegrationPublisher, error) { + return &IntegrationPublisher{ + publisher: publisher, + }, nil +} + +var ProvideIntegrationPublisher = NewIntegrationPublisher + +func (p *IntegrationPublisher) Publish(ctx context.Context, events ...app.IntegrationEvent) error { + for _, event := range events { + transformedEvent, ok := transformIntegrationEvent(event) + if !ok { + return fmt.Errorf("cannot convert event to integration event: %T", event) + } + topic, ok := getIntegrationEventTopic(event) + if !ok { + return fmt.Errorf("cannot get 
topic for integration event: %T", event) + } + payload, err := json.Marshal(transformedEvent) + if err != nil { + return fmt.Errorf("failed to marshal integration event: %w", err) + } + msg := message.NewMessage(watermill.NewUUID(), payload) + if err := p.publisher.Publish(topic, msg); err != nil { + return fmt.Errorf("failed to publish integration event: %w", err) + } + } + return nil +} diff --git a/internal/note/infra/integrationpublisher/wire.go b/internal/note/infra/integrationpublisher/wire.go new file mode 100644 index 00000000..be09a87d --- /dev/null +++ b/internal/note/infra/integrationpublisher/wire.go @@ -0,0 +1,11 @@ +package integrationpublisher + +import ( + "github.com/goforj/wire" + "github.com/notopia-uit/notopia/internal/note/app" +) + +var ProviderSet = wire.NewSet( + ProvideIntegrationPublisher, + wire.Bind(new(app.IntegrationPublisher), new(*IntegrationPublisher)), +) diff --git a/internal/note/infra/outbox/common.go b/internal/note/infra/outbox/common.go new file mode 100644 index 00000000..841a71a4 --- /dev/null +++ b/internal/note/infra/outbox/common.go @@ -0,0 +1,18 @@ +package outbox + +import ( + "github.com/ThreeDotsLabs/watermill-sql/v4/pkg/sql" + "github.com/notopia-uit/notopia/internal/note/config" +) + +func NewSchemaAdapter( + cfg *config.DomainEvent, +) *sql.DefaultPostgreSQLSchema { + return &sql.DefaultPostgreSQLSchema{ + GenerateMessagesTableName: func(_ string) string { + return cfg.OutboxTableName + }, + } +} + +var ProvideSchemaAdapter = NewSchemaAdapter diff --git a/internal/note/infra/outbox/forwarder.go b/internal/note/infra/outbox/forwarder.go new file mode 100644 index 00000000..9e206b5d --- /dev/null +++ b/internal/note/infra/outbox/forwarder.go @@ -0,0 +1,115 @@ +package outbox + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill-sql/v4/pkg/sql" + "github.com/ThreeDotsLabs/watermill/components/forwarder" + 
"github.com/ThreeDotsLabs/watermill/message" + "github.com/ThreeDotsLabs/watermill/message/router/middleware" + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/notopia-uit/notopia/internal/note/component" + "github.com/notopia-uit/notopia/internal/note/config" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgrepo" +) + +type ForwarderPublisher struct { + workspaceIDKey string + aggregateIDKey string + userIDKey string + publisher message.Publisher +} + +var _ pgrepo.Publisher = (*ForwarderPublisher)(nil) + +// TODO: create topic, metadata... +func (p *ForwarderPublisher) PublishWorkspaceItem(ctx context.Context, event domain.Event, workspaceID uuid.UUID) error { + payload, err := json.Marshal(event) + if err != nil { + return fmt.Errorf("failed to marshal event for forwarder publisher: %w", err) + } + msgID := watermill.NewUUID() + msg := message.NewMessage(msgID, payload) + middleware.SetCorrelationID(msgID, msg) + msg.Metadata.Set(p.workspaceIDKey, workspaceID.String()) + msg.Metadata.Set(p.aggregateIDKey, event.GetAggregateID().String()) + msg.Metadata.Set(p.userIDKey, event.GetUserID()) + topic, ok := component.DomainEventToTopic(event) + if !ok { + return fmt.Errorf("failed to get forwader event bus topic for event type: %T", event) + } + if err := p.publisher.Publish(topic, msg); err != nil { + return fmt.Errorf("failed to publish forwarder SQL event to event bus: %w", err) + } + return nil +} + +func (p *ForwarderPublisher) Publish(ctx context.Context, event domain.Event) error { + payload, err := json.Marshal(event) + if err != nil { + return fmt.Errorf("failed to marshal event for forwarder publisher: %w", err) + } + msg := message.NewMessage(watermill.NewUUID(), payload) + topic, ok := component.DomainEventToTopic(event) + if !ok { + return fmt.Errorf("failed to get forwader event bus topic for event type: %T", event) + } + if err := p.publisher.Publish(topic, 
msg); err != nil { + return fmt.Errorf("failed to publish forwarder SQL event to event bus: %w", err) + } + return nil +} + +type FromPersistenceToQSLForwarder struct { + workspaceIDKey string + aggregateIDKey string + userIDKey string + logger watermill.LoggerAdapter + schemaAdapter sql.SchemaAdapter +} + +var _ pgrepo.PublisherFactory = (*FromPersistenceToQSLForwarder)(nil) + +func NewFromPersistenceToQSLForwarder( + domainEventCfg config.DomainEvent, + logger watermill.LoggerAdapter, + schemaAdapter sql.SchemaAdapter, +) *FromPersistenceToQSLForwarder { + return &FromPersistenceToQSLForwarder{ + workspaceIDKey: domainEventCfg.MessageWorkspaceIDKey, + aggregateIDKey: domainEventCfg.MessageMetadataAggregateIDKey, + userIDKey: domainEventCfg.MessageMetadataUserIDKey, + logger: logger, + schemaAdapter: schemaAdapter, + } +} + +var ProvideFromPersistenceToQSLForwarder = NewFromPersistenceToQSLForwarder + +func (f *FromPersistenceToQSLForwarder) Create( + pgxTx pgx.Tx, +) (pgrepo.Publisher, error) { + sqlPublisher, err := sql.NewPublisher( + sql.TxFromPgx(pgxTx), + sql.PublisherConfig{ + SchemaAdapter: f.schemaAdapter, + AutoInitializeSchema: true, + }, + watermill.NopLogger{}, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SQL publisher: %w", err) + } + publisher := forwarder.NewPublisher(sqlPublisher, forwarder.PublisherConfig{}) + return &ForwarderPublisher{ + workspaceIDKey: f.workspaceIDKey, + aggregateIDKey: f.aggregateIDKey, + userIDKey: f.userIDKey, + publisher: publisher, + }, nil +} diff --git a/internal/note/infra/outbox/outbox.go b/internal/note/infra/outbox/outbox.go new file mode 100644 index 00000000..16f59176 --- /dev/null +++ b/internal/note/infra/outbox/outbox.go @@ -0,0 +1,67 @@ +package outbox + +import ( + "context" + "fmt" + "time" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill-sql/v4/pkg/sql" + "github.com/ThreeDotsLabs/watermill/components/forwarder" + 
"github.com/ThreeDotsLabs/watermill/message" +) + +type Publisher message.Publisher + +type Outbox struct { + fwd *forwarder.Forwarder +} + +func NewOutbox( + publisher Publisher, + logger watermill.LoggerAdapter, + schemaAdapter sql.SchemaAdapter, + pgxConn sql.Conn, +) (*Outbox, error) { + subcriber, err := sql.NewSubscriber( + sql.BeginnerFromPgx(pgxConn), + sql.SubscriberConfig{ + PollInterval: time.Second, + InitializeSchema: true, + SchemaAdapter: schemaAdapter, + ConsumerGroup: "", // NOTE: If scale, we should care about this + }, + logger, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SQL subscriber: %w", err) + } + fwd, err := forwarder.NewForwarder( + subcriber, + publisher, + logger, + forwarder.Config{}, + ) + if err != nil { + return nil, fmt.Errorf("failed to create forwarder: %w", err) + } + return &Outbox{ + fwd: fwd, + }, nil +} + +var ProvideOutbox = NewOutbox + +func (o *Outbox) Run(ctx context.Context) error { + if err := o.fwd.Run(ctx); err != nil { + return fmt.Errorf("failed to run forwarder: %w", err) + } + return nil +} + +func (o *Outbox) Stop() error { + if err := o.fwd.Close(); err != nil { + return fmt.Errorf("failed to close forwarder: %w", err) + } + return nil +} diff --git a/internal/note/infra/outbox/wire.go b/internal/note/infra/outbox/wire.go new file mode 100644 index 00000000..fff1ad77 --- /dev/null +++ b/internal/note/infra/outbox/wire.go @@ -0,0 +1,16 @@ +package outbox + +import ( + "github.com/ThreeDotsLabs/watermill-sql/v4/pkg/sql" + "github.com/goforj/wire" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgrepo" +) + +var ProviderSet = wire.NewSet( + ProvideFromPersistenceToQSLForwarder, + ProvideSchemaAdapter, + wire.Bind(new(sql.SchemaAdapter), new(*sql.DefaultPostgreSQLSchema)), + wire.Bind(new(pgrepo.PublisherFactory), new(*FromPersistenceToQSLForwarder)), + + ProvideOutbox, +) diff --git a/internal/note/infra/persistence/persistencepg.go b/internal/note/infra/persistence/pg.go 
similarity index 59% rename from internal/note/infra/persistence/persistencepg.go rename to internal/note/infra/persistence/pg.go index e6dc20f4..793b83c6 100644 --- a/internal/note/infra/persistence/persistencepg.go +++ b/internal/note/infra/persistence/pg.go @@ -8,12 +8,49 @@ import ( "io/fs" "log/slog" + "github.com/exaring/otelpgx" "github.com/jackc/pgx/v5/pgxpool" - "github.com/notopia-uit/notopia/internal/note/app" + "github.com/jackc/pgx/v5/stdlib" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" + commonconfig "github.com/notopia-uit/notopia/pkg/common/config" "github.com/pressly/goose/v3" "github.com/pressly/goose/v3/lock" + "go.opentelemetry.io/otel/sdk/trace" ) +func NewPgPool( + ctx context.Context, + tracerProvider *trace.TracerProvider, + cfg *commonconfig.SQL, +) (*pgxpool.Pool, func(), error) { + pgxCfg, err := pgxpool.ParseConfig(cfg.GetURL()) + if err != nil { + return nil, nil, err + } + pgxCfg.ConnConfig.Tracer = otelpgx.NewTracer( + otelpgx.WithTracerProvider(tracerProvider), + ) + pool, err := pgxpool.NewWithConfig(ctx, pgxCfg) + if err != nil { + return nil, nil, err + } + return pool, pool.Close, nil +} + +var ProvidePgPool = NewPgPool + +func NewSQLCQueries(db pgsqlc.DBTX) *pgsqlc.Queries { + return pgsqlc.New(db) +} + +var ProvideSQLCQueries = NewSQLCQueries + +func NewPgxPoolStdlib(pool *pgxpool.Pool) *sql.DB { + return stdlib.OpenDBFromPool(pool) +} + +var ProvidePgxPoolStdlib = NewPgxPoolStdlib + func NewGooseProvider(db *sql.DB, logger *slog.Logger) (*goose.Provider, error) { locker, err := lock.NewPostgresSessionLocker() if err != nil { @@ -42,18 +79,16 @@ var ProvideGooseProvider = NewGooseProvider var PgMigrations embed.FS type Pg struct { - pgpool *pgxpool.Pool + pgxPool *pgxpool.Pool gooseProvider *goose.Provider } -var _ app.Persistence = (*Pg)(nil) - func NewPg( - pgxpool *pgxpool.Pool, + pgxPool *pgxpool.Pool, gooseProvider *goose.Provider, ) (*Pg, error) { return &Pg{ - pgpool: pgxpool, + pgxPool: 
pgxPool, gooseProvider: gooseProvider, }, nil } @@ -67,7 +102,7 @@ func (p *Pg) IsMigrationDone(ctx context.Context) (bool, error) { } func (p *Pg) Ping(ctx context.Context) error { - return p.pgpool.Ping(ctx) + return p.pgxPool.Ping(ctx) } func (p *Pg) RunMigrations(ctx context.Context) error { diff --git a/internal/note/infra/persistence/pg/folder.go b/internal/note/infra/persistence/pg/folder.go deleted file mode 100644 index 88b7369c..00000000 --- a/internal/note/infra/persistence/pg/folder.go +++ /dev/null @@ -1,229 +0,0 @@ -package pg - -import ( - "context" - "errors" - "fmt" - "time" - - . "github.com/go-jet/jet/v2/postgres" - "github.com/go-jet/jet/v2/qrm" - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgjet/public/model" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgjet/public/table" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" -) - -type Folder struct { - pgxPool *pgxpool.Pool - queries *pgsqlc.Queries - db qrm.DB - inTransaction bool -} - -var _ domain.FolderRepo = (*Folder)(nil) - -func NewFolder( - pgxPool *pgxpool.Pool, - queries *pgsqlc.Queries, - db qrm.DB, - inTransaction bool, -) *Folder { - return &Folder{ - pgxPool: pgxPool, - queries: queries, - db: db, - inTransaction: inTransaction, - } -} - -func NewNoTransactionFolder( - pgxPool *pgxpool.Pool, - queries *pgsqlc.Queries, - db qrm.DB, -) *Folder { - return NewFolder(pgxPool, queries, db, false) -} - -var ProvideFolder = NewNoTransactionFolder - -func (f *Folder) GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*domain.Folder, errs.Error) { - stmt := SELECT(table.Folders.AllColumns). - FROM(table.Folders). 
- WHERE(table.Folders.ID.EQ(UUID(id))) - if forUpdate { - stmt = stmt.FOR(UPDATE()) - } - - var dest *model.Folders - err := stmt.QueryContext(ctx, f.db, dest) - if err != nil { - if errors.Is(err, qrm.ErrNoRows) { - return nil, errs.NewFolderNotFound(id, err) - } - return nil, toDomainError(err) - } - return folderToDomain(dest), nil -} - -func (f *Folder) GetMany(ctx context.Context, params *domain.FolderRepoGetManyParams) ([]*domain.Folder, errs.Error) { - condition := Bool(true) - if params.WorkspaceID() != nil { - condition = condition.AND(table.Folders.WorkspaceID.EQ(UUID(params.WorkspaceID()))) - } - if len(params.IDs()) > 0 { - var idExprs []Expression - for _, id := range params.IDs() { - idExprs = append(idExprs, UUID(id)) - } - condition = condition.AND(table.Folders.ID.IN(idExprs...)) - } - if params.TrashedBy() != nil { - condition = condition.AND(table.Folders.TrashedBy.EQ(String(params.TrashedBy().String()))) - } - if params.IsTrashed() { - condition = condition.AND(table.Folders.TrashedAt.IS_NULL()) - } - - stmt := SELECT(table.Folders.AllColumns). - FROM(table.Folders). 
- WHERE(condition) - if params.ForUpdate() { - stmt = stmt.FOR(UPDATE()) - } - - var dest []*model.Folders - if err := stmt.QueryContext(ctx, f.db, &dest); err != nil { - return nil, toDomainError(err) - } - folders := make([]*domain.Folder, len(dest)) - for i, folder := range dest { - folders[i] = folderToDomain(folder) - } - return folders, nil -} - -func folderToDomain(folder *model.Folders) *domain.Folder { - var trashed *domain.Trashed - if folder.TrashedBy != nil && folder.TrashedAt != nil { - trashed = domain.NewTrashed( - domain.TrashedBy(folder.TrashedBy.String()), - *folder.TrashedAt, - ) - } - return domain.UnmarshalFolder( - folder.ID, - folder.Name, - folder.Icon, - folder.WorkspaceID, - *domain.NewFolderHierarchy(folder.ParentID), - trashed, - ) -} - -func (f *Folder) Save(ctx context.Context, folder *domain.Folder) errs.Error { - if err := f.queries.SaveFolder(ctx, &pgsqlc.SaveFolderParams{ - ID: folder.ID(), - Name: folder.Name(), - Icon: folder.Icon(), - WorkspaceID: folder.WorkspaceID(), - ParentID: folder.ParentID(), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - TrashedBy: folder.TrashedByString(), - TrashedAt: folder.TrashedAt(), - }); err != nil { - return toDomainError(err) - } - return nil -} - -func (f *Folder) SaveMany(ctx context.Context, folders []*domain.Folder) (cerr errs.Error) { - var queries *pgsqlc.Queries - var tx pgx.Tx - var err error - if !f.inTransaction { - tx, err = f.pgxPool.Begin(ctx) - if err != nil { - return toDomainError(err) - } - defer func() { - if err := tx.Rollback(ctx); err != nil { - cerr = errs.NewPersistenceInternal("failed to rollback transaction", fmt.Errorf("%w: %v", cerr, err)) - } - }() - queries = f.queries.WithTx(tx) - } else { - queries = f.queries - } - if err := queries.CreateTempTableFolders(ctx); err != nil { - return toDomainError(err) - } - saveFolderParams := make([]*pgsqlc.InsertTempFoldersParams, len(folders)) - for i, folder := range folders { - saveFolderParams[i] = 
&pgsqlc.InsertTempFoldersParams{ - ID: folder.ID(), - Name: folder.Name(), - Icon: folder.Icon(), - WorkspaceID: folder.WorkspaceID(), - ParentID: folder.ParentID(), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - TrashedBy: folder.TrashedByString(), - TrashedAt: folder.TrashedAt(), - } - } - affected, err := queries.InsertTempFolders(ctx, saveFolderParams) - if err != nil { - return toDomainError(err) - } - if affected != int64(len(folders)) { - return toDomainError(errors.New("not all folders were inserted into temp table")) - } - if err = queries.SaveFromTempFolders(ctx); err != nil { - return toDomainError(err) - } - if !f.inTransaction { - if err := tx.Commit(ctx); err != nil { - return errs.NewPersistenceInternal("failed to commit transaction", err) - } - } - return nil -} - -func (f *Folder) AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, errs.Error) { - count, err := f.queries.CountFoldersInWorkspaceByIDs(ctx, &pgsqlc.CountFoldersInWorkspaceByIDsParams{ - IDs: ids, - WorkspaceID: workspaceID, - }) - if err != nil { - return false, toDomainError(err) - } - return count == int64(len(ids)), nil -} - -func (f *Folder) PermanentlyDeleteByID(ctx context.Context, id uuid.UUID) errs.Error { - if err := f.queries.PermanentlyDeleteFolderByID(ctx, id); err != nil { - return toDomainError(err) - } - return nil -} - -func (f *Folder) PermanentlyDeleteByIDs(ctx context.Context, ids uuid.UUIDs) errs.Error { - if err := f.queries.PermanentlyDeleteFoldersByIDs(ctx, ids); err != nil { - return toDomainError(err) - } - return nil -} - -func (f *Folder) GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, errs.Error) { - workspaceID, err := f.queries.GetWorkspaceIDByFolderID(ctx, id) - if err != nil { - return uuid.Nil, toDomainError(err) - } - return workspaceID, nil -} diff --git a/internal/note/infra/persistence/pg/note.go b/internal/note/infra/persistence/pg/note.go deleted file mode 100644 index af35bc0a..00000000 --- 
a/internal/note/infra/persistence/pg/note.go +++ /dev/null @@ -1,309 +0,0 @@ -package pg - -import ( - "context" - "errors" - "fmt" - "time" - - . "github.com/go-jet/jet/v2/postgres" - "github.com/go-jet/jet/v2/qrm" - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgjet/public/model" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgjet/public/table" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" -) - -type Note struct { - pgxPool *pgxpool.Pool - queries *pgsqlc.Queries - db qrm.DB - inTransaction bool -} - -var _ domain.NoteRepo = (*Note)(nil) - -func NewNote( - pgxPool *pgxpool.Pool, - queries *pgsqlc.Queries, - db qrm.DB, - inTransaction bool, -) *Note { - return &Note{ - pgxPool: pgxPool, - queries: queries, - db: db, - inTransaction: inTransaction, - } -} - -func NewNoTransactionNote( - pgxPool *pgxpool.Pool, - queries *pgsqlc.Queries, - db qrm.DB, -) *Note { - return NewNote(pgxPool, queries, db, false) -} - -var ProvideNote = NewNoTransactionNote - -type GetNoteResult struct { - model.Notes - OutgoingLinks uuid.UUIDs `alias:"note_links.target_id"` -} - -func (n *Note) GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*domain.Note, errs.Error) { - stmt := SELECT(table.Notes.AllColumns). - FROM( - table.Notes. - LEFT_JOIN(table.NoteLinks, table.NoteLinks.SourceID.EQ(table.Notes.ID)), - ). 
- WHERE(table.Notes.ID.EQ(UUID(id))) - if forUpdate { - stmt = stmt.FOR(UPDATE()) - } - var dest *GetNoteResult - err := stmt.QueryContext(ctx, n.db, dest) - if err != nil { - if errors.Is(err, qrm.ErrNoRows) { - return nil, errs.NewNoteNotFound(id, err) - } - return nil, toDomainError(err) - } - return noteToDomain(dest), nil -} - -func (n *Note) GetMany(ctx context.Context, params *domain.NoteRepoGetManyParams) ([]*domain.Note, errs.Error) { - condition := Bool(true) - if params.WorkspaceID() != nil { - condition = condition.AND( - table.Notes.FolderID.IN( - SELECT(table.Folders.ID). - FROM(table.Folders). - WHERE(table.Folders.WorkspaceID.EQ(UUID(*params.WorkspaceID()))), - ), - ) - } - if len(params.IDs()) > 0 { - var idExprs []Expression - for _, id := range params.IDs() { - idExprs = append(idExprs, UUID(id)) - } - condition = condition.AND(table.Notes.ID.IN(idExprs...)) - } - if params.TrashedBy() != nil { - condition = condition.AND(table.Notes.TrashedBy.EQ(String(params.TrashedBy().String()))) - } - if params.IsTrashed() != nil { - condition = condition.AND(table.Notes.TrashedAt.IS_NULL()) - } - - stmt := SELECT(table.Notes.AllColumns). - FROM( - table.Notes. - LEFT_JOIN(table.NoteLinks, table.NoteLinks.SourceID.EQ(table.Notes.ID)), - ). 
- WHERE(condition) - if params.ForUpdate() { - stmt = stmt.FOR(UPDATE()) - } - - var dest []*GetNoteResult - err := stmt.QueryContext(ctx, n.db, &dest) - if err != nil { - return nil, toDomainError(err) - } - - if len(dest) == 0 { - return []*domain.Note{}, nil - } - - notes := make([]*domain.Note, len(dest)) - for i, noteResult := range dest { - notes[i] = noteToDomain(noteResult) - } - return notes, nil -} - -func noteToDomain(note *GetNoteResult) *domain.Note { - var trashed *domain.Trashed - if note.TrashedBy != nil && note.TrashedAt != nil { - trashed = domain.NewTrashed( - domain.TrashedBy(*note.TrashedBy), - *note.TrashedAt, - ) - } - var tags []string - if note.Tags != nil { - tags = *note.Tags - } - return domain.UnmarshalNote( - note.ID, - note.Name, - note.Icon, - tags, - uint64(note.Size), - note.FolderID, - note.OutgoingLinks, - trashed, - ) -} - -// TODO: It doesn't save the outgoing links -func (n *Note) Save(ctx context.Context, note *domain.Note) (cerr errs.Error) { - var queries *pgsqlc.Queries - var tx pgx.Tx - var err error - if !n.inTransaction { - tx, err = n.pgxPool.Begin(ctx) - if err != nil { - return toDomainError(err) - } - queries = n.queries.WithTx(tx) - defer func() { - if err := tx.Rollback(ctx); err != nil { - cerr = errs.NewPersistenceInternal("failed to rollback transaction", fmt.Errorf("%w: %v", cerr, err)) - } - }() - } else { - queries = n.queries - } - err = queries.SaveNote(ctx, &pgsqlc.SaveNoteParams{ - ID: note.ID(), - Name: note.Name(), - Icon: note.Icon(), - FolderID: note.FolderID(), - Tags: note.Tags(), - Size: int32(note.Size()), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - TrashedBy: note.TrashedByString(), - TrashedAt: note.TrashedAt(), - }) - if err != nil { - return toDomainError(err) - } - if err := queries.CreateTempTableNoteLinks(ctx); err != nil { - return toDomainError(err) - } - saveNoteLinkParams := make([]*pgsqlc.InsertTempNoteLinksParams, len(note.OutgoingLinks())) - for i, targetID := range 
note.OutgoingLinks() { - saveNoteLinkParams[i] = &pgsqlc.InsertTempNoteLinksParams{ - SourceID: note.ID(), - TargetID: targetID, - } - } - affected, err := queries.InsertTempNoteLinks(ctx, saveNoteLinkParams) - if err != nil { - return toDomainError(err) - } - if affected != int64(len(note.OutgoingLinks())) { - return toDomainError(errors.New("not all note links were inserted into temp table")) - } - if err := queries.DeleteObsoleteNoteLinks(ctx); err != nil { - return toDomainError(err) - } - if err := queries.SaveFromTempNoteLinks(ctx); err != nil { - return toDomainError(err) - } - if !n.inTransaction { - if err := tx.Commit(ctx); err != nil { - return errs.NewPersistenceInternal("failed to commit transaction", err) - } - } - return nil -} - -// TODO: It doesn't save the outgoing links -func (n *Note) SaveMany(ctx context.Context, notes []*domain.Note) (cerr errs.Error) { - var queries *pgsqlc.Queries - var tx pgx.Tx - var err error - if !n.inTransaction { - tx, err = n.pgxPool.Begin(ctx) - if err != nil { - return toDomainError(err) - } - defer func() { - if err := tx.Rollback(ctx); err != nil { - cerr = errs.NewPersistenceInternal("failed to rollback transaction", fmt.Errorf("%w: %v", cerr, err)) - } - }() - queries = n.queries.WithTx(tx) - } else { - queries = n.queries - } - if err = queries.CreateTempTableNotes(ctx); err != nil { - return toDomainError(err) - } - saveNoteParams := make([]*pgsqlc.InsertTempNotesParams, len(notes)) - for i, note := range notes { - saveNoteParams[i] = &pgsqlc.InsertTempNotesParams{ - ID: note.ID(), - Name: note.Name(), - Icon: note.Icon(), - FolderID: note.FolderID(), - Tags: note.Tags(), - Size: int32(note.Size()), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - TrashedBy: note.TrashedByString(), - TrashedAt: note.TrashedAt(), - } - } - affected, err := queries.InsertTempNotes(ctx, saveNoteParams) - if err != nil { - return toDomainError(err) - } - if affected != int64(len(notes)) { - return toDomainError(errors.New("not 
all notes were inserted into temp table")) - } - if err = queries.SaveFromTempNotes(ctx); err != nil { - return toDomainError(err) - } - if !n.inTransaction { - if err := tx.Commit(ctx); err != nil { - return errs.NewPersistenceInternal("failed to commit transaction", err) - } - } - return nil -} - -func (n *Note) AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, errs.Error) { - count, err := n.queries.CountNotesInWorkspaceByIDs(ctx, &pgsqlc.CountNotesInWorkspaceByIDsParams{ - IDs: ids, - WorkspaceID: workspaceID, - }) - if err != nil { - return false, toDomainError(err) - } - return count == int64(len(ids)), nil -} - -func (n *Note) PermanentlyDeleteByID(ctx context.Context, id uuid.UUID) errs.Error { - err := n.queries.PermanentlyDeleteNoteByID(ctx, id) - if err != nil { - return toDomainError(err) - } - return nil -} - -func (n *Note) PermanentlyDeleteByIDs(ctx context.Context, ids uuid.UUIDs) errs.Error { - err := n.queries.PermanentlyDeleteNotesByIDs(ctx, ids) - if err != nil { - return toDomainError(err) - } - return nil -} - -func (n *Note) GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, errs.Error) { - workspaceID, err := n.queries.GetWorkspaceIDByNoteID(ctx, id) - if err != nil { - return uuid.Nil, toDomainError(err) - } - return workspaceID, nil -} diff --git a/internal/note/infra/persistence/pg/pg.go b/internal/note/infra/persistence/pg/pg.go deleted file mode 100644 index f31b15a5..00000000 --- a/internal/note/infra/persistence/pg/pg.go +++ /dev/null @@ -1,46 +0,0 @@ -package pg - -import ( - "context" - "database/sql" - - "github.com/exaring/otelpgx" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/jackc/pgx/v5/stdlib" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" - commonconfig "github.com/notopia-uit/notopia/pkg/common/config" - "go.opentelemetry.io/otel/sdk/trace" -) - -func NewPgPool( - ctx context.Context, - tracerProvider *trace.TracerProvider, - cfg 
*commonconfig.SQL, -) (*pgxpool.Pool, func(), error) { - pgxCfg, err := pgxpool.ParseConfig(cfg.GetURL()) - if err != nil { - return nil, nil, err - } - pgxCfg.ConnConfig.Tracer = otelpgx.NewTracer( - otelpgx.WithTracerProvider(tracerProvider), - ) - pool, err := pgxpool.NewWithConfig(ctx, pgxCfg) - if err != nil { - return nil, nil, err - } - return pool, pool.Close, nil -} - -var ProvidePgPool = NewPgPool - -func NewQueries(db pgsqlc.DBTX) *pgsqlc.Queries { - return pgsqlc.New(db) -} - -var ProvideQueries = NewQueries - -func NewStdlib(pool *pgxpool.Pool) *sql.DB { - return stdlib.OpenDBFromPool(pool) -} - -var ProvideStdlib = NewStdlib diff --git a/internal/note/infra/persistence/pg/readmodel.go b/internal/note/infra/persistence/pg/readmodel.go deleted file mode 100644 index 77cef851..00000000 --- a/internal/note/infra/persistence/pg/readmodel.go +++ /dev/null @@ -1,491 +0,0 @@ -package pg - -import ( - "context" - "errors" - "math" - "time" - - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "github.com/notopia-uit/notopia/internal/note/app" - "github.com/notopia-uit/notopia/internal/note/errs" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" -) - -type ReadModel struct { - queries *pgsqlc.Queries -} - -func NewReadModel(queries *pgsqlc.Queries) *ReadModel { - return &ReadModel{queries: queries} -} - -var ProvideReadModel = NewReadModel - -var ( - _ app.GetWorkspaceTreeReadModel = (*ReadModel)(nil) - _ app.ShowTrashReadModel = (*ReadModel)(nil) - _ app.GetNoteGraphReadModel = (*ReadModel)(nil) - _ app.GetNoteLinksReadModel = (*ReadModel)(nil) - _ app.GetWorkspaceBySlugReadModel = (*ReadModel)(nil) - _ app.GetWorkspaceGraphReadModel = (*ReadModel)(nil) - _ app.CheckWorkspaceSlugExistsReadModel = (*ReadModel)(nil) - _ app.GetNoteReadModel = (*ReadModel)(nil) -) - -func (r *ReadModel) GetWorkspaceTree(ctx context.Context, q *app.GetWorkspaceTree) (*app.WorkspaceTreeFolder, errs.Error) { - var rootFolderID uuid.UUID - - if 
q.RootFolderID != nil { - rootFolderID = *q.RootFolderID - } else { - rootFolderIDs, err := r.queries.GetRootFolderIDsByWorkspaceID(ctx, q.WorkspaceID) - if err != nil { - return nil, toDomainError(err) - } - if len(rootFolderIDs) == 0 { - return nil, errs.NewWorkspaceRootFolderNotFound(q.WorkspaceID, pgx.ErrNoRows) - } - rootFolderID = rootFolderIDs[0] - } - - rootFolder, err := r.queries.GetFolder(ctx, &pgsqlc.GetFolderParams{ - ID: &rootFolderID, - WorkspaceID: &q.WorkspaceID, - IsRootFolder: false, - ParentID: nil, - TrashedBy: "", - IncludeTrashed: q.IncludeTrashed, - }) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, errs.NewFolderNotFound(rootFolderID, err) - } - return nil, toDomainError(err) - } - - var depth *int32 - if q.Depth != nil { - depth = new(int32(*q.Depth)) - } - recursiveFolders, err := r.queries.GetRecursiveFolderByParentID(ctx, &pgsqlc.GetRecursiveFolderByParentIDParams{ - ParentID: rootFolderID, - Depth: depth, - IncludeTrashed: q.IncludeTrashed, - }) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - - var folderIDs []uuid.UUID - folderIDs = append(folderIDs, rootFolderID) - folderMap := make(map[uuid.UUID]*pgsqlc.GetRecursiveFolderByParentIDRow) - for _, folder := range recursiveFolders { - folderIDs = append(folderIDs, folder.ID) - folderMap[folder.ID] = folder - } - - allNotes, err := r.queries.GetNotesByFolderIDs(ctx, &pgsqlc.GetNotesByFolderIDsParams{ - FolderIds: folderIDs, - IncludeTrashed: q.IncludeTrashed, - }) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - - notesByFolder := make(map[uuid.UUID][]*pgsqlc.Note) - for _, note := range allNotes { - notesByFolder[note.FolderID] = append(notesByFolder[note.FolderID], note) - } - - tree := r.buildFolderTree( - rootFolder.ID, - rootFolder.Name, - rootFolder.Icon, - rootFolder.UpdatedAt, - folderMap, - notesByFolder, - ) - return tree, nil -} - -func (r *ReadModel) buildFolderTree( - 
folderID uuid.UUID, - folderName string, - folderIcon *string, - updatedAt time.Time, - folderMap map[uuid.UUID]*pgsqlc.GetRecursiveFolderByParentIDRow, - notesByFolder map[uuid.UUID][]*pgsqlc.Note, -) *app.WorkspaceTreeFolder { - result := app.WorkspaceTreeFolder{ - ID: folderID, - Name: folderName, - Icon: folderIcon, - UpdatedAt: updatedAt, - Notes: []*app.WorkspaceTreeNote{}, - Children: []*app.WorkspaceTreeFolder{}, - } - - if notes, ok := notesByFolder[folderID]; ok { - for _, note := range notes { - result.Notes = append(result.Notes, &app.WorkspaceTreeNote{ - ID: note.ID, - Name: note.Name, - Icon: note.Icon, - UpdatedAt: note.UpdatedAt, - }) - } - } - - for _, childFolder := range folderMap { - if childFolder.ParentID != nil && *childFolder.ParentID == folderID { - childTree := r.buildFolderTree( - childFolder.ID, - childFolder.Name, - childFolder.Icon, - childFolder.UpdatedAt, - folderMap, - notesByFolder, - ) - result.Children = append(result.Children, childTree) - } - } - - return &result -} - -func (r *ReadModel) ShowTrash(ctx context.Context, q *app.ShowTrash) (*app.Trash, errs.Error) { - trashedNotes, err := r.queries.GetTrashedNotesByWorkspaceID(ctx, q.WorkspaceID) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - - trashedFolders, err := r.queries.GetFolders(ctx, &pgsqlc.GetFoldersParams{ - WorkspaceID: &q.WorkspaceID, - TrashedBy: new(string(pgsqlc.TrashedByPurpose)), - ParentID: nil, - IDs: nil, - IsRootFolder: false, - IncludeTrashed: true, - }) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - - notes := make([]*app.TrashedNote, len(trashedNotes)) - for i, note := range trashedNotes { - notes[i] = &app.TrashedNote{ - ID: note.ID, - Name: note.Name, - Icon: note.Icon, - Trashed: app.Trashed{ - TrashedBy: app.TrashedByPurpose, - TrashedAt: *note.TrashedAt, - }, - } - } - - folders := make([]*app.TrashedFolder, len(trashedFolders)) - for i, folder := range 
trashedFolders { - folders[i] = &app.TrashedFolder{ - ID: folder.ID, - Name: folder.Name, - Icon: folder.Icon, - Trashed: app.Trashed{ - TrashedBy: app.TrashedByPurpose, - TrashedAt: *folder.TrashedAt, - }, - } - } - - return &app.Trash{ - Notes: notes, - Folders: folders, - }, nil -} - -func (r *ReadModel) GetNoteLinks(ctx context.Context, q *app.GetNoteLinks) (*app.NoteLinkResult, errs.Error) { - _, err := r.queries.GetNote(ctx, q.ID) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, errs.NewNoteNotFound(q.ID, err) - } - return nil, toDomainError(err) - } - - result := app.NoteLinkResult{ - OutgoingLinks: []*app.NoteLink{}, - Backlinks: []*app.NoteLink{}, - } - - if q.OutgoingLinks { - outgoingLinks, err := r.queries.GetNoteOutgoingLinks(ctx, &pgsqlc.GetNoteOutgoingLinksParams{ - SourceID: &q.ID, - SourceIDs: nil, - }) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - - if len(outgoingLinks) > 0 { - outgoingNotes, err := r.queries.GetNotes(ctx, outgoingLinks) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - for _, linkedNote := range outgoingNotes { - result.OutgoingLinks = append(result.OutgoingLinks, &app.NoteLink{ - ID: linkedNote.ID, - Name: linkedNote.Name, - Icon: linkedNote.Icon, - }) - } - } - } - - if q.Backlinks { - backlinks, err := r.queries.GetNoteBacklinks(ctx, q.ID) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - - if len(backlinks) > 0 { - backlinkNotes, err := r.queries.GetNotes(ctx, backlinks) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return nil, toDomainError(err) - } - for _, linkedNote := range backlinkNotes { - result.Backlinks = append(result.Backlinks, &app.NoteLink{ - ID: linkedNote.ID, - Name: linkedNote.Name, - Icon: linkedNote.Icon, - }) - } - } - } - - return &result, nil -} - -func (r *ReadModel) GetWorkspaceBySlug(ctx context.Context, q *app.GetWorkspaceBySlug) (*app.Workspace, 
errs.Error) { - workspace, err := r.queries.GetWorkspace(ctx, &pgsqlc.GetWorkspaceParams{ - Slug: &q.Slug, - ID: nil, - }) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, errs.NewWorkspaceBySlugNotFound(q.Slug, err) - } - return nil, toDomainError(err) - } - - return &app.Workspace{ - ID: workspace.ID, - Slug: workspace.Slug, - Name: workspace.Name, - }, nil -} - -func (r *ReadModel) CheckWorkspaceSlugExists(ctx context.Context, q *app.CheckWorkspaceSlugExists) (*app.CheckWorkspaceSlugExistsResult, errs.Error) { - exists, err := r.queries.CheckSlugExists(ctx, q.Slug) - if err != nil { - return nil, toDomainError(err) - } - - return &app.CheckWorkspaceSlugExistsResult{ - Exists: exists, - }, nil -} - -func (r *ReadModel) GetWorkspaceGraph(ctx context.Context, q *app.GetWorkspaceGraph) (*app.Graph, errs.Error) { - notes, err := r.queries.GetNotesInWorkspace(ctx, &pgsqlc.GetNotesInWorkspaceParams{ - WorkspaceID: q.ID, - TrashedBy: nil, - }) - if err != nil { - return nil, toDomainError(err) - } - - links, err := r.queries.GetNoteLinksInWorkspace(ctx, q.ID) - if err != nil { - return nil, toDomainError(err) - } - - reachableIDs := make(map[string]bool) - - if q.IgnoreOrphans { - // Build adjacency to identify connected notes - adj := make(map[string]bool) - for _, l := range links { - adj[l.SourceID.String()] = true - adj[l.TargetID.String()] = true - } - for _, n := range notes { - if len(n.Tags) > 0 || adj[n.ID.String()] { - reachableIDs[n.ID.String()] = true - // Add tags for this note - for _, tag := range n.Tags { - reachableIDs["#"+tag] = true - } - } - } - } else { - // Include all notes and their tags - for _, n := range notes { - reachableIDs[n.ID.String()] = true - for _, tag := range n.Tags { - reachableIDs["#"+tag] = true - } - } - } - - return buildGraph(notes, links, reachableIDs), nil -} - -func (r *ReadModel) GetNoteGraph(ctx context.Context, q *app.GetNoteGraph) (*app.Graph, errs.Error) { - workspaceID, err := 
r.queries.GetWorkspaceIDByNoteID(ctx, q.ID) - if err != nil { - return nil, toDomainError(err) - } - - notes, err := r.queries.GetNotesInWorkspace(ctx, &pgsqlc.GetNotesInWorkspaceParams{ - WorkspaceID: workspaceID, - TrashedBy: nil, - }) - if err != nil { - return nil, toDomainError(err) - } - - links, err := r.queries.GetNoteLinksInWorkspace(ctx, workspaceID) - if err != nil { - return nil, toDomainError(err) - } - - // Build Adjacency List for Traversal (Bidirectional for backlinks and tags) - adj := make(map[string][]string) - for _, n := range notes { - for _, tag := range n.Tags { - tagID := "#" + tag - adj[n.ID.String()] = append(adj[n.ID.String()], tagID) // Note -> Tag - adj[tagID] = append(adj[tagID], n.ID.String()) // Tag -> Note - } - } - for _, l := range links { - adj[l.SourceID.String()] = append(adj[l.SourceID.String()], l.TargetID.String()) // Note -> Note - adj[l.TargetID.String()] = append(adj[l.TargetID.String()], l.SourceID.String()) // Backlink: Note <- Note - } - - // BFS Traversal to find reachable nodes within maxDepth - reachableIDs := make(map[string]bool) - type queueItem struct { - id string - depth int - } - - startID := q.ID.String() - queue := []queueItem{{id: startID, depth: 0}} - - for len(queue) > 0 { - curr := queue[0] - queue = queue[1:] - - if reachableIDs[curr.id] { - continue - } - reachableIDs[curr.id] = true - - if curr.depth < q.Depth { - for _, neighbor := range adj[curr.id] { - if !reachableIDs[neighbor] { - queue = append(queue, queueItem{id: neighbor, depth: curr.depth + 1}) - } - } - } - } - - return buildGraph(notes, links, reachableIDs), nil -} - -func calculateGraphWeight(size, minSize, maxSize int32) *float64 { - var w float64 - if maxSize == minSize { - w = 1 - } else { - w = float64(size-minSize) / float64(maxSize-minSize) - } - return &w -} - -func buildGraph(notes []*pgsqlc.Note, links []*pgsqlc.NoteLink, reachableIDs map[string]bool) *app.Graph { - var minSize int32 = math.MaxInt32 - var maxSize int32 = -1 - 
reachableNotesMap := make(map[uuid.UUID]*pgsqlc.Note) - - for _, n := range notes { - if reachableIDs[n.ID.String()] { - reachableNotesMap[n.ID] = n - if n.Size < minSize { - minSize = n.Size - } - if n.Size > maxSize { - maxSize = n.Size - } - } - } - - var graphNodes []*app.GraphNode - var graphLinks []*app.GraphLink - tagsAdded := make(map[string]bool) - - // 2. Build Nodes (Notes and Tags) - for _, n := range reachableNotesMap { - // Add Note Node - graphNodes = append(graphNodes, &app.GraphNode{ - ID: n.ID.String(), - Name: n.Name, - Type: app.GraphNodeTypeNote, - Weight: calculateGraphWeight(n.Size, minSize, maxSize), - }) - - for _, tag := range n.Tags { - tagID := "#" + tag - - if reachableIDs[tagID] { - if !tagsAdded[tagID] { - graphNodes = append(graphNodes, &app.GraphNode{ - ID: tagID, - Name: tag, - Type: app.GraphNodeTypeTag, - Weight: nil, - }) - tagsAdded[tagID] = true - } - - // Add structural link for Note -> Tag - graphLinks = append(graphLinks, &app.GraphLink{ - Source: n.ID.String(), - Target: tagID, - }) - } - } - } - - // 3. 
Build Note -> Note Links (filtering out unreachable ones) - for _, l := range links { - if reachableIDs[l.SourceID.String()] && reachableIDs[l.TargetID.String()] { - graphLinks = append(graphLinks, &app.GraphLink{ - Source: l.SourceID.String(), - Target: l.TargetID.String(), - }) - } - } - - return &app.Graph{ - Nodes: graphNodes, - Links: graphLinks, - } -} - -func (r *ReadModel) GetNote(ctx context.Context, q *app.GetNote) (*app.Note, errs.Error) { - return nil, nil -} diff --git a/internal/note/infra/persistence/pg/uow.go b/internal/note/infra/persistence/pg/uow.go deleted file mode 100644 index 15df4e12..00000000 --- a/internal/note/infra/persistence/pg/uow.go +++ /dev/null @@ -1,96 +0,0 @@ -package pg - -import ( - "context" - "database/sql" - "fmt" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/stdlib" - "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" -) - -type RepoRegistry struct { - workspace *Workspace - folder *Folder - note *Note -} - -var _ domain.RepoRegistry = (*RepoRegistry)(nil) - -func (r *RepoRegistry) Workspace() domain.WorkspaceRepo { return r.workspace } - -func (r *RepoRegistry) Folder() domain.FolderRepo { return r.folder } - -func (r *RepoRegistry) Note() domain.NoteRepo { return r.note } - -type UnitOfWork struct { - queries *pgsqlc.Queries - sdb *sql.DB -} - -var _ domain.UnitOfWork = (*UnitOfWork)(nil) - -func NewUnitOfWork(queries *pgsqlc.Queries, sdb *sql.DB) *UnitOfWork { - return &UnitOfWork{ - queries: queries, - sdb: sdb, - } -} - -var ProvideUnitOfWork = NewUnitOfWork - -// NOTE: an AI said about chaining error is not a good idea? 
-func (u *UnitOfWork) Execute( - ctx context.Context, - fn func(repoRegistry domain.RepoRegistry) errs.Error, -) (cerr errs.Error) { - conn, err := u.sdb.Conn(ctx) - if err != nil { - return errs.NewPersistenceInternal("failed to get connection from pool", err) - } - defer func() { - if err := conn.Close(); err != nil { - cerr = errs.NewPersistenceInternal("failed to close connection", fmt.Errorf("%w: %v", cerr, err)) - } - }() - - tx, err := conn.BeginTx(ctx, nil) - if err != nil { - return errs.NewPersistenceInternal("failed to begin transaction", err) - } - defer func() { - if err := tx.Rollback(); err != nil && err != sql.ErrTxDone { - cerr = errs.NewPersistenceInternal("failed to rollback transaction", fmt.Errorf("%w: %v", cerr, err)) - } - }() - - var pgxConn *pgx.Conn - err = conn.Raw(func(driverConn any) error { - pgxConn = driverConn.(*stdlib.Conn).Conn() - return nil - }) - if err != nil { - return errs.NewPersistenceInternal("failed to get raw connection", err) - } - - txQueries := pgsqlc.New(pgxConn) - // NOTE: passing nil to pgxpool because pgxpool in repo used for starting a transaction - // but in this case transaction is already started in unit of work - repoRegistry := &RepoRegistry{ - workspace: NewWorkspace(nil, txQueries, tx, true), - folder: NewFolder(nil, txQueries, tx, true), - note: NewNote(nil, txQueries, tx, true), - } - - if err := fn(repoRegistry); err != nil { - return err - } - - if err := tx.Commit(); err != nil { - return errs.NewPersistenceInternal("failed to commit transaction", err) - } - return nil -} diff --git a/internal/note/infra/persistence/pg/workspace.go b/internal/note/infra/persistence/pg/workspace.go deleted file mode 100644 index d9c7c746..00000000 --- a/internal/note/infra/persistence/pg/workspace.go +++ /dev/null @@ -1,166 +0,0 @@ -package pg - -import ( - "context" - "errors" - "time" - - . 
"github.com/go-jet/jet/v2/postgres" - "github.com/go-jet/jet/v2/qrm" - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/errs" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgjet/public/table" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" -) - -type Workspace struct { - pgxPool *pgxpool.Pool - queries *pgsqlc.Queries - db qrm.DB - inTransaction bool -} - -var _ domain.WorkspaceRepo = (*Workspace)(nil) - -func NewWorkspace( - pgxPool *pgxpool.Pool, - queries *pgsqlc.Queries, - db qrm.DB, - inTransaction bool, -) *Workspace { - return &Workspace{ - pgxPool: pgxPool, - queries: queries, - db: db, - inTransaction: inTransaction, - } -} - -func NewNoTransactionWorkspace(pgxPool *pgxpool.Pool, queries *pgsqlc.Queries, db qrm.DB) *Workspace { - return NewWorkspace(pgxPool, queries, db, false) -} - -var ProvideWorkspace = NewNoTransactionWorkspace - -func (w *Workspace) GetBySlug(ctx context.Context, slug string, forUpdate bool) (*domain.Workspace, errs.Error) { - stmt := SELECT(table.Workspaces.AllColumns). - FROM(table.Workspaces). - WHERE(table.Workspaces.Slug.EQ(String(slug))) - if forUpdate { - stmt = stmt.FOR(UPDATE()) - } - - var dest []*pgsqlc.Workspace - err := stmt.QueryContext(ctx, w.db, &dest) - if err != nil { - return nil, toDomainError(err) - } - - if len(dest) == 0 { - return nil, errs.NewWorkspaceBySlugNotFound(slug, pgx.ErrNoRows) - } - workspaceResult := dest[0] - - folderStmt := SELECT(table.Folders.AllColumns). - FROM(table.Folders). 
- WHERE(table.Folders.WorkspaceID.EQ(UUID(workspaceResult.ID)).AND(table.Folders.ParentID.IS_NULL())) - if forUpdate { - folderStmt = folderStmt.FOR(UPDATE()) - } - - var folderDest []*pgsqlc.Folder - err = folderStmt.QueryContext(ctx, w.db, &folderDest) - if err != nil { - return nil, toDomainError(err) - } - - if len(folderDest) == 0 { - return nil, errs.NewWorkspaceRootFolderNotFound(workspaceResult.ID, pgx.ErrNoRows) - } - rootFolderResult := folderDest[0] - - return workspaceToDomain(workspaceResult, rootFolderResult.ID) -} - -func (w *Workspace) GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*domain.Workspace, errs.Error) { - stmt := SELECT(table.Workspaces.AllColumns). - FROM(table.Workspaces). - WHERE(table.Workspaces.ID.EQ(UUID(id))) - if forUpdate { - stmt = stmt.FOR(UPDATE()) - } - - var dest []*pgsqlc.Workspace - err := stmt.QueryContext(ctx, w.db, &dest) - if err != nil { - return nil, toDomainError(err) - } - - if len(dest) == 0 { - return nil, errs.NewWorkspaceNotFound(id, pgx.ErrNoRows) - } - workspaceResult := dest[0] - - folderStmt := SELECT(table.Folders.AllColumns). - FROM(table.Folders). 
- WHERE(table.Folders.WorkspaceID.EQ(UUID(workspaceResult.ID)).AND(table.Folders.ParentID.IS_NULL())) - if forUpdate { - folderStmt = folderStmt.FOR(UPDATE()) - } - - var folderDest []*pgsqlc.Folder - err = folderStmt.QueryContext(ctx, w.db, &folderDest) - if err != nil { - return nil, toDomainError(err) - } - - if len(folderDest) == 0 { - return nil, errs.NewWorkspaceRootFolderNotFound(id, pgx.ErrNoRows) - } - rootFolderResult := folderDest[0] - - return workspaceToDomain(workspaceResult, rootFolderResult.ID) -} - -func (w *Workspace) GetIDBySlug(ctx context.Context, slug string) (*uuid.UUID, errs.Error) { - result, err := w.queries.GetWorkspaceIDBySlug(ctx, slug) - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - return nil, errs.NewWorkspaceBySlugNotFound(slug, err) - } - return nil, toDomainError(err) - } - return &result, nil -} - -func (w *Workspace) CheckSlugExists(ctx context.Context, slug string) (bool, errs.Error) { - result, err := w.queries.CheckSlugExists(ctx, slug) - return result, toDomainError(err) -} - -func (w *Workspace) Save(ctx context.Context, workspace *domain.Workspace) errs.Error { - err := w.queries.SaveWorkspace(ctx, &pgsqlc.SaveWorkspaceParams{ - ID: workspace.ID(), - Slug: workspace.Slug(), - Name: workspace.Name(), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - DeletedAt: workspace.DeletedAt(), - }) - if err != nil { - return toDomainError(err) - } - return nil -} - -func workspaceToDomain(workspace *pgsqlc.Workspace, rootFolderID uuid.UUID) (*domain.Workspace, errs.Error) { - return domain.NewWorkspace( - workspace.ID, - workspace.Name, - workspace.Slug, - rootFolderID, - ) -} diff --git a/internal/note/infra/persistence/pgjet/public/enum/trashed_by.go b/internal/note/infra/persistence/pgjet/public/enum/trashed_by.go deleted file mode 100644 index e0e8bb92..00000000 --- a/internal/note/infra/persistence/pgjet/public/enum/trashed_by.go +++ /dev/null @@ -1,18 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. 
-// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package enum - -import "github.com/go-jet/jet/v2/postgres" - -var TrashedBy = &struct { - Purpose postgres.StringExpression - Parent postgres.StringExpression -}{ - Purpose: postgres.NewEnumValue("purpose"), - Parent: postgres.NewEnumValue("parent"), -} diff --git a/internal/note/infra/persistence/pgjet/public/model/folders.go b/internal/note/infra/persistence/pgjet/public/model/folders.go deleted file mode 100644 index 3c95a52e..00000000 --- a/internal/note/infra/persistence/pgjet/public/model/folders.go +++ /dev/null @@ -1,25 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. -// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package model - -import ( - "github.com/google/uuid" - "time" -) - -type Folders struct { - ID uuid.UUID `sql:"primary_key"` - Name string - Icon *string - WorkspaceID uuid.UUID - ParentID *uuid.UUID - CreatedAt time.Time - UpdatedAt time.Time - TrashedBy *TrashedBy - TrashedAt *time.Time -} diff --git a/internal/note/infra/persistence/pgjet/public/model/goose_db_version.go b/internal/note/infra/persistence/pgjet/public/model/goose_db_version.go deleted file mode 100644 index c7f68e86..00000000 --- a/internal/note/infra/persistence/pgjet/public/model/goose_db_version.go +++ /dev/null @@ -1,19 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. 
-// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package model - -import ( - "time" -) - -type GooseDbVersion struct { - ID int32 `sql:"primary_key"` - VersionID int64 - IsApplied bool - Tstamp time.Time -} diff --git a/internal/note/infra/persistence/pgjet/public/model/note_links.go b/internal/note/infra/persistence/pgjet/public/model/note_links.go deleted file mode 100644 index f7dc2d9e..00000000 --- a/internal/note/infra/persistence/pgjet/public/model/note_links.go +++ /dev/null @@ -1,17 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. -// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package model - -import ( - "github.com/google/uuid" -) - -type NoteLinks struct { - SourceID uuid.UUID `sql:"primary_key"` - TargetID uuid.UUID `sql:"primary_key"` -} diff --git a/internal/note/infra/persistence/pgjet/public/model/notes.go b/internal/note/infra/persistence/pgjet/public/model/notes.go deleted file mode 100644 index 723b11cc..00000000 --- a/internal/note/infra/persistence/pgjet/public/model/notes.go +++ /dev/null @@ -1,27 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. 
-// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package model - -import ( - "github.com/google/uuid" - "github.com/lib/pq" - "time" -) - -type Notes struct { - ID uuid.UUID `sql:"primary_key"` - Name string - Icon *string - FolderID uuid.UUID - Tags *pq.StringArray - Size int32 - CreatedAt time.Time - UpdatedAt time.Time - TrashedBy *TrashedBy - TrashedAt *time.Time -} diff --git a/internal/note/infra/persistence/pgjet/public/model/trashed_by.go b/internal/note/infra/persistence/pgjet/public/model/trashed_by.go deleted file mode 100644 index 49b1d163..00000000 --- a/internal/note/infra/persistence/pgjet/public/model/trashed_by.go +++ /dev/null @@ -1,49 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. -// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package model - -import "errors" - -type TrashedBy string - -const ( - TrashedBy_Purpose TrashedBy = "purpose" - TrashedBy_Parent TrashedBy = "parent" -) - -var TrashedByAllValues = []TrashedBy{ - TrashedBy_Purpose, - TrashedBy_Parent, -} - -func (e *TrashedBy) Scan(value interface{}) error { - var enumValue string - switch val := value.(type) { - case string: - enumValue = val - case []byte: - enumValue = string(val) - default: - return errors.New("jet: Invalid scan value for AllTypesEnum enum. 
Enum value has to be of type string or []byte") - } - - switch enumValue { - case "purpose": - *e = TrashedBy_Purpose - case "parent": - *e = TrashedBy_Parent - default: - return errors.New("jet: Invalid scan value '" + enumValue + "' for TrashedBy enum") - } - - return nil -} - -func (e TrashedBy) String() string { - return string(e) -} diff --git a/internal/note/infra/persistence/pgjet/public/model/workspaces.go b/internal/note/infra/persistence/pgjet/public/model/workspaces.go deleted file mode 100644 index eca48552..00000000 --- a/internal/note/infra/persistence/pgjet/public/model/workspaces.go +++ /dev/null @@ -1,22 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. -// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package model - -import ( - "github.com/google/uuid" - "time" -) - -type Workspaces struct { - ID uuid.UUID `sql:"primary_key"` - Slug string - Name string - CreatedAt time.Time - UpdatedAt time.Time - DeletedAt *time.Time -} diff --git a/internal/note/infra/persistence/pgjet/public/table/folders.go b/internal/note/infra/persistence/pgjet/public/table/folders.go deleted file mode 100644 index 57256142..00000000 --- a/internal/note/infra/persistence/pgjet/public/table/folders.go +++ /dev/null @@ -1,102 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. 
-// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package table - -import ( - "github.com/go-jet/jet/v2/postgres" -) - -var Folders = newFoldersTable("public", "folders", "") - -type foldersTable struct { - postgres.Table - - // Columns - ID postgres.ColumnString - Name postgres.ColumnString - Icon postgres.ColumnString - WorkspaceID postgres.ColumnString - ParentID postgres.ColumnString - CreatedAt postgres.ColumnTimestampz - UpdatedAt postgres.ColumnTimestampz - TrashedBy postgres.ColumnString - TrashedAt postgres.ColumnTimestampz - - AllColumns postgres.ColumnList - MutableColumns postgres.ColumnList - DefaultColumns postgres.ColumnList -} - -type FoldersTable struct { - foldersTable - - EXCLUDED foldersTable -} - -// AS creates new FoldersTable with assigned alias -func (a FoldersTable) AS(alias string) *FoldersTable { - return newFoldersTable(a.SchemaName(), a.TableName(), alias) -} - -// Schema creates new FoldersTable with assigned schema name -func (a FoldersTable) FromSchema(schemaName string) *FoldersTable { - return newFoldersTable(schemaName, a.TableName(), a.Alias()) -} - -// WithPrefix creates new FoldersTable with assigned table prefix -func (a FoldersTable) WithPrefix(prefix string) *FoldersTable { - return newFoldersTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) -} - -// WithSuffix creates new FoldersTable with assigned table suffix -func (a FoldersTable) WithSuffix(suffix string) *FoldersTable { - return newFoldersTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) -} - -func newFoldersTable(schemaName, tableName, alias string) *FoldersTable { - return &FoldersTable{ - foldersTable: newFoldersTableImpl(schemaName, tableName, alias), - EXCLUDED: newFoldersTableImpl("", "excluded", ""), - } -} - -func newFoldersTableImpl(schemaName, tableName, alias string) foldersTable { - var ( - IDColumn = postgres.StringColumn("id") - NameColumn = 
postgres.StringColumn("name") - IconColumn = postgres.StringColumn("icon") - WorkspaceIDColumn = postgres.StringColumn("workspace_id") - ParentIDColumn = postgres.StringColumn("parent_id") - CreatedAtColumn = postgres.TimestampzColumn("created_at") - UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - TrashedByColumn = postgres.StringColumn("trashed_by") - TrashedAtColumn = postgres.TimestampzColumn("trashed_at") - allColumns = postgres.ColumnList{IDColumn, NameColumn, IconColumn, WorkspaceIDColumn, ParentIDColumn, CreatedAtColumn, UpdatedAtColumn, TrashedByColumn, TrashedAtColumn} - mutableColumns = postgres.ColumnList{NameColumn, IconColumn, WorkspaceIDColumn, ParentIDColumn, CreatedAtColumn, UpdatedAtColumn, TrashedByColumn, TrashedAtColumn} - defaultColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn} - ) - - return foldersTable{ - Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), - - //Columns - ID: IDColumn, - Name: NameColumn, - Icon: IconColumn, - WorkspaceID: WorkspaceIDColumn, - ParentID: ParentIDColumn, - CreatedAt: CreatedAtColumn, - UpdatedAt: UpdatedAtColumn, - TrashedBy: TrashedByColumn, - TrashedAt: TrashedAtColumn, - - AllColumns: allColumns, - MutableColumns: mutableColumns, - DefaultColumns: defaultColumns, - } -} diff --git a/internal/note/infra/persistence/pgjet/public/table/goose_db_version.go b/internal/note/infra/persistence/pgjet/public/table/goose_db_version.go deleted file mode 100644 index 8d943a43..00000000 --- a/internal/note/infra/persistence/pgjet/public/table/goose_db_version.go +++ /dev/null @@ -1,87 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. 
-// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package table - -import ( - "github.com/go-jet/jet/v2/postgres" -) - -var GooseDbVersion = newGooseDbVersionTable("public", "goose_db_version", "") - -type gooseDbVersionTable struct { - postgres.Table - - // Columns - ID postgres.ColumnInteger - VersionID postgres.ColumnInteger - IsApplied postgres.ColumnBool - Tstamp postgres.ColumnTimestamp - - AllColumns postgres.ColumnList - MutableColumns postgres.ColumnList - DefaultColumns postgres.ColumnList -} - -type GooseDbVersionTable struct { - gooseDbVersionTable - - EXCLUDED gooseDbVersionTable -} - -// AS creates new GooseDbVersionTable with assigned alias -func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable { - return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias) -} - -// Schema creates new GooseDbVersionTable with assigned schema name -func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable { - return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias()) -} - -// WithPrefix creates new GooseDbVersionTable with assigned table prefix -func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable { - return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) -} - -// WithSuffix creates new GooseDbVersionTable with assigned table suffix -func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable { - return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) -} - -func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable { - return &GooseDbVersionTable{ - gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias), - EXCLUDED: newGooseDbVersionTableImpl("", "excluded", ""), - } -} - -func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable { - var ( - IDColumn = 
postgres.IntegerColumn("id") - VersionIDColumn = postgres.IntegerColumn("version_id") - IsAppliedColumn = postgres.BoolColumn("is_applied") - TstampColumn = postgres.TimestampColumn("tstamp") - allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn} - mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn} - defaultColumns = postgres.ColumnList{TstampColumn} - ) - - return gooseDbVersionTable{ - Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), - - //Columns - ID: IDColumn, - VersionID: VersionIDColumn, - IsApplied: IsAppliedColumn, - Tstamp: TstampColumn, - - AllColumns: allColumns, - MutableColumns: mutableColumns, - DefaultColumns: defaultColumns, - } -} diff --git a/internal/note/infra/persistence/pgjet/public/table/note_links.go b/internal/note/infra/persistence/pgjet/public/table/note_links.go deleted file mode 100644 index e49afacb..00000000 --- a/internal/note/infra/persistence/pgjet/public/table/note_links.go +++ /dev/null @@ -1,81 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. 
-// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package table - -import ( - "github.com/go-jet/jet/v2/postgres" -) - -var NoteLinks = newNoteLinksTable("public", "note_links", "") - -type noteLinksTable struct { - postgres.Table - - // Columns - SourceID postgres.ColumnString - TargetID postgres.ColumnString - - AllColumns postgres.ColumnList - MutableColumns postgres.ColumnList - DefaultColumns postgres.ColumnList -} - -type NoteLinksTable struct { - noteLinksTable - - EXCLUDED noteLinksTable -} - -// AS creates new NoteLinksTable with assigned alias -func (a NoteLinksTable) AS(alias string) *NoteLinksTable { - return newNoteLinksTable(a.SchemaName(), a.TableName(), alias) -} - -// Schema creates new NoteLinksTable with assigned schema name -func (a NoteLinksTable) FromSchema(schemaName string) *NoteLinksTable { - return newNoteLinksTable(schemaName, a.TableName(), a.Alias()) -} - -// WithPrefix creates new NoteLinksTable with assigned table prefix -func (a NoteLinksTable) WithPrefix(prefix string) *NoteLinksTable { - return newNoteLinksTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) -} - -// WithSuffix creates new NoteLinksTable with assigned table suffix -func (a NoteLinksTable) WithSuffix(suffix string) *NoteLinksTable { - return newNoteLinksTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) -} - -func newNoteLinksTable(schemaName, tableName, alias string) *NoteLinksTable { - return &NoteLinksTable{ - noteLinksTable: newNoteLinksTableImpl(schemaName, tableName, alias), - EXCLUDED: newNoteLinksTableImpl("", "excluded", ""), - } -} - -func newNoteLinksTableImpl(schemaName, tableName, alias string) noteLinksTable { - var ( - SourceIDColumn = postgres.StringColumn("source_id") - TargetIDColumn = postgres.StringColumn("target_id") - allColumns = postgres.ColumnList{SourceIDColumn, TargetIDColumn} - mutableColumns = postgres.ColumnList{} - defaultColumns = 
postgres.ColumnList{} - ) - - return noteLinksTable{ - Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), - - //Columns - SourceID: SourceIDColumn, - TargetID: TargetIDColumn, - - AllColumns: allColumns, - MutableColumns: mutableColumns, - DefaultColumns: defaultColumns, - } -} diff --git a/internal/note/infra/persistence/pgjet/public/table/notes.go b/internal/note/infra/persistence/pgjet/public/table/notes.go deleted file mode 100644 index 2bdecf5f..00000000 --- a/internal/note/infra/persistence/pgjet/public/table/notes.go +++ /dev/null @@ -1,105 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. -// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package table - -import ( - "github.com/go-jet/jet/v2/postgres" -) - -var Notes = newNotesTable("public", "notes", "") - -type notesTable struct { - postgres.Table - - // Columns - ID postgres.ColumnString - Name postgres.ColumnString - Icon postgres.ColumnString - FolderID postgres.ColumnString - Tags postgres.ColumnStringArray - Size postgres.ColumnInteger - CreatedAt postgres.ColumnTimestampz - UpdatedAt postgres.ColumnTimestampz - TrashedBy postgres.ColumnString - TrashedAt postgres.ColumnTimestampz - - AllColumns postgres.ColumnList - MutableColumns postgres.ColumnList - DefaultColumns postgres.ColumnList -} - -type NotesTable struct { - notesTable - - EXCLUDED notesTable -} - -// AS creates new NotesTable with assigned alias -func (a NotesTable) AS(alias string) *NotesTable { - return newNotesTable(a.SchemaName(), a.TableName(), alias) -} - -// Schema creates new NotesTable with assigned schema name -func (a NotesTable) FromSchema(schemaName string) *NotesTable { - return newNotesTable(schemaName, a.TableName(), a.Alias()) -} - -// WithPrefix creates new NotesTable with assigned table prefix -func (a NotesTable) WithPrefix(prefix string) *NotesTable { - return newNotesTable(a.SchemaName(), prefix+a.TableName(), 
a.TableName()) -} - -// WithSuffix creates new NotesTable with assigned table suffix -func (a NotesTable) WithSuffix(suffix string) *NotesTable { - return newNotesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) -} - -func newNotesTable(schemaName, tableName, alias string) *NotesTable { - return &NotesTable{ - notesTable: newNotesTableImpl(schemaName, tableName, alias), - EXCLUDED: newNotesTableImpl("", "excluded", ""), - } -} - -func newNotesTableImpl(schemaName, tableName, alias string) notesTable { - var ( - IDColumn = postgres.StringColumn("id") - NameColumn = postgres.StringColumn("name") - IconColumn = postgres.StringColumn("icon") - FolderIDColumn = postgres.StringColumn("folder_id") - TagsColumn = postgres.StringArrayColumn("tags") - SizeColumn = postgres.IntegerColumn("size") - CreatedAtColumn = postgres.TimestampzColumn("created_at") - UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - TrashedByColumn = postgres.StringColumn("trashed_by") - TrashedAtColumn = postgres.TimestampzColumn("trashed_at") - allColumns = postgres.ColumnList{IDColumn, NameColumn, IconColumn, FolderIDColumn, TagsColumn, SizeColumn, CreatedAtColumn, UpdatedAtColumn, TrashedByColumn, TrashedAtColumn} - mutableColumns = postgres.ColumnList{NameColumn, IconColumn, FolderIDColumn, TagsColumn, SizeColumn, CreatedAtColumn, UpdatedAtColumn, TrashedByColumn, TrashedAtColumn} - defaultColumns = postgres.ColumnList{SizeColumn, CreatedAtColumn, UpdatedAtColumn} - ) - - return notesTable{ - Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), - - //Columns - ID: IDColumn, - Name: NameColumn, - Icon: IconColumn, - FolderID: FolderIDColumn, - Tags: TagsColumn, - Size: SizeColumn, - CreatedAt: CreatedAtColumn, - UpdatedAt: UpdatedAtColumn, - TrashedBy: TrashedByColumn, - TrashedAt: TrashedAtColumn, - - AllColumns: allColumns, - MutableColumns: mutableColumns, - DefaultColumns: defaultColumns, - } -} diff --git 
a/internal/note/infra/persistence/pgjet/public/table/table_use_schema.go b/internal/note/infra/persistence/pgjet/public/table/table_use_schema.go deleted file mode 100644 index 0972a09e..00000000 --- a/internal/note/infra/persistence/pgjet/public/table/table_use_schema.go +++ /dev/null @@ -1,18 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. -// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package table - -// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke -// this method only once at the beginning of the program. -func UseSchema(schema string) { - Folders = Folders.FromSchema(schema) - GooseDbVersion = GooseDbVersion.FromSchema(schema) - NoteLinks = NoteLinks.FromSchema(schema) - Notes = Notes.FromSchema(schema) - Workspaces = Workspaces.FromSchema(schema) -} diff --git a/internal/note/infra/persistence/pgjet/public/table/workspaces.go b/internal/note/infra/persistence/pgjet/public/table/workspaces.go deleted file mode 100644 index 2da735c0..00000000 --- a/internal/note/infra/persistence/pgjet/public/table/workspaces.go +++ /dev/null @@ -1,93 +0,0 @@ -// -// Code generated by go-jet DO NOT EDIT. 
-// -// WARNING: Changes to this file may cause incorrect behavior -// and will be lost if the code is regenerated -// - -package table - -import ( - "github.com/go-jet/jet/v2/postgres" -) - -var Workspaces = newWorkspacesTable("public", "workspaces", "") - -type workspacesTable struct { - postgres.Table - - // Columns - ID postgres.ColumnString - Slug postgres.ColumnString - Name postgres.ColumnString - CreatedAt postgres.ColumnTimestampz - UpdatedAt postgres.ColumnTimestampz - DeletedAt postgres.ColumnTimestampz - - AllColumns postgres.ColumnList - MutableColumns postgres.ColumnList - DefaultColumns postgres.ColumnList -} - -type WorkspacesTable struct { - workspacesTable - - EXCLUDED workspacesTable -} - -// AS creates new WorkspacesTable with assigned alias -func (a WorkspacesTable) AS(alias string) *WorkspacesTable { - return newWorkspacesTable(a.SchemaName(), a.TableName(), alias) -} - -// Schema creates new WorkspacesTable with assigned schema name -func (a WorkspacesTable) FromSchema(schemaName string) *WorkspacesTable { - return newWorkspacesTable(schemaName, a.TableName(), a.Alias()) -} - -// WithPrefix creates new WorkspacesTable with assigned table prefix -func (a WorkspacesTable) WithPrefix(prefix string) *WorkspacesTable { - return newWorkspacesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) -} - -// WithSuffix creates new WorkspacesTable with assigned table suffix -func (a WorkspacesTable) WithSuffix(suffix string) *WorkspacesTable { - return newWorkspacesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) -} - -func newWorkspacesTable(schemaName, tableName, alias string) *WorkspacesTable { - return &WorkspacesTable{ - workspacesTable: newWorkspacesTableImpl(schemaName, tableName, alias), - EXCLUDED: newWorkspacesTableImpl("", "excluded", ""), - } -} - -func newWorkspacesTableImpl(schemaName, tableName, alias string) workspacesTable { - var ( - IDColumn = postgres.StringColumn("id") - SlugColumn = postgres.StringColumn("slug") - 
NameColumn = postgres.StringColumn("name") - CreatedAtColumn = postgres.TimestampzColumn("created_at") - UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - DeletedAtColumn = postgres.TimestampzColumn("deleted_at") - allColumns = postgres.ColumnList{IDColumn, SlugColumn, NameColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} - mutableColumns = postgres.ColumnList{SlugColumn, NameColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} - defaultColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn} - ) - - return workspacesTable{ - Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), - - //Columns - ID: IDColumn, - Slug: SlugColumn, - Name: NameColumn, - CreatedAt: CreatedAtColumn, - UpdatedAt: UpdatedAtColumn, - DeletedAt: DeletedAtColumn, - - AllColumns: allColumns, - MutableColumns: mutableColumns, - DefaultColumns: defaultColumns, - } -} diff --git a/internal/note/infra/persistence/pgreadmodel/checkslugexists.go b/internal/note/infra/persistence/pgreadmodel/checkslugexists.go new file mode 100644 index 00000000..32255b91 --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/checkslugexists.go @@ -0,0 +1,31 @@ +package pgreadmodel + +import ( + "context" + + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type CheckWorkspaceSlugExists struct { + queries *pgsqlc.Queries +} + +var _ app.CheckWorkspaceSlugExistsReadModel = (*CheckWorkspaceSlugExists)(nil) + +func NewCheckWorkspaceSlugExists(queries *pgsqlc.Queries) *CheckWorkspaceSlugExists { + return &CheckWorkspaceSlugExists{queries: queries} +} + +var ProvideCheckWorkspaceSlugExists = NewCheckWorkspaceSlugExists + +func (h *CheckWorkspaceSlugExists) CheckWorkspaceSlugExists(ctx context.Context, q *app.CheckWorkspaceSlugExists) (*app.CheckWorkspaceSlugExistsResult, error) { + exists, err := h.queries.CheckSlugExists(ctx, q.Slug) + if err != nil { + return nil, toErr(err) + } + + 
return &app.CheckWorkspaceSlugExistsResult{ + Exists: exists, + }, nil +} diff --git a/internal/note/infra/persistence/pgreadmodel/err.go b/internal/note/infra/persistence/pgreadmodel/err.go new file mode 100644 index 00000000..8a1972f7 --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/err.go @@ -0,0 +1,34 @@ +package pgreadmodel + +import ( + "errors" + + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5/pgconn" + "github.com/notopia-uit/notopia/internal/note/errs" +) + +func toErr(err error) error { + var pgErr *pgconn.PgError + if !errors.As(err, &pgErr) { + return errs.NewPersistenceInternal( + "an unexpected error occurred, not a pg error", + err, + ) + } + switch pgErr.Code { + case pgerrcode.StringDataRightTruncationDataException, + pgerrcode.InvalidTextRepresentation, + pgerrcode.InvalidBinaryRepresentation, + pgerrcode.SerializationFailure: + return errs.NewPersistenceInvalid( + "invalid data", + err, + ) + default: + return errs.NewPersistenceInternal( + "an unexpected error occurred", + err, + ) + } +} diff --git a/internal/note/infra/persistence/pgreadmodel/graph.go b/internal/note/infra/persistence/pgreadmodel/graph.go new file mode 100644 index 00000000..e490b3d2 --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/graph.go @@ -0,0 +1,85 @@ +package pgreadmodel + +import ( + "math" + + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +func calculateGraphWeight(size, minSize, maxSize int32) float64 { + var w float64 + if maxSize == minSize { + w = 1 + } else { + w = float64(size-minSize) / float64(maxSize-minSize) + } + return w +} + +func buildGraph(notes []*pgsqlc.Note, links []*pgsqlc.NoteLink, reachableIDs map[string]bool) *app.Graph { + var minSize int32 = math.MaxInt32 + var maxSize int32 = -1 + reachableNotesMap := make(map[uuid.UUID]*pgsqlc.Note) + + for _, n := range notes { + if reachableIDs[n.ID.String()] 
{ + reachableNotesMap[n.ID] = n + if n.Size < minSize { + minSize = n.Size + } + if n.Size > maxSize { + maxSize = n.Size + } + } + } + + var graphNodes []*app.GraphNode + var graphLinks []*app.GraphLink + tagsAdded := make(map[string]bool) + + for _, n := range reachableNotesMap { + graphNodes = append(graphNodes, &app.GraphNode{ + ID: n.ID.String(), + Name: n.Name, + Type: app.GraphNodeTypeNote, + Weight: calculateGraphWeight(n.Size, minSize, maxSize), + }) + + for _, tag := range n.Tags { + tagID := "#" + tag + + if reachableIDs[tagID] { + if !tagsAdded[tagID] { + graphNodes = append(graphNodes, &app.GraphNode{ + ID: tagID, + Name: tag, + Type: app.GraphNodeTypeTag, + Weight: 0, + }) + tagsAdded[tagID] = true + } + + graphLinks = append(graphLinks, &app.GraphLink{ + Source: n.ID.String(), + Target: tagID, + }) + } + } + } + + for _, l := range links { + if reachableIDs[l.SourceID.String()] && reachableIDs[l.TargetID.String()] { + graphLinks = append(graphLinks, &app.GraphLink{ + Source: l.SourceID.String(), + Target: l.TargetID.String(), + }) + } + } + + return &app.Graph{ + Nodes: graphNodes, + Links: graphLinks, + } +} diff --git a/internal/note/infra/persistence/pgreadmodel/note.go b/internal/note/infra/persistence/pgreadmodel/note.go new file mode 100644 index 00000000..e491421c --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/note.go @@ -0,0 +1,24 @@ +package pgreadmodel + +import ( + "context" + + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type Note struct { + queries *pgsqlc.Queries +} + +var _ app.GetNoteReadModel = (*Note)(nil) + +func GetNote(queries *pgsqlc.Queries) *Note { + return &Note{queries: queries} +} + +var ProvideNote = GetNote + +func (h *Note) GetNote(ctx context.Context, q *app.GetNote) (*app.Note, error) { + return nil, nil +} diff --git a/internal/note/infra/persistence/pgreadmodel/notegraph.go 
b/internal/note/infra/persistence/pgreadmodel/notegraph.go new file mode 100644 index 00000000..54fdc941 --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/notegraph.go @@ -0,0 +1,82 @@ +package pgreadmodel + +import ( + "context" + + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type NoteGraph struct { + queries *pgsqlc.Queries +} + +var _ app.GetNoteGraphReadModel = (*NoteGraph)(nil) + +func NewNoteGraph(queries *pgsqlc.Queries) *NoteGraph { + return &NoteGraph{queries: queries} +} + +var ProvideNoteGraph = NewNoteGraph + +func (h *NoteGraph) GetNoteGraph(ctx context.Context, q *app.GetNoteGraph) (*app.Graph, error) { + workspaceID, err := h.queries.GetWorkspaceIDByNoteID(ctx, q.ID) + if err != nil { + return nil, toErr(err) + } + + notes, err := h.queries.ReadGetNotesInWorkspace(ctx, pgsqlc.ReadGetNotesInWorkspaceParams{ + WorkspaceID: workspaceID, + ExcludeTrash: true, + }) + if err != nil { + return nil, toErr(err) + } + + links, err := h.queries.ReadGetNoteLinksInWorkspace(ctx, workspaceID) + if err != nil { + return nil, toErr(err) + } + + adj := make(map[string][]string) + for _, n := range notes { + for _, tag := range n.Tags { + tagID := "#" + tag + adj[n.ID.String()] = append(adj[n.ID.String()], tagID) + adj[tagID] = append(adj[tagID], n.ID.String()) + } + } + for _, l := range links { + adj[l.SourceID.String()] = append(adj[l.SourceID.String()], l.TargetID.String()) + adj[l.TargetID.String()] = append(adj[l.TargetID.String()], l.SourceID.String()) + } + + reachableIDs := make(map[string]bool) + type queueItem struct { + id string + depth int + } + + startID := q.ID.String() + queue := []queueItem{{id: startID, depth: 0}} + + for len(queue) > 0 { + curr := queue[0] + queue = queue[1:] + + if reachableIDs[curr.id] { + continue + } + reachableIDs[curr.id] = true + + if curr.depth < q.Depth { + for _, neighbor := range adj[curr.id] { + if 
!reachableIDs[neighbor] { + queue = append(queue, queueItem{id: neighbor, depth: curr.depth + 1}) + } + } + } + } + + return buildGraph(notes, links, reachableIDs), nil +} diff --git a/internal/note/infra/persistence/pgreadmodel/notelinks.go b/internal/note/infra/persistence/pgreadmodel/notelinks.go new file mode 100644 index 00000000..101cca1e --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/notelinks.go @@ -0,0 +1,122 @@ +package pgreadmodel + +import ( + "context" + "errors" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type NoteLinks struct { + queries *pgsqlc.Queries +} + +var _ app.GetNoteLinksReadModel = (*NoteLinks)(nil) + +func GetNoteLinks(queries *pgsqlc.Queries) *NoteLinks { + return &NoteLinks{queries: queries} +} + +var ProvideNoteLinks = GetNoteLinks + +func (h *NoteLinks) GetNoteLinks(ctx context.Context, q *app.GetNoteLinks) (*app.NoteLinkResult, error) { + _, err := h.queries.GetNoteByID(ctx, + //exhaustruct:ignore + pgsqlc.GetNoteByIDParams{ + ID: q.ID, + }, + ) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewNoteNotFound(q.ID, err) + } + return nil, toErr(err) + } + + result := app.NoteLinkResult{ + OutgoingLinks: []*app.NoteLink{}, + Backlinks: []*app.NoteLink{}, + } + + if q.OutgoingLinks { + outgoingLinks, err := h.getOutgoingLinks(ctx, q.ID) + if err != nil { + return nil, err + } + result.OutgoingLinks = outgoingLinks + } + + if q.Backlinks { + backlinks, err := h.getBacklinks(ctx, q.ID) + if err != nil { + return nil, err + } + result.Backlinks = backlinks + } + + return &result, nil +} + +func (h *NoteLinks) getOutgoingLinks(ctx context.Context, noteID uuid.UUID) ([]*app.NoteLink, error) { + outgoingLinks, err := h.queries.ReadGetNoteOutgoingLinks(ctx, noteID) + if err != nil && !errors.Is(err, 
pgx.ErrNoRows) { + return nil, toErr(err) + } + + if len(outgoingLinks) == 0 { + return []*app.NoteLink{}, nil + } + + outgoingNotes, err := h.queries.ReadGetNotesByIDs(ctx, outgoingLinks) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, toErr(err) + } + + result := make([]*app.NoteLink, len(outgoingNotes)) + for i, linkedNote := range outgoingNotes { + var icon string + if linkedNote.Icon != nil { + icon = *linkedNote.Icon + } + result[i] = &app.NoteLink{ + ID: linkedNote.ID, + Name: linkedNote.Name, + Icon: icon, + } + } + return result, nil +} + +func (h *NoteLinks) getBacklinks(ctx context.Context, noteID uuid.UUID) ([]*app.NoteLink, error) { + backlinks, err := h.queries.ReadGetNoteBacklinks(ctx, noteID) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, toErr(err) + } + + if len(backlinks) == 0 { + return []*app.NoteLink{}, nil + } + + backlinkNotes, err := h.queries.ReadGetNotesByIDs(ctx, backlinks) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, toErr(err) + } + + result := make([]*app.NoteLink, len(backlinkNotes)) + for i, linkedNote := range backlinkNotes { + var icon string + if linkedNote.Icon != nil { + icon = *linkedNote.Icon + } + result[i] = &app.NoteLink{ + ID: linkedNote.ID, + Name: linkedNote.Name, + Icon: icon, + } + } + return result, nil +} diff --git a/internal/note/infra/persistence/pgreadmodel/showtrash.go b/internal/note/infra/persistence/pgreadmodel/showtrash.go new file mode 100644 index 00000000..8c447a8e --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/showtrash.go @@ -0,0 +1,73 @@ +package pgreadmodel + +import ( + "context" + "errors" + + "github.com/jackc/pgx/v5" + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type ShowTrash struct { + queries *pgsqlc.Queries +} + +var _ app.ShowTrashReadModel = (*ShowTrash)(nil) + +func NewShowTrash(queries *pgsqlc.Queries) *ShowTrash { + return 
&ShowTrash{queries: queries} +} + +var ProvideShowTrash = NewShowTrash + +func (h *ShowTrash) ShowTrash(ctx context.Context, q *app.ShowTrash) (*app.Trash, error) { + trashedNotes, err := h.queries.ReadGetTrashedNotesByWorkspaceID(ctx, q.WorkspaceID) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, toErr(err) + } + + trashedFolders, err := h.queries.ReadGetTrashedFolderByWorkspaceID(ctx, q.WorkspaceID) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, toErr(err) + } + + notes := make([]*app.TrashedNote, len(trashedNotes)) + for i, note := range trashedNotes { + var icon string + if note.Icon != nil { + icon = *note.Icon + } + notes[i] = &app.TrashedNote{ + ID: note.ID, + Name: note.Name, + Icon: icon, + Trashed: app.Trashed{ + TrashedBy: app.TrashedByPurpose, + TrashedAt: *note.TrashedAt, + }, + } + } + + folders := make([]*app.TrashedFolder, len(trashedFolders)) + for i, folder := range trashedFolders { + var icon string + if folder.Icon != nil { + icon = *folder.Icon + } + folders[i] = &app.TrashedFolder{ + ID: folder.ID, + Name: folder.Name, + Icon: icon, + Trashed: app.Trashed{ + TrashedBy: app.TrashedByPurpose, + TrashedAt: *folder.TrashedAt, + }, + } + } + + return &app.Trash{ + Notes: notes, + Folders: folders, + }, nil +} diff --git a/internal/note/infra/persistence/pgreadmodel/workspacebyslug.go b/internal/note/infra/persistence/pgreadmodel/workspacebyslug.go new file mode 100644 index 00000000..65bd0940 --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/workspacebyslug.go @@ -0,0 +1,39 @@ +package pgreadmodel + +import ( + "context" + "errors" + + "github.com/jackc/pgx/v5" + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type WorkspaceBySlug struct { + queries *pgsqlc.Queries +} + +var _ app.WorkspaceBySlugReadModel = (*WorkspaceBySlug)(nil) + +func 
NewWorkspaceBySlug(queries *pgsqlc.Queries) *WorkspaceBySlug { + return &WorkspaceBySlug{queries: queries} +} + +var ProvideWorkspaceBySlug = NewWorkspaceBySlug + +func (h *WorkspaceBySlug) GetWorkspaceBySlug(ctx context.Context, q *app.GetWorkspaceBySlug) (*app.Workspace, error) { + workspace, err := h.queries.ReadGetWorkspaceBySlug(ctx, q.Slug) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewWorkspaceBySlugNotFound(q.Slug, err) + } + return nil, toErr(err) + } + + return &app.Workspace{ + ID: workspace.ID, + Slug: workspace.Slug, + Name: workspace.Name, + }, nil +} diff --git a/internal/note/infra/persistence/pgreadmodel/workspacegraph.go b/internal/note/infra/persistence/pgreadmodel/workspacegraph.go new file mode 100644 index 00000000..6da86ae3 --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/workspacegraph.go @@ -0,0 +1,62 @@ +package pgreadmodel + +import ( + "context" + + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type WorkspaceGraph struct { + queries *pgsqlc.Queries +} + +var _ app.GetWorkspaceGraphReadModel = (*WorkspaceGraph)(nil) + +func GetWorkspaceGraph(queries *pgsqlc.Queries) *WorkspaceGraph { + return &WorkspaceGraph{queries: queries} +} + +var ProvideWorkspaceGraph = GetWorkspaceGraph + +func (h *WorkspaceGraph) GetWorkspaceGraph(ctx context.Context, q *app.GetWorkspaceGraph) (*app.Graph, error) { + notes, err := h.queries.ReadGetNotesInWorkspace(ctx, pgsqlc.ReadGetNotesInWorkspaceParams{ + WorkspaceID: q.ID, + ExcludeTrash: true, + }) + if err != nil { + return nil, toErr(err) + } + + links, err := h.queries.ReadGetNoteLinksInWorkspace(ctx, q.ID) + if err != nil { + return nil, toErr(err) + } + + reachableIDs := make(map[string]bool) + + if q.IgnoreOrphans { + adj := make(map[string]bool) + for _, l := range links { + adj[l.SourceID.String()] = true + adj[l.TargetID.String()] = true + } + for _, n := range notes { + 
if len(n.Tags) > 0 || adj[n.ID.String()] { + reachableIDs[n.ID.String()] = true + for _, tag := range n.Tags { + reachableIDs["#"+tag] = true + } + } + } + } else { + for _, n := range notes { + reachableIDs[n.ID.String()] = true + for _, tag := range n.Tags { + reachableIDs["#"+tag] = true + } + } + } + + return buildGraph(notes, links, reachableIDs), nil +} diff --git a/internal/note/infra/persistence/pgreadmodel/workspacetree.go b/internal/note/infra/persistence/pgreadmodel/workspacetree.go new file mode 100644 index 00000000..aa733fcd --- /dev/null +++ b/internal/note/infra/persistence/pgreadmodel/workspacetree.go @@ -0,0 +1,165 @@ +package pgreadmodel + +import ( + "context" + "errors" + "sort" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type WorkspaceTree struct { + queries *pgsqlc.Queries +} + +var _ app.GetWorkspaceTreeReadModel = (*WorkspaceTree)(nil) + +func NewWorkspaceTree(queries *pgsqlc.Queries) *WorkspaceTree { + return &WorkspaceTree{queries: queries} +} + +var ProvideWorkspaceTree = NewWorkspaceTree + +func (h *WorkspaceTree) GetWorkspaceTree(ctx context.Context, q *app.GetWorkspaceTree) (*app.WorkspaceTreeFolder, error) { + var rootFolderID uuid.UUID + + if q.RootFolderID != uuid.Nil { + rootFolderID = q.RootFolderID + } else { + rootFolderIDs, err := h.queries.ReadGetRootFolderIDsByWorkspaceID(ctx, q.WorkspaceID) + if err != nil { + return nil, toErr(err) + } + if len(rootFolderIDs) == 0 { + return nil, errs.NewWorkspaceRootFolderNotFound(q.WorkspaceID, pgx.ErrNoRows) + } + rootFolderID = rootFolderIDs[0] + } + + rootFolder, err := h.queries.ReadGetFolderByID(ctx, rootFolderID) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewFolderNotFound(rootFolderID, err) + } + return nil, toErr(err) + } + + var depth *int32 + 
if q.Depth != 0 { + depth = new(int32(q.Depth)) + } + recursiveFolders, err := h.queries.ReadGetRecursiveFolderByParentID(ctx, &pgsqlc.ReadGetRecursiveFolderByParentIDParams{ + ParentID: rootFolderID, + Depth: depth, + IncludeTrashed: q.IncludeTrashed, + }) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, toErr(err) + } + + var folderIDs []uuid.UUID + folderIDs = append(folderIDs, rootFolderID) + folderMap := make(map[uuid.UUID]*pgsqlc.ReadGetRecursiveFolderByParentIDRow) + childrenByParentID := make(map[uuid.UUID][]*pgsqlc.ReadGetRecursiveFolderByParentIDRow) + + for _, folder := range recursiveFolders { + folderIDs = append(folderIDs, folder.ID) + folderMap[folder.ID] = folder + } + + // Precompute parent -> children mapping for O(1) access + for _, folder := range recursiveFolders { + if folder.ParentID != nil { + childrenByParentID[*folder.ParentID] = append(childrenByParentID[*folder.ParentID], folder) + } + } + + // Sort children by name for deterministic ordering + for _, children := range childrenByParentID { + sort.Slice(children, func(i, j int) bool { + return children[i].Name < children[j].Name + }) + } + + allNotes, err := h.queries.ReadGetNotesByFolderIDs(ctx, pgsqlc.ReadGetNotesByFolderIDsParams{ + FolderIds: folderIDs, + ExcludeTrash: !q.IncludeTrashed, + }) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, toErr(err) + } + + notesByFolder := make(map[uuid.UUID][]*pgsqlc.Note) + for _, note := range allNotes { + notesByFolder[note.FolderID] = append(notesByFolder[note.FolderID], note) + } + + tree := h.buildFolderTree( + rootFolder.ID, + rootFolder.Name, + rootFolder.Icon, + rootFolder.UpdatedAt, + childrenByParentID, + notesByFolder, + ) + return tree, nil +} + +func (h *WorkspaceTree) buildFolderTree( + folderID uuid.UUID, + folderName string, + folderIcon *string, + updatedAt time.Time, + childrenByParentID map[uuid.UUID][]*pgsqlc.ReadGetRecursiveFolderByParentIDRow, + notesByFolder map[uuid.UUID][]*pgsqlc.Note, +) 
*app.WorkspaceTreeFolder { + var icon string + if folderIcon != nil { + icon = *folderIcon + } + result := app.WorkspaceTreeFolder{ + ID: folderID, + Name: folderName, + Icon: icon, + UpdatedAt: updatedAt, + Notes: []*app.WorkspaceTreeNote{}, + Children: []*app.WorkspaceTreeFolder{}, + } + + if notes, ok := notesByFolder[folderID]; ok { + for _, note := range notes { + var noteIcon string + if note.Icon != nil { + noteIcon = *note.Icon + } + result.Notes = append(result.Notes, &app.WorkspaceTreeNote{ + ID: note.ID, + Name: note.Name, + Icon: noteIcon, + UpdatedAt: note.UpdatedAt, + }) + } + } + + // Only iterate through direct children, not the entire folderMap + if children, ok := childrenByParentID[folderID]; ok { + for _, childFolder := range children { + childTree := h.buildFolderTree( + childFolder.ID, + childFolder.Name, + childFolder.Icon, + childFolder.UpdatedAt, + childrenByParentID, + notesByFolder, + ) + result.Children = append(result.Children, childTree) + } + } + + return &result +} diff --git a/internal/note/infra/persistence/pgrepo/common.go b/internal/note/infra/persistence/pgrepo/common.go new file mode 100644 index 00000000..ad6ede17 --- /dev/null +++ b/internal/note/infra/persistence/pgrepo/common.go @@ -0,0 +1,30 @@ +package pgrepo + +import ( + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +func fromDomainTrashedBy(trashedBy domain.TrashedBy) (*string, bool) { + switch trashedBy { + case domain.TrashedByPurpose: + return new(string(pgsqlc.TrashedByPurpose)), true + case domain.TrashedByParent: + return new(string(pgsqlc.TrashedByParent)), true + case domain.TrashedByUnspecified: + return nil, false + default: + return nil, false + } +} + +func toDomainTrashedBy(trashedBy string) domain.TrashedBy { + switch trashedBy { + case string(pgsqlc.TrashedByPurpose): + return domain.TrashedByPurpose + case string(pgsqlc.TrashedByParent): + return 
domain.TrashedByParent + default: + return domain.TrashedByUnspecified + } +} diff --git a/internal/note/infra/persistence/pg/err.go b/internal/note/infra/persistence/pgrepo/err.go similarity index 93% rename from internal/note/infra/persistence/pg/err.go rename to internal/note/infra/persistence/pgrepo/err.go index a4b6a837..1e220919 100644 --- a/internal/note/infra/persistence/pg/err.go +++ b/internal/note/infra/persistence/pgrepo/err.go @@ -1,4 +1,4 @@ -package pg +package pgrepo import ( "errors" @@ -8,7 +8,7 @@ import ( "github.com/notopia-uit/notopia/internal/note/errs" ) -func toDomainError(err error) errs.Error { +func toErr(err error) error { var pgErr *pgconn.PgError if !errors.As(err, &pgErr) { return errs.NewPersistenceInternal( diff --git a/internal/note/infra/persistence/pgrepo/folder.go b/internal/note/infra/persistence/pgrepo/folder.go new file mode 100644 index 00000000..eb96deb2 --- /dev/null +++ b/internal/note/infra/persistence/pgrepo/folder.go @@ -0,0 +1,303 @@ +package pgrepo + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type Folder struct { + pgxPool *pgxpool.Pool + queries *pgsqlc.Queries + publisher Publisher // This is nil when not in transaction, because we will provide it inside a transaction + runInTx *RunInTx + inTransaction bool +} + +var _ domain.FolderRepo = (*Folder)(nil) + +func NewFolder( + pgxPool *pgxpool.Pool, + queries *pgsqlc.Queries, + publisher Publisher, + runInTx *RunInTx, + inTransaction bool, +) *Folder { + return &Folder{ + pgxPool: pgxPool, + queries: queries, + publisher: publisher, + runInTx: runInTx, + inTransaction: inTransaction, + } +} + +func NewNoTransactionFolder( + pgxPool *pgxpool.Pool, + queries *pgsqlc.Queries, + runInTx 
*RunInTx, +) *Folder { + return NewFolder(pgxPool, queries, nil, runInTx, false) +} + +var ProvideFolder = NewNoTransactionFolder + +func (f *Folder) GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*domain.Folder, error) { + folder, err := f.queries.GetFolder(ctx, pgsqlc.GetFolderParams{ + ID: id, + ForUpdate: forUpdate, + }) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewFolderNotFound(id, err) + } + return nil, toErr(err) + } + return folderToDomainRepo(folder), nil +} + +func (f *Folder) GetMany(ctx context.Context, params *domain.FolderRepoGetManyParams) ([]*domain.Folder, error) { + var ids *[]uuid.UUID + if len(params.IDs) > 0 { + ids = ¶ms.IDs + } + + var workspaceID *uuid.UUID + if params.WorkspaceID != uuid.Nil { + workspaceID = ¶ms.WorkspaceID + } + + var trashedBy *string + if params.TrashedBy != domain.TrashedByUnspecified { + var ok bool + trashedBy, ok = fromDomainTrashedBy(params.TrashedBy) + if !ok { + return nil, errs.NewPersistenceInvalid(fmt.Sprintf("invalid trashed by value: %v", params.TrashedBy), nil) + } + } + + folders, err := f.queries.GetFolders(ctx, + &pgsqlc.GetFoldersParams{ + IDs: ids, + WorkspaceID: workspaceID, + TrashedBy: trashedBy, + TrashedOnly: params.TrashOnly, + ForUpdate: params.ForUpdate, + }) + if err != nil { + return nil, toErr(err) + } + + result := make([]*domain.Folder, len(folders)) + for i, folder := range folders { + result[i] = folderToDomainRepo(folder) + } + return result, nil +} + +func folderToDomainRepo(folder *pgsqlc.Folder) *domain.Folder { + var icon string + if folder.Icon != nil { + icon = *folder.Icon + } + var trashed *domain.Trashed + if folder.TrashedBy != nil && folder.TrashedAt != nil { + trashed = domain.NewTrashed( + toDomainTrashedBy(*folder.TrashedBy), + *folder.TrashedAt, + ) + } + var parentID uuid.UUID + if folder.ParentID != nil { + parentID = *folder.ParentID + } + return domain.UnmarshalFolder( + folder.ID, + folder.Name, + icon, + 
folder.WorkspaceID, + domain.NewFolderHierarchy(parentID), + trashed, + false, + ) +} + +func (f *Folder) Save(ctx context.Context, folder *domain.Folder) (cerr error) { + return f.runInTx.Execute(ctx, &runInTxParams{ + pgxPool: f.pgxPool, + queries: f.queries, + publisher: f.publisher, + inTransaction: f.inTransaction, + }, func(params *RunInTxFnParams) error { + queries := params.queries + if folder.Deleted() { + if err := queries.PermanentlyDeleteFolderByID(ctx, folder.ID()); err != nil { + return toErr(err) + } + } else { + var icon *string + if folder.Icon() != "" { + icon = new(folder.Icon()) + } + var parentID *uuid.UUID + if folder.ParentID() != uuid.Nil { + parentID = new(folder.ParentID()) + } + var trashedBy *string + var trashedAt *time.Time + if folder.IsTrashed() { + trashedBy = new(folder.TrashedBy().String()) + trashedAt = new(folder.TrashedAt()) + } + if err := queries.SaveFolder(ctx, &pgsqlc.SaveFolderParams{ + ID: folder.ID(), + Name: folder.Name(), + Icon: icon, + WorkspaceID: folder.WorkspaceID(), + ParentID: parentID, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + TrashedBy: trashedBy, + TrashedAt: trashedAt, + }); err != nil { + return toErr(err) + } + } + for _, event := range folder.PopEvents() { + if err := params.publisher.PublishWorkspaceItem(ctx, event, folder.WorkspaceID()); err != nil { + return errs.NewPersistenceInternal("failed to publish events", err) + } + } + return nil + }) +} + +func (f *Folder) SaveMany(ctx context.Context, folders []*domain.Folder) (cerr error) { + return f.runInTx.Execute(ctx, &runInTxParams{ + pgxPool: f.pgxPool, + queries: f.queries, + publisher: f.publisher, + inTransaction: f.inTransaction, + }, func(params *RunInTxFnParams) error { + var deleteIDs []uuid.UUID + var upsertFolders []*domain.Folder + + for _, folder := range folders { + if folder.Deleted() { + deleteIDs = append(deleteIDs, folder.ID()) + } else { + upsertFolders = append(upsertFolders, folder) + } + } + + if err := f.deleteMany(ctx, 
params.queries, deleteIDs); err != nil { + return err + } + + if err := f.upsertMany(ctx, params.queries, upsertFolders); err != nil { + return err + } + + for _, folder := range folders { + for _, event := range folder.PopEvents() { + if err := params.publisher.PublishWorkspaceItem(ctx, event, folder.WorkspaceID()); err != nil { + return errs.NewPersistenceInternal("failed to publish events", err) + } + } + } + return nil + }) +} + +func (f *Folder) deleteMany(ctx context.Context, queries *pgsqlc.Queries, deleteIDs []uuid.UUID) error { + if len(deleteIDs) == 0 { + return nil + } + if err := queries.PermanentlyDeleteFoldersByIDs(ctx, deleteIDs); err != nil { + return toErr(err) + } + return nil +} + +func (f *Folder) upsertMany(ctx context.Context, queries *pgsqlc.Queries, folders []*domain.Folder) error { + if err := queries.CreateTempTableFolders(ctx); err != nil { + return toErr(err) + } + saveFolderParams := make([]*pgsqlc.InsertTempFoldersParams, len(folders)) + for i, folder := range folders { + var icon *string + if folder.Icon() != "" { + icon = new(folder.Icon()) + } + var parentID *uuid.UUID + if folder.ParentID() != uuid.Nil { + parentID = new(folder.ParentID()) + } + var trashedBy *string + var trashedAt *time.Time + if folder.IsTrashed() { + trashedBy = new(folder.TrashedBy().String()) + trashedAt = new(folder.TrashedAt()) + } + saveFolderParams[i] = &pgsqlc.InsertTempFoldersParams{ + ID: folder.ID(), + Name: folder.Name(), + Icon: icon, + WorkspaceID: folder.WorkspaceID(), + ParentID: parentID, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + TrashedBy: trashedBy, + TrashedAt: trashedAt, + } + } + affected, err := queries.InsertTempFolders(ctx, saveFolderParams) + if err != nil { + return toErr(err) + } + if affected != int64(len(folders)) { + return errs.NewPersistenceInvalid(fmt.Sprintf("not all folders were inserted into temp table (expected %d, got %d)", len(folders), affected), nil) + } + if err = queries.SaveFromTempFolders(ctx); err != nil { 
+ return toErr(err) + } + return nil +} + +func (f *Folder) AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, error) { + count, err := f.queries.CountFoldersInWorkspaceByIDs(ctx, &pgsqlc.CountFoldersInWorkspaceByIDsParams{ + IDs: ids, + WorkspaceID: workspaceID, + }) + if err != nil { + return false, toErr(err) + } + return count == int64(len(ids)), nil +} + +func (f *Folder) GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, error) { + workspaceID, err := f.queries.GetWorkspaceIDByFolderID(ctx, id) + if err != nil { + return uuid.Nil, toErr(err) + } + return workspaceID, nil +} + +func (f *Folder) GetParentIDs(ctx context.Context, id uuid.UUID, forUpdate bool) ([]uuid.UUID, error) { + parentIDs, err := f.queries.GetParentIDsByFolderID(ctx, pgsqlc.GetParentIDsByFolderIDParams{ + ID: id, + ForUpdate: forUpdate, + }) + if err != nil { + return nil, toErr(err) + } + return parentIDs, nil +} diff --git a/internal/note/infra/persistence/pgrepo/note.go b/internal/note/infra/persistence/pgrepo/note.go new file mode 100644 index 00000000..316e3ff6 --- /dev/null +++ b/internal/note/infra/persistence/pgrepo/note.go @@ -0,0 +1,380 @@ +package pgrepo + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +// TODO: what, it has so many fmt error? 
we have to map to toErr only +type Note struct { + pgxPool *pgxpool.Pool + queries *pgsqlc.Queries + publisher Publisher + runInTx *RunInTx + inTransaction bool +} + +var _ domain.NoteRepo = (*Note)(nil) + +func NewNote( + pgxPool *pgxpool.Pool, + queries *pgsqlc.Queries, + publisher Publisher, + runInTx *RunInTx, + inTransaction bool, +) *Note { + return &Note{ + pgxPool: pgxPool, + queries: queries, + publisher: publisher, + runInTx: runInTx, + inTransaction: inTransaction, + } +} + +func NewNoTransactionNote( + pgxPool *pgxpool.Pool, + queries *pgsqlc.Queries, + runInTx *RunInTx, +) *Note { + return NewNote(pgxPool, queries, nil, runInTx, false) +} + +var ProvideNote = NewNoTransactionNote + +func (n *Note) GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*domain.Note, error) { + note, err := n.queries.GetNoteByID(ctx, pgsqlc.GetNoteByIDParams{ + ID: id, + ForUpdate: forUpdate, + }) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewNoteNotFound(id, err) + } + return nil, toErr(err) + } + + links, err := n.queries.GetNoteOutgoingLinks(ctx, id) + if err != nil { + return nil, toErr(err) + } + + return noteToDomainRepo(note, links), nil +} + +func (n *Note) GetMany(ctx context.Context, params *domain.NoteRepoGetManyParams) ([]*domain.Note, error) { + var ids *[]uuid.UUID + if len(params.IDs) > 0 { + ids = ¶ms.IDs + } + + var workspaceID *uuid.UUID + if params.WorkspaceID != uuid.Nil { + workspaceID = ¶ms.WorkspaceID + } + + var trashedBy *string + if params.TrashedBy != domain.TrashedByUnspecified { + var ok bool + trashedBy, ok = fromDomainTrashedBy(params.TrashedBy) + if !ok { + return nil, errs.NewPersistenceInvalid(fmt.Sprintf("invalid trashed by value: %v", params.TrashedBy), nil) + } + } + + notes, err := n.queries.GetNotes(ctx, &pgsqlc.GetNotesParams{ + IDs: ids, + WorkspaceID: workspaceID, + TrashedBy: trashedBy, + TrashedOnly: params.TrashOnly, + ForUpdate: params.ForUpdate, + }) + if err != nil { + return nil, 
toErr(err) + } + + // FIXME: Hey, this is N+1 query + // Query links per note (type-safe approach) + noteIDs := make([]uuid.UUID, len(notes)) + for i, note := range notes { + noteIDs[i] = note.ID + } + noteIDOutgoingLinksPairs, err := n.queries.GetNotesOutgoingLinks(ctx, noteIDs) + if err != nil { + return nil, toErr(err) + } + noteIDOutgoingLinksMap := make(map[uuid.UUID][]uuid.UUID, len(noteIDOutgoingLinksPairs)) + for _, pair := range noteIDOutgoingLinksPairs { + targetIDs, ok := pair.TargetIDs.([]uuid.UUID) + if !ok { + return nil, errs.NewPersistenceInvalid(fmt.Sprintf("invalid type for target ids: %T", pair.TargetIDs), nil) + } + noteIDOutgoingLinksMap[pair.SourceID] = targetIDs + } + result := make([]*domain.Note, len(notes)) + for i, note := range notes { + links := noteIDOutgoingLinksMap[note.ID] + result[i] = noteToDomainRepo(note, links) + } + return result, nil +} + +func noteToDomainRepo(note *pgsqlc.Note, links []uuid.UUID) *domain.Note { + var icon string + if note.Icon != nil { + icon = *note.Icon + } + var trashed *domain.Trashed + if note.TrashedBy != nil && note.TrashedAt != nil { + trashed = domain.NewTrashed( + toDomainTrashedBy(*note.TrashedBy), + *note.TrashedAt, + ) + } + return domain.UnmarshalNote( + note.ID, + note.Name, + icon, + note.Tags, + uint64(note.Size), + note.FolderID, + links, + trashed, + false, + ) +} + +// TODO: It doesn't save the outgoing links +func (n *Note) Save(ctx context.Context, note *domain.Note) error { + return n.runInTx.Execute(ctx, &runInTxParams{ + pgxPool: n.pgxPool, + queries: n.queries, + publisher: n.publisher, + inTransaction: n.inTransaction, + }, func(params *RunInTxFnParams) error { + queries := params.queries + if note.Deleted() { + if err := queries.PermanentlyDeleteNoteByID(ctx, note.ID()); err != nil { + return toErr(err) + } + } else { + var icon *string + if note.Icon() != "" { + icon = new(note.Icon()) + } + var trashedBy *string + var trashedAt *time.Time + if note.IsTrashed() { + by := 
note.TrashedBy().String() + trashedBy = &by + t := note.TrashedAt() + trashedAt = &t + } + err := queries.SaveNote(ctx, &pgsqlc.SaveNoteParams{ + ID: note.ID(), + Name: note.Name(), + Icon: icon, + FolderID: note.FolderID(), + Tags: note.Tags(), + Size: int32(note.Size()), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + TrashedBy: trashedBy, + TrashedAt: trashedAt, + }) + if err != nil { + return toErr(err) + } + if err := queries.CreateTempTableNoteLinks(ctx); err != nil { + return toErr(err) + } + saveNoteLinkParams := make([]*pgsqlc.InsertTempNoteLinksParams, len(note.OutgoingLinks())) + for i, targetID := range note.OutgoingLinks() { + saveNoteLinkParams[i] = &pgsqlc.InsertTempNoteLinksParams{ + SourceID: note.ID(), + TargetID: targetID, + } + } + affected, err := queries.InsertTempNoteLinks(ctx, saveNoteLinkParams) + if err != nil { + return toErr(err) + } + if affected != int64(len(note.OutgoingLinks())) { + return errs.NewPersistenceInvalid("not all note links were inserted into temp table", nil) + } + if err := queries.DeleteObsoleteNoteLinks(ctx); err != nil { + return toErr(err) + } + if err := queries.SaveFromTempNoteLinks(ctx); err != nil { + return toErr(err) + } + } + workspaceID, err := queries.GetWorkspaceIDByNoteID(ctx, note.ID()) + if err != nil { + return toErr(err) + } + for _, event := range note.PopEvents() { + if err := params.publisher.PublishWorkspaceItem(ctx, event, workspaceID); err != nil { + return errs.NewPersistenceInternal("failed to publish events", err) + } + } + return nil + }) +} + +// TODO: It doesn't save the outgoing links +func (n *Note) SaveMany(ctx context.Context, notes []*domain.Note) error { + return n.runInTx.Execute(ctx, &runInTxParams{ + pgxPool: n.pgxPool, + queries: n.queries, + publisher: n.publisher, + inTransaction: n.inTransaction, + }, func(params *RunInTxFnParams) error { + var deleteIDs []uuid.UUID + var upsertNotes []*domain.Note + var allOutgoingLinks []*pgsqlc.InsertTempNoteLinksParams + + for _, note 
:= range notes { + if note.Deleted() { + deleteIDs = append(deleteIDs, note.ID()) + } else { + upsertNotes = append(upsertNotes, note) + for _, targetID := range note.OutgoingLinks() { + allOutgoingLinks = append(allOutgoingLinks, &pgsqlc.InsertTempNoteLinksParams{ + SourceID: note.ID(), + TargetID: targetID, + }) + } + } + } + + if err := n.deleteMany(ctx, params.queries, deleteIDs); err != nil { + return err + } + + if err := n.upsertMany(ctx, params.queries, upsertNotes, allOutgoingLinks); err != nil { + return err + } + + noteIDs := make([]uuid.UUID, len(notes)) + for i, note := range notes { + noteIDs[i] = note.ID() + } + noteIDworkspaceIDPairs, err := params.queries.GetWorkspaceIDsByNoteIDs(ctx, noteIDs) + if err != nil { + return toErr(err) + } + noteIDWorkspaceIDMap := make(map[uuid.UUID]uuid.UUID, len(noteIDworkspaceIDPairs)) + for _, pair := range noteIDworkspaceIDPairs { + noteIDWorkspaceIDMap[pair.ID] = pair.WorkspaceID + } + for _, note := range notes { + workspaceID, ok := noteIDWorkspaceIDMap[note.ID()] + if !ok { + return errs.NewPersistenceInvalid(fmt.Sprintf("failed to find workspace id for note id %s", note.ID()), nil) + } + for _, event := range note.PopEvents() { + if err := params.publisher.PublishWorkspaceItem(ctx, event, workspaceID); err != nil { + return errs.NewPersistenceInternal("failed to publish events", err) + } + } + } + return nil + }) +} + +func (n *Note) deleteMany(ctx context.Context, queries *pgsqlc.Queries, deleteIDs []uuid.UUID) error { + if len(deleteIDs) == 0 { + return nil + } + if err := queries.PermanentlyDeleteNotesByIDs(ctx, deleteIDs); err != nil { + return toErr(err) + } + return nil +} + +func (n *Note) upsertMany(ctx context.Context, queries *pgsqlc.Queries, upsertNotes []*domain.Note, allOutgoingLinks []*pgsqlc.InsertTempNoteLinksParams) error { + if len(upsertNotes) == 0 { + return nil + } + if err := queries.CreateTempTableNotes(ctx); err != nil { + return toErr(err) + } + + saveNoteParams := 
make([]*pgsqlc.InsertTempNotesParams, len(upsertNotes)) + for i, note := range upsertNotes { + var icon *string + if iconVal := note.Icon(); iconVal != "" { + icon = &iconVal + } + var trashedBy *string + var trashedAt *time.Time + if note.IsTrashed() { + by, at := note.TrashedBy().String(), note.TrashedAt() + trashedBy, trashedAt = &by, &at + } + saveNoteParams[i] = &pgsqlc.InsertTempNotesParams{ + ID: note.ID(), + Name: note.Name(), + Icon: icon, + FolderID: note.FolderID(), + Tags: note.Tags(), + Size: int32(note.Size()), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + TrashedBy: trashedBy, + TrashedAt: trashedAt, + } + } + + if _, err := queries.InsertTempNotes(ctx, saveNoteParams); err != nil { + return toErr(err) + } + if err := queries.SaveFromTempNotes(ctx); err != nil { + return toErr(err) + } + + if len(allOutgoingLinks) > 0 { + if err := queries.CreateTempTableNoteLinks(ctx); err != nil { + return toErr(err) + } + if _, err := queries.InsertTempNoteLinks(ctx, allOutgoingLinks); err != nil { + return toErr(err) + } + if err := queries.DeleteObsoleteNoteLinks(ctx); err != nil { + return toErr(err) + } + if err := queries.SaveFromTempNoteLinks(ctx); err != nil { + return toErr(err) + } + } + return nil +} + +func (n *Note) AreAllInWorkspace(ctx context.Context, ids []uuid.UUID, workspaceID uuid.UUID) (bool, error) { + count, err := n.queries.CountNotesInWorkspaceByIDs(ctx, &pgsqlc.CountNotesInWorkspaceByIDsParams{ + IDs: ids, + WorkspaceID: workspaceID, + }) + if err != nil { + return false, toErr(err) + } + return count == int64(len(ids)), nil +} + +func (n *Note) GetWorkspaceIDByID(ctx context.Context, id uuid.UUID) (uuid.UUID, error) { + workspaceID, err := n.queries.GetWorkspaceIDByNoteID(ctx, id) + if err != nil { + return uuid.Nil, toErr(err) + } + return workspaceID, nil +} diff --git a/internal/note/infra/persistence/pgrepo/publisher.go b/internal/note/infra/persistence/pgrepo/publisher.go new file mode 100644 index 00000000..ec8abefc --- /dev/null +++ 
b/internal/note/infra/persistence/pgrepo/publisher.go @@ -0,0 +1,18 @@ +package pgrepo + +import ( + "context" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/notopia-uit/notopia/internal/note/domain" +) + +type Publisher interface { + PublishWorkspaceItem(ctx context.Context, event domain.Event, workspaceID uuid.UUID) error + Publish(ctx context.Context, event domain.Event) error +} + +type PublisherFactory interface { + Create(pgxTx pgx.Tx) (Publisher, error) +} diff --git a/internal/note/infra/persistence/pgrepo/runintx.go b/internal/note/infra/persistence/pgrepo/runintx.go new file mode 100644 index 00000000..13056c02 --- /dev/null +++ b/internal/note/infra/persistence/pgrepo/runintx.go @@ -0,0 +1,92 @@ +package pgrepo + +import ( + "context" + "log/slog" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type runInTxParams struct { + pgxPool *pgxpool.Pool // Required when not in transaction + queries *pgsqlc.Queries // Required when in transaction (should be tx-backed) + publisher Publisher // Required when in transaction (should be tx-backed) + inTransaction bool // Indicates if we're already in a transaction +} + +type RunInTxFnParams struct { + queries *pgsqlc.Queries + publisher Publisher +} + +type RunInTx struct { + publisherFactory PublisherFactory +} + +func NewRunInTx(publisherFactory PublisherFactory) *RunInTx { + return &RunInTx{ + publisherFactory: publisherFactory, + } +} + +var ProvideRunInTx = NewRunInTx + +func (r *RunInTx) Execute( + ctx context.Context, + params *runInTxParams, + fn func(params *RunInTxFnParams) error, +) error { + if params.inTransaction { + return fn(&RunInTxFnParams{ + queries: params.queries, + publisher: params.publisher, + }) + } + + tx, err := params.pgxPool.Begin(ctx) + if err != nil { + return errs.NewPersistenceInternal("failed to begin transaction in RunInTx", err) + } + + 
defer func() { + if p := recover(); p != nil { + if err = tx.Rollback(ctx); err != nil { + slog.ErrorContext( + ctx, "failed to rollback transaction after panic in RunInTx", + slog.Any("error", err), + ) + } + panic(p) + } else if err != nil { + if err = tx.Rollback(ctx); err != nil { + slog.ErrorContext( + ctx, "failed to rollback transaction after error in RunInTx", + slog.Any("error", err), + ) + } + } + }() + + queries := pgsqlc.New(tx) + publisher, err := r.publisherFactory.Create(tx) + if err != nil { + return errs.NewPersistenceInternal("failed to create publisher in RunInTx", err) + } + + fnParams := &RunInTxFnParams{ + queries: queries, + publisher: publisher, + } + + if err = fn(fnParams); err != nil { + return err + } + + if err = tx.Commit(ctx); err != nil { + return errs.NewPersistenceInternal("failed to commit transaction in RunInTx", err) + } + + return nil +} diff --git a/internal/note/infra/persistence/pgrepo/uow.go b/internal/note/infra/persistence/pgrepo/uow.go new file mode 100644 index 00000000..6d1325bb --- /dev/null +++ b/internal/note/infra/persistence/pgrepo/uow.go @@ -0,0 +1,128 @@ +package pgrepo + +import ( + "context" + "log/slog" + "sync" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type RepoRegistry struct { + uow *UnitOfWork + txQueries *pgsqlc.Queries + publisher Publisher + runInTx *RunInTx + + workspace domain.WorkspaceRepo + folder domain.FolderRepo + note domain.NoteRepo + + wsOnce sync.Once + folderOnce sync.Once + noteOnce sync.Once +} + +var _ domain.RepoRegistry = (*RepoRegistry)(nil) + +func (r *RepoRegistry) Workspace() domain.WorkspaceRepo { + r.wsOnce.Do(func() { + r.workspace = NewWorkspace(nil, r.txQueries, r.publisher, r.runInTx, true) + }) + return r.workspace +} + +func (r *RepoRegistry) Folder() domain.FolderRepo { + 
r.folderOnce.Do(func() { + r.folder = NewFolder(nil, r.txQueries, r.publisher, r.runInTx, true) + }) + return r.folder +} + +func (r *RepoRegistry) Note() domain.NoteRepo { + r.noteOnce.Do(func() { + r.note = NewNote(nil, r.txQueries, r.publisher, r.runInTx, true) + }) + return r.note +} + +type UnitOfWork struct { + pool *pgxpool.Pool + publisherFactory PublisherFactory + runInTx *RunInTx +} + +var _ domain.UnitOfWork = (*UnitOfWork)(nil) + +func NewUnitOfWork( + pool *pgxpool.Pool, + publisherFactory PublisherFactory, + runInTx *RunInTx, +) *UnitOfWork { + return &UnitOfWork{ + pool: pool, + publisherFactory: publisherFactory, + runInTx: runInTx, + } +} + +var ProvideUnitOfWork = NewUnitOfWork + +// NOTE: an AI said about chaining error is not a good idea? +func (u *UnitOfWork) Execute( + ctx context.Context, + fn func(repoRegistry domain.RepoRegistry) error, +) error { + tx, err := u.pool.Begin(ctx) + if err != nil { + return errs.NewPersistenceInternal("failed to begin transaction in unit of work", err) + } + defer func() { + if p := recover(); p != nil { + if err = tx.Rollback(ctx); err != nil { + slog.ErrorContext( + ctx, "failed to rollback transaction after panic in unit of work", + slog.Any("error", err), + ) + } + panic(p) + } else if err != nil { + if err = tx.Rollback(ctx); err != nil { + slog.ErrorContext( + ctx, "failed to rollback transaction after error in unit of work", + slog.Any("error", err), + ) + } + } + }() + + txQueries := pgsqlc.New(tx) + publisher, err := u.publisherFactory.Create(tx) + if err != nil { + return errs.NewPersistenceInternal("failed to create publisher in unit of work", err) + } + repoRegistry := &RepoRegistry{ + uow: u, + runInTx: u.runInTx, + txQueries: txQueries, + publisher: publisher, + workspace: nil, + folder: nil, + note: nil, + wsOnce: sync.Once{}, + folderOnce: sync.Once{}, + noteOnce: sync.Once{}, + } + + if err = fn(repoRegistry); err != nil { + return err + } + + if err = tx.Commit(ctx); err != nil { + return 
errs.NewPersistenceInternal("failed to commit transaction in unit of work", err) + } + return nil +} diff --git a/internal/note/infra/persistence/pgrepo/workspace.go b/internal/note/infra/persistence/pgrepo/workspace.go new file mode 100644 index 00000000..cd2cfb18 --- /dev/null +++ b/internal/note/infra/persistence/pgrepo/workspace.go @@ -0,0 +1,160 @@ +package pgrepo + +import ( + "context" + "errors" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/notopia-uit/notopia/internal/note/domain" + "github.com/notopia-uit/notopia/internal/note/errs" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" +) + +type Workspace struct { + pgxPool *pgxpool.Pool + queries *pgsqlc.Queries + publisher Publisher + runInTx *RunInTx + inTransaction bool +} + +var _ domain.WorkspaceRepo = (*Workspace)(nil) + +func NewWorkspace( + pgxPool *pgxpool.Pool, + queries *pgsqlc.Queries, + publisher Publisher, + runInTx *RunInTx, + inTransaction bool, +) *Workspace { + return &Workspace{ + pgxPool: pgxPool, + queries: queries, + publisher: publisher, + runInTx: runInTx, + inTransaction: inTransaction, + } +} + +func NewNoTransactionWorkspace( + pgxPool *pgxpool.Pool, + queries *pgsqlc.Queries, + runInTx *RunInTx, +) *Workspace { + return NewWorkspace(pgxPool, queries, nil, runInTx, false) +} + +var ProvideWorkspace = NewNoTransactionWorkspace + +func (w *Workspace) GetBySlug(ctx context.Context, slug string, forUpdate bool) (*domain.Workspace, error) { + workspace, err := w.queries.GetWorkspaceBySlug(ctx, pgsqlc.GetWorkspaceBySlugParams{ + Slug: slug, + ForUpdate: forUpdate, + }) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewWorkspaceBySlugNotFound(slug, err) + } + return nil, toErr(err) + } + + folders, err := w.queries.GetFoldersByWorkspaceID(ctx, pgsqlc.GetFoldersByWorkspaceIDParams{ + WorkspaceID: workspace.ID, + ForUpdate: forUpdate, + }) + if err != nil { + return nil, 
toErr(err) + } + + if len(folders) == 0 { + return nil, errs.NewWorkspaceRootFolderNotFound(workspace.ID, pgx.ErrNoRows) + } + + return workspaceToDomainRepo(workspace, folders[0].ID) +} + +func (w *Workspace) GetByID(ctx context.Context, id uuid.UUID, forUpdate bool) (*domain.Workspace, error) { + workspace, err := w.queries.GetWorkspaceByID(ctx, pgsqlc.GetWorkspaceByIDParams{ + ID: id, + ForUpdate: forUpdate, + }, + ) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewWorkspaceNotFound(id, err) + } + return nil, toErr(err) + } + + folders, err := w.queries.GetFoldersByWorkspaceID(ctx, pgsqlc.GetFoldersByWorkspaceIDParams{ + WorkspaceID: workspace.ID, + ForUpdate: forUpdate, + }) + if err != nil { + return nil, toErr(err) + } + + if len(folders) == 0 { + return nil, errs.NewWorkspaceRootFolderNotFound(id, pgx.ErrNoRows) + } + + return workspaceToDomainRepo(workspace, folders[0].ID) +} + +func (w *Workspace) GetIDBySlug(ctx context.Context, slug string) (*uuid.UUID, error) { + result, err := w.queries.GetWorkspaceIDBySlug(ctx, slug) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errs.NewWorkspaceBySlugNotFound(slug, err) + } + return nil, toErr(err) + } + return &result, nil +} + +func (w *Workspace) CheckSlugExists(ctx context.Context, slug string) (bool, error) { + result, err := w.queries.CheckSlugExists(ctx, slug) + if err != nil { + return false, toErr(err) + } + return result, nil +} + +func (w *Workspace) Save(ctx context.Context, workspace *domain.Workspace) (cerr error) { + return w.runInTx.Execute(ctx, &runInTxParams{ + pgxPool: w.pgxPool, + queries: w.queries, + publisher: w.publisher, + inTransaction: w.inTransaction, + }, func(params *RunInTxFnParams) error { + err := params.queries.SaveWorkspace(ctx, &pgsqlc.SaveWorkspaceParams{ + ID: workspace.ID(), + Slug: workspace.Slug(), + Name: workspace.Name(), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + DeletedAt: workspace.DeletedAt(), + }) + if err != 
nil { + return toErr(err) + } + for _, event := range workspace.PopEvents() { + if err := params.publisher.Publish(ctx, event); err != nil { + return errs.NewPersistenceInternal("failed to publish events", err) + } + } + return nil + }) +} + +func workspaceToDomainRepo(workspace *pgsqlc.Workspace, rootFolderID uuid.UUID) (*domain.Workspace, error) { + return domain.NewWorkspace( + workspace.ID, + workspace.Name, + workspace.Slug, + rootFolderID, + ) +} diff --git a/internal/note/infra/persistence/pgsqlc/dynfilter.go b/internal/note/infra/persistence/pgsqlc/dynfilter.go new file mode 100644 index 00000000..dab3e8bc --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/dynfilter.go @@ -0,0 +1,379 @@ +// Code generated by sqlc. DO NOT EDIT. + +package pgsqlc + +import ( + "reflect" + "strings" +) + +// dynCompiledSeg is one segment of a pre-compiled dynamic SQL query. +type dynCompiledSeg struct { + // parts splits the segment text at each $N placeholder: + // parts[0] + "$" + parts[1] + "$" + ... + parts[K] + // len(parts) == len(argNums)+1 + parts []string + // argNums are the 1-based $N SQL parameter numbers in order of appearance. + // args[argNums[i]-1] provides the value for the i-th placeholder. + argNums []int + // condIdxs are 0-based indices into the args slice passed to Build. + // All referenced args must be "active" (non-nil pointer / true bool) + // for this segment to be included. Empty means always include. + condIdxs []int +} + +// dynCompiledQuery is a pre-parsed dynamic SQL query. +// Create one at package level with dynCompile and reuse it across calls. +type dynCompiledQuery struct { + segs []dynCompiledSeg +} + +// dynCompile parses an annotated SQL string (containing -- :if $N markers) +// into a dynCompiledQuery. Call this once at package initialisation time +// and store the result in a package-level variable. 
+func dynCompile(annotatedSQL string) *dynCompiledQuery { + var segs []dynCompiledSeg + var staticBuf strings.Builder + + flushStatic := func() { + if staticBuf.Len() == 0 { + return + } + parts, argNums := dynSplitPlaceholders(staticBuf.String()) + segs = append(segs, dynCompiledSeg{parts: parts, argNums: argNums}) + staticBuf.Reset() + } + + rest := annotatedSQL + firstLine := true + + for rest != "" { + var line string + if i := strings.IndexByte(rest, '\n'); i >= 0 { + line, rest = rest[:i], rest[i+1:] + } else { + line, rest = rest, "" + } + + sep := "\n" + if firstLine { + sep = "" + firstLine = false + } + + trimmed := strings.TrimSpace(line) + + // Standalone block annotation: "-- :if $N" โ€” the next line is conditional. + if after, ok := strings.CutPrefix(trimmed, "-- :if $"); ok && after != "" { + n := dynParseInt(after, len(after)) + if n > 0 { + flushStatic() + condIdx := n - 1 + // Read and process next line. + if rest != "" { + var nextLine string + if i := strings.IndexByte(rest, '\n'); i >= 0 { + nextLine, rest = rest[:i], rest[i+1:] + } else { + nextLine, rest = rest, "" + } + condIdxs, cleaned := dynExtractCondIdxs(nextLine) + condIdxs = append([]int{condIdx}, condIdxs...) + parts, argNums := dynSplitPlaceholders("\n" + cleaned) + segs = append(segs, dynCompiledSeg{ + parts: parts, + argNums: argNums, + condIdxs: condIdxs, + }) + } + continue + } + } + + // Inline annotation(s): "text -- :if $N [-- :if $M ...]" + if condIdxs, cleaned := dynExtractCondIdxs(line); len(condIdxs) > 0 { + flushStatic() + parts, argNums := dynSplitPlaceholders(sep + cleaned) + segs = append(segs, dynCompiledSeg{ + parts: parts, + argNums: argNums, + condIdxs: condIdxs, + }) + continue + } + + // Unconditional line: accumulate into the static buffer. + staticBuf.WriteString(sep + line) + } + + flushStatic() + return &dynCompiledQuery{segs: segs} +} + +// Build applies the pre-compiled filter to args and returns the final SQL +// and the trimmed args slice. 
The method is safe to call concurrently. +// +// When the same original $N appears more than once in the active segments +// (e.g. both "name = $1" and "email = $1"), the returned SQL reuses the +// same renumbered placeholder and the arg is included only once. +func (q *dynCompiledQuery) Build(args []any) (string, []any) { + var b strings.Builder + var outArgs []any + n := 1 + argIdxToN := make(map[int]int) // original argIdx (0-based) -> output $N + + for _, seg := range q.segs { + // Check all conditions. + active := true + for _, idx := range seg.condIdxs { + if idx >= len(args) || !dynArgActive(args[idx]) { + active = false + break + } + } + if !active { + continue + } + + // Write text parts interleaved with sequential $N placeholders. + for i, part := range seg.parts { + b.WriteString(part) + if i < len(seg.argNums) { + argIdx := seg.argNums[i] - 1 + if existing, ok := argIdxToN[argIdx]; ok { + // Same original param already emitted โ€” reuse its placeholder. + b.WriteByte('$') + dynWriteInt(&b, existing) + } else { + b.WriteByte('$') + dynWriteInt(&b, n) + argIdxToN[argIdx] = n + n++ + if argIdx >= 0 && argIdx < len(args) { + outArgs = append(outArgs, args[argIdx]) + } + } + } + } + } + + return dynFinalizeQuery(b.String()), outArgs +} + +// dynExtractCondIdxs extracts all " -- :if $N" annotations from line, +// returning the 0-based condition indices and the cleaned line text. 
+func dynExtractCondIdxs(line string) (condIdxs []int, cleaned string) { + idx := strings.Index(line, " -- :if $") + if idx == -1 { + return nil, line + } + cleaned = strings.TrimRight(line[:idx], " \t") + tail := line[idx:] + for { + i := strings.Index(tail, " -- :if $") + if i == -1 { + break + } + numStart := i + 9 // len(" -- :if $") + numEnd := numStart + for numEnd < len(tail) && tail[numEnd] >= '0' && tail[numEnd] <= '9' { + numEnd++ + } + if numEnd > numStart { + if n := dynParseInt(tail[numStart:numEnd], numEnd-numStart); n > 0 { + condIdxs = append(condIdxs, n-1) + } + } + tail = tail[i+1:] + } + return condIdxs, cleaned +} + +// dynSplitPlaceholders splits text at $N placeholder boundaries. +// Returns parts (len = len(argNums)+1) and 1-based argNums. +func dynSplitPlaceholders(text string) (parts []string, argNums []int) { + // Fast path: scan without a Builder when every '$' is followed by digits. + // Fall back to a Builder only when a bare '$' (not followed by a digit) is found. + var buf *strings.Builder + for { + i := strings.IndexByte(text, '$') + if i == -1 { + if buf != nil { + buf.WriteString(text) + } + break + } + j := i + 1 + for j < len(text) && text[j] >= '0' && text[j] <= '9' { + j++ + } + if j == i+1 { + // '$' not followed by digit; switch to Builder to preserve the text. + if buf == nil { + buf = &strings.Builder{} + } + buf.WriteString(text[:i+1]) + text = text[i+1:] + continue + } + n := dynParseInt(text[i+1:j], j-i-1) + if n <= 0 { + if buf != nil { + buf.WriteString(text[:j]) + } + text = text[j:] + continue + } + if buf != nil { + buf.WriteString(text[:i]) + parts = append(parts, buf.String()) + buf.Reset() + } else { + parts = append(parts, text[:i]) + } + argNums = append(argNums, n) + text = text[j:] + } + if buf != nil { + parts = append(parts, buf.String()) + } else { + parts = append(parts, text) + } + return parts, argNums +} + +// dynFinalizeQuery cleans up the output SQL after conditional lines have been +// removed. 
It loops until stable, handling cascading cases such as: +// - trailing comma on the new last line โ†’ strip the comma +// - orphaned clause keyword (WHERE, ORDER BY, GROUP BY, HAVING) โ†’ remove it +func dynFinalizeQuery(query string) string { + for { + // Find the end of the last non-empty line. + end := len(query) + for end > 0 && (query[end-1] == ' ' || query[end-1] == '\t' || query[end-1] == '\n') { + end-- + } + if end == 0 { + return query + } + // Find start of last non-empty line. + start := end + for start > 0 && query[start-1] != '\n' { + start-- + } + trimmed := strings.TrimSpace(query[start:end]) + + // Strip trailing comma. + if len(trimmed) > 0 && trimmed[len(trimmed)-1] == ',' { + commaPos := strings.LastIndexByte(query[start:end], ',') + if commaPos >= 0 { + query = query[:start+commaPos] + query[start+commaPos+1:] + } + continue // re-evaluate after removal + } + + // Remove orphaned SQL clause keywords. + if strings.EqualFold(trimmed, "ORDER BY") || + strings.EqualFold(trimmed, "WHERE") || + strings.EqualFold(trimmed, "GROUP BY") || + strings.EqualFold(trimmed, "HAVING") { + newEnd := start + for newEnd > 0 && (query[newEnd-1] == '\n' || query[newEnd-1] == ' ' || query[newEnd-1] == '\t') { + newEnd-- + } + query = query[:newEnd] + continue // re-evaluate after removal + } + + break // nothing changed + } + return query +} + +// DynSQL is a pre-compiled dynamic SQL query. +// Use CompileDynSQL to create one at package level for maximum performance. +type DynSQL = dynCompiledQuery + +// CompileDynSQL parses an annotated SQL string (containing -- :if $N markers) +// once and returns a *DynSQL whose Build method can be called repeatedly +// with no per-call string scanning. +func CompileDynSQL(annotatedSQL string) *DynSQL { + return dynCompile(annotatedSQL) +} + +// DynamicSQL processes a SQL query with -- :if $N annotations at runtime. 
+// Prefer dynCompile + Build for repeated calls; DynamicSQL is provided for +// one-off use and backward compatibility. +// +// For each annotated line, it checks args[N-1] (N is 1-based, matching $N): +// - nil pointer โ†’ skip the line +// - false bool โ†’ skip the line +// - otherwise โ†’ keep the line +// +// After filtering, remaining $N placeholders are renumbered sequentially and +// the args slice is trimmed to match, so the query is always valid. +func DynamicSQL(query string, args []any) (string, []any) { + return dynCompile(query).Build(args) +} + +// dynParseInt parses a non-negative integer from s[0:length] without allocations. +// Returns -1 if the string contains non-digit characters. +func dynParseInt(s string, length int) int { + n := 0 + for i := 0; i < length; i++ { + c := s[i] + if c < '0' || c > '9' { + return -1 + } + n = n*10 + int(c-'0') + } + return n +} + +// dynWriteInt writes a positive integer to buf without allocations for values < 10. +func dynWriteInt(buf *strings.Builder, n int) { + if n < 10 { + buf.WriteByte(byte('0' + n)) + } else if n < 100 { + buf.WriteByte(byte('0' + n/10)) + buf.WriteByte(byte('0' + n%10)) + } else { + var tmp [20]byte + i := len(tmp) + for n > 0 { + i-- + tmp[i] = byte('0' + n%10) + n /= 10 + } + buf.Write(tmp[i:]) + } +} + +func dynArgActive(arg any) bool { + if arg == nil { + return false + } + switch v := arg.(type) { + case bool: + return v + case *string: + return v != nil + case *int: + return v != nil + case *int32: + return v != nil + case *int64: + return v != nil + case *float64: + return v != nil + case *bool: + return v != nil + } + v := reflect.ValueOf(arg) + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return !v.IsNil() + default: + return true + } +} diff --git a/internal/note/infra/persistence/pgsqlc/folder.sql b/internal/note/infra/persistence/pgsqlc/folder.sql index 03d8db47..4979d408 100644 --- a/internal/note/infra/persistence/pgsqlc/folder.sql +++ 
b/internal/note/infra/persistence/pgsqlc/folder.sql @@ -90,128 +90,73 @@ SELECT FROM folders WHERE - CASE - WHEN sqlc.narg('id')::uuid IS NOT NULL - THEN id = sqlc.narg('id')::uuid - ELSE TRUE - END - AND CASE - WHEN sqlc.narg('workspace_id')::uuid IS NOT NULL - THEN workspace_id = sqlc.narg('workspace_id')::uuid - ELSE TRUE - END - AND CASE - WHEN sqlc.narg('parent_id')::uuid IS NOT NULL - THEN parent_id = sqlc.narg('parent_id')::uuid - ELSE TRUE - END - AND CASE - WHEN sqlc.arg('is_root_folder')::bool = TRUE - THEN parent_id IS NULL - ELSE TRUE - END - AND CASE - WHEN sqlc.arg('trashed_by')::text <> '' - THEN trashed_by = sqlc.arg('trashed_by')::text - ELSE TRUE - END - AND CASE - WHEN sqlc.arg('include_trashed')::bool = FALSE - THEN trashed_by IS NULL - ELSE TRUE - END -ORDER BY - created_at DESC; + id = sqlc.arg('id') +FOR UPDATE -- :if @for_update +; --- name: GetFolders :many +-- name: GetFoldersByWorkspaceID :many SELECT * FROM folders WHERE - CASE - WHEN sqlc.narg('ids')::uuid[] IS NOT NULL - THEN id = ANY(sqlc.narg('ids')::uuid[]) - ELSE TRUE - END - AND CASE - WHEN sqlc.narg('workspace_id')::uuid IS NOT NULL - THEN workspace_id = sqlc.narg('workspace_id')::uuid - ELSE TRUE - END - AND CASE - WHEN sqlc.narg('parent_id')::uuid IS NOT NULL - THEN parent_id = sqlc.narg('parent_id')::uuid - ELSE TRUE - END - AND CASE - WHEN sqlc.arg('is_root_folder')::bool = TRUE - THEN parent_id IS NULL - ELSE TRUE - END - AND CASE - WHEN sqlc.narg('trashed_by')::text IS NOT NULL - THEN trashed_by = sqlc.narg('trashed_by')::text - ELSE TRUE - END - AND CASE - WHEN sqlc.arg('include_trashed')::bool = FALSE - THEN trashed_by IS NULL - ELSE TRUE - END + workspace_id = sqlc.arg('workspace_id') ORDER BY - created_at DESC; + created_at DESC +FOR UPDATE -- :if @for_update +; --- name: GetWorkspaceIDByFolderID :one -SELECT - workspace_id -FROM - folders -WHERE - id = sqlc.arg('id'); - --- name: GetRootFolderIDsByWorkspaceID :many +-- name: GetFolders :many SELECT - id + * FROM 
folders WHERE - workspace_id = sqlc.arg('workspace_id') - AND parent_id IS NULL; + id = ANY(sqlc.narg('ids')::uuid[]) -- :if @ids + AND workspace_id = sqlc.narg('workspace_id')::uuid -- :if @workspace_id + AND ( -- :if @trashed_by + trashed_by = sqlc.narg('trashed_by')::text + OR trashed_by IS NULL + ) + AND trashed_by IS NOT NULL -- :if @trashed_only +ORDER BY + created_at DESC +FOR UPDATE -- :if @for_update +; --- name: GetRecursiveFolderByParentID :many -WITH RECURSIVE subfolders AS ( +-- name: GetParentIDsByFolderID :many +WITH RECURSIVE parent_folders(id, parent_id) AS ( SELECT - *, - 1 AS depth + id, + parent_id FROM - folders + folders AS start WHERE - parent_id = sqlc.arg('parent_id')::uuid - AND CASE - WHEN sqlc.arg('include_trashed')::bool = FALSE - THEN trashed_by IS NULL - ELSE TRUE - END + id = sqlc.arg('id')::uuid UNION ALL SELECT - f.*, - s.depth + 1 AS depth + id, + parent_id FROM - folders AS f - INNER JOIN subfolders s ON f.parent_id = s.id - WHERE - s.depth < COALESCE(sqlc.narg('depth')::int, 9999) - AND CASE - WHEN sqlc.arg('include_trashed')::bool = FALSE - THEN f.trashed_by IS NULL - ELSE TRUE - END + folders + INNER JOIN parent_folders AS pf ON id = pf.parent_id ) SELECT - * + id FROM - subfolders; + parent_folders +WHERE + id != sqlc.arg('id')::uuid +FOR UPDATE -- :if @for_update +; + +-- name: GetWorkspaceIDByFolderID :one +SELECT + workspace_id +FROM + folders +WHERE + id = sqlc.arg('id'); -- name: CountFoldersInWorkspaceByIDs :one SELECT diff --git a/internal/note/infra/persistence/pgsqlc/folder.sql.go b/internal/note/infra/persistence/pgsqlc/folder.sql.go index 08e721f6..e54000e5 100644 --- a/internal/note/infra/persistence/pgsqlc/folder.sql.go +++ b/internal/note/infra/persistence/pgsqlc/folder.sql.go @@ -10,6 +10,7 @@ import ( "time" "github.com/google/uuid" + "go.opentelemetry.io/otel" ) const countFoldersInWorkspaceByIDs = `-- name: CountFoldersInWorkspaceByIDs :one @@ -28,6 +29,8 @@ type CountFoldersInWorkspaceByIDsParams struct { } 
func (q *Queries) CountFoldersInWorkspaceByIDs(ctx context.Context, arg *CountFoldersInWorkspaceByIDsParams) (int64, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "CountFoldersInWorkspaceByIDs") + defer span.End() row := q.db.QueryRow(ctx, countFoldersInWorkspaceByIDs, arg.WorkspaceID, arg.IDs) var count int64 err := row.Scan(&count) @@ -40,58 +43,22 @@ SELECT FROM folders WHERE - CASE - WHEN $1::uuid IS NOT NULL - THEN id = $1::uuid - ELSE TRUE - END - AND CASE - WHEN $2::uuid IS NOT NULL - THEN workspace_id = $2::uuid - ELSE TRUE - END - AND CASE - WHEN $3::uuid IS NOT NULL - THEN parent_id = $3::uuid - ELSE TRUE - END - AND CASE - WHEN $4::bool = TRUE - THEN parent_id IS NULL - ELSE TRUE - END - AND CASE - WHEN $5::text <> '' - THEN trashed_by = $5::text - ELSE TRUE - END - AND CASE - WHEN $6::bool = FALSE - THEN trashed_by IS NULL - ELSE TRUE - END -ORDER BY - created_at DESC + id = $1 +FOR UPDATE -- :if $2 ` +var _getFolderDynQ = dynCompile(getFolder) + type GetFolderParams struct { - ID *uuid.UUID - WorkspaceID *uuid.UUID - ParentID *uuid.UUID - IsRootFolder bool - TrashedBy string - IncludeTrashed bool + ID uuid.UUID + ForUpdate bool } -func (q *Queries) GetFolder(ctx context.Context, arg *GetFolderParams) (*Folder, error) { - row := q.db.QueryRow(ctx, getFolder, - arg.ID, - arg.WorkspaceID, - arg.ParentID, - arg.IsRootFolder, - arg.TrashedBy, - arg.IncludeTrashed, - ) +func (q *Queries) GetFolder(ctx context.Context, arg GetFolderParams) (*Folder, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetFolder") + defer span.End() + dynQuery, dynArgs := _getFolderDynQ.Build([]any{arg.ID, arg.ForUpdate}) + row := q.db.QueryRow(ctx, dynQuery, dynArgs...) 
var i Folder err := row.Scan( &i.ID, @@ -113,58 +80,33 @@ SELECT FROM folders WHERE - CASE - WHEN $1::uuid[] IS NOT NULL - THEN id = ANY($1::uuid[]) - ELSE TRUE - END - AND CASE - WHEN $2::uuid IS NOT NULL - THEN workspace_id = $2::uuid - ELSE TRUE - END - AND CASE - WHEN $3::uuid IS NOT NULL - THEN parent_id = $3::uuid - ELSE TRUE - END - AND CASE - WHEN $4::bool = TRUE - THEN parent_id IS NULL - ELSE TRUE - END - AND CASE - WHEN $5::text IS NOT NULL - THEN trashed_by = $5::text - ELSE TRUE - END - AND CASE - WHEN $6::bool = FALSE - THEN trashed_by IS NULL - ELSE TRUE - END + id = ANY($1::uuid[]) -- :if $1 + AND workspace_id = $2::uuid -- :if $2 + AND ( -- :if $3 + trashed_by = $3::text -- :if $3 + OR trashed_by IS NULL -- :if $3 + ) -- :if $3 + AND trashed_by IS NOT NULL -- :if $4 ORDER BY created_at DESC +FOR UPDATE -- :if $5 ` +var _getFoldersDynQ = dynCompile(getFolders) + type GetFoldersParams struct { - IDs []uuid.UUID - WorkspaceID *uuid.UUID - ParentID *uuid.UUID - IsRootFolder bool - TrashedBy *string - IncludeTrashed bool + IDs *[]uuid.UUID + WorkspaceID *uuid.UUID + TrashedBy *string + TrashedOnly bool + ForUpdate bool } func (q *Queries) GetFolders(ctx context.Context, arg *GetFoldersParams) ([]*Folder, error) { - rows, err := q.db.Query(ctx, getFolders, - arg.IDs, - arg.WorkspaceID, - arg.ParentID, - arg.IsRootFolder, - arg.TrashedBy, - arg.IncludeTrashed, - ) + ctx, span := otel.Tracer("Queries").Start(ctx, "GetFolders") + defer span.End() + dynQuery, dynArgs := _getFoldersDynQ.Build([]any{arg.IDs, arg.WorkspaceID, arg.TrashedBy, arg.TrashedOnly, arg.ForUpdate}) + rows, err := q.db.Query(ctx, dynQuery, dynArgs...) 
if err != nil { return nil, err } @@ -193,69 +135,37 @@ func (q *Queries) GetFolders(ctx context.Context, arg *GetFoldersParams) ([]*Fol return items, nil } -const getRecursiveFolderByParentID = `-- name: GetRecursiveFolderByParentID :many -WITH RECURSIVE subfolders AS ( - SELECT - id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at, - 1 AS depth - FROM - folders - WHERE - parent_id = $1::uuid - AND CASE - WHEN $2::bool = FALSE - THEN trashed_by IS NULL - ELSE TRUE - END - UNION ALL - SELECT - f.id, f.name, f.icon, f.workspace_id, f.parent_id, f.created_at, f.updated_at, f.trashed_by, f.trashed_at, - s.depth + 1 AS depth - FROM - folders AS f - INNER JOIN subfolders s ON f.parent_id = s.id - WHERE - s.depth < COALESCE($3::int, 9999) - AND CASE - WHEN $2::bool = FALSE - THEN f.trashed_by IS NULL - ELSE TRUE - END -) +const getFoldersByWorkspaceID = `-- name: GetFoldersByWorkspaceID :many SELECT - id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at, depth + id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at FROM - subfolders + folders +WHERE + workspace_id = $1 +ORDER BY + created_at DESC +FOR UPDATE -- :if $2 ` -type GetRecursiveFolderByParentIDParams struct { - ParentID uuid.UUID - IncludeTrashed bool - Depth *int32 -} +var _getFoldersByWorkspaceIDDynQ = dynCompile(getFoldersByWorkspaceID) -type GetRecursiveFolderByParentIDRow struct { - ID uuid.UUID - Name string - Icon *string +type GetFoldersByWorkspaceIDParams struct { WorkspaceID uuid.UUID - ParentID *uuid.UUID - CreatedAt time.Time - UpdatedAt time.Time - TrashedBy *string - TrashedAt *time.Time - Depth int32 + ForUpdate bool } -func (q *Queries) GetRecursiveFolderByParentID(ctx context.Context, arg *GetRecursiveFolderByParentIDParams) ([]*GetRecursiveFolderByParentIDRow, error) { - rows, err := q.db.Query(ctx, getRecursiveFolderByParentID, arg.ParentID, arg.IncludeTrashed, arg.Depth) +func (q 
*Queries) GetFoldersByWorkspaceID(ctx context.Context, arg GetFoldersByWorkspaceIDParams) ([]*Folder, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetFoldersByWorkspaceID") + defer span.End() + dynQuery, dynArgs := _getFoldersByWorkspaceIDDynQ.Build([]any{arg.WorkspaceID, arg.ForUpdate}) + rows, err := q.db.Query(ctx, dynQuery, dynArgs...) if err != nil { return nil, err } defer rows.Close() - var items []*GetRecursiveFolderByParentIDRow + var items []*Folder for rows.Next() { - var i GetRecursiveFolderByParentIDRow + var i Folder if err := rows.Scan( &i.ID, &i.Name, @@ -266,7 +176,6 @@ func (q *Queries) GetRecursiveFolderByParentID(ctx context.Context, arg *GetRecu &i.UpdatedAt, &i.TrashedBy, &i.TrashedAt, - &i.Depth, ); err != nil { return nil, err } @@ -278,18 +187,44 @@ func (q *Queries) GetRecursiveFolderByParentID(ctx context.Context, arg *GetRecu return items, nil } -const getRootFolderIDsByWorkspaceID = `-- name: GetRootFolderIDsByWorkspaceID :many +const getParentIDsByFolderID = `-- name: GetParentIDsByFolderID :many +WITH RECURSIVE parent_folders(id, parent_id) AS ( + SELECT + id, + parent_id + FROM + folders AS start + WHERE + id = $1::uuid + UNION ALL + SELECT + id, + parent_id + FROM + folders + INNER JOIN parent_folders AS pf ON id = pf.parent_id +) SELECT id FROM - folders + parent_folders WHERE - workspace_id = $1 - AND parent_id IS NULL + id != $1::uuid +FOR UPDATE -- :if $2 ` -func (q *Queries) GetRootFolderIDsByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) ([]uuid.UUID, error) { - rows, err := q.db.Query(ctx, getRootFolderIDsByWorkspaceID, workspaceID) +var _getParentIDsByFolderIDDynQ = dynCompile(getParentIDsByFolderID) + +type GetParentIDsByFolderIDParams struct { + ID uuid.UUID + ForUpdate bool +} + +func (q *Queries) GetParentIDsByFolderID(ctx context.Context, arg GetParentIDsByFolderIDParams) ([]uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetParentIDsByFolderID") + defer span.End() + 
dynQuery, dynArgs := _getParentIDsByFolderIDDynQ.Build([]any{arg.ID, arg.ForUpdate}) + rows, err := q.db.Query(ctx, dynQuery, dynArgs...) if err != nil { return nil, err } @@ -318,6 +253,8 @@ WHERE ` func (q *Queries) GetWorkspaceIDByFolderID(ctx context.Context, id uuid.UUID) (uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetWorkspaceIDByFolderID") + defer span.End() row := q.db.QueryRow(ctx, getWorkspaceIDByFolderID, id) var workspace_id uuid.UUID err := row.Scan(&workspace_id) @@ -344,6 +281,8 @@ WHERE ` func (q *Queries) PermanentlyDeleteFolderByID(ctx context.Context, id uuid.UUID) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "PermanentlyDeleteFolderByID") + defer span.End() _, err := q.db.Exec(ctx, permanentlyDeleteFolderByID, id) return err } @@ -356,6 +295,8 @@ WHERE ` func (q *Queries) PermanentlyDeleteFoldersByIDs(ctx context.Context, ids []uuid.UUID) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "PermanentlyDeleteFoldersByIDs") + defer span.End() _, err := q.db.Exec(ctx, permanentlyDeleteFoldersByIDs, ids) return err } @@ -405,6 +346,8 @@ type SaveFolderParams struct { } func (q *Queries) SaveFolder(ctx context.Context, arg *SaveFolderParams) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "SaveFolder") + defer span.End() _, err := q.db.Exec(ctx, saveFolder, arg.ID, arg.Name, @@ -453,6 +396,8 @@ ON CONFLICT (id) DO UPDATE SET ` func (q *Queries) SaveFromTempFolders(ctx context.Context) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "SaveFromTempFolders") + defer span.End() _, err := q.db.Exec(ctx, saveFromTempFolders) return err } diff --git a/internal/note/infra/persistence/pgsqlc/folder_read.sql b/internal/note/infra/persistence/pgsqlc/folder_read.sql new file mode 100644 index 00000000..097f0fb8 --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/folder_read.sql @@ -0,0 +1,80 @@ +-- TODO: Check not use params +-- name: ReadGetFolder :one +SELECT + * +FROM + folders +WHERE + id = 
sqlc.arg('id') + AND workspace_id = sqlc.narg('workspace_id')::uuid -- :if @workspace_id + AND parent_id = sqlc.narg('parent_id')::uuid -- :if @parent_id + AND ( -- :if @trashed_by + trashed_by = sqlc.narg('trashed_by')::text + OR trashed_by IS NULL + ) + AND trashed_by IS NULL -- :if @only_non_trashed + AND trashed_by IS NOT NULL -- :if @only_trashed +; + +-- name: ReadGetFolderByID :one +SELECT + * +FROM + folders +WHERE + id = sqlc.arg('id') + AND trashed_at IS NULL; + +-- name: ReadGetTrashedFolderByWorkspaceID :many +SELECT + * +FROM + folders +WHERE + workspace_id = sqlc.arg('workspace_id') + AND trashed_at IS NOT NULL; + +-- name: ReadGetRootFolderIDsByWorkspaceID :many +SELECT + id +FROM + folders +WHERE + workspace_id = sqlc.arg('workspace_id') + AND parent_id IS NULL; + +-- TODO: Should give sqlc dynamic a try, if it run nicely, then it would be more performant + +-- name: ReadGetRecursiveFolderByParentID :many +WITH RECURSIVE subfolders AS ( + SELECT + *, + 1 AS depth + FROM + folders + WHERE + parent_id = sqlc.arg('parent_id')::uuid + AND CASE + WHEN sqlc.arg('include_trashed')::bool = FALSE + THEN trashed_at IS NULL + ELSE TRUE + END + UNION ALL + SELECT + f.*, + s.depth + 1 AS depth + FROM + folders AS f + INNER JOIN subfolders s ON f.parent_id = s.id + WHERE + s.depth < COALESCE(sqlc.narg('depth')::int, 9999) + AND CASE + WHEN sqlc.arg('include_trashed')::bool = FALSE + THEN f.trashed_at IS NULL + ELSE TRUE + END +) +SELECT + * +FROM + subfolders; diff --git a/internal/note/infra/persistence/pgsqlc/folder_read.sql.go b/internal/note/infra/persistence/pgsqlc/folder_read.sql.go new file mode 100644 index 00000000..40044e20 --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/folder_read.sql.go @@ -0,0 +1,255 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 +// source: folder_read.sql + +package pgsqlc + +import ( + "context" + "time" + + "github.com/google/uuid" + "go.opentelemetry.io/otel" +) + +const readGetFolder = `-- name: ReadGetFolder :one +SELECT + id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at +FROM + folders +WHERE + id = $1 + AND workspace_id = $2::uuid -- :if $2 + AND parent_id = $3::uuid -- :if $3 + AND ( -- :if $4 + trashed_by = $4::text -- :if $4 + OR trashed_by IS NULL -- :if $4 + ) -- :if $4 + AND trashed_by IS NULL -- :if $5 + AND trashed_by IS NOT NULL -- :if $6 +` + +var _readGetFolderDynQ = dynCompile(readGetFolder) + +type ReadGetFolderParams struct { + ID uuid.UUID + WorkspaceID *uuid.UUID + ParentID *uuid.UUID + TrashedBy *string + OnlyNonTrashed bool + OnlyTrashed bool +} + +// TODO: Check not use params +func (q *Queries) ReadGetFolder(ctx context.Context, arg *ReadGetFolderParams) (*Folder, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetFolder") + defer span.End() + dynQuery, dynArgs := _readGetFolderDynQ.Build([]any{arg.ID, arg.WorkspaceID, arg.ParentID, arg.TrashedBy, arg.OnlyNonTrashed, arg.OnlyTrashed}) + row := q.db.QueryRow(ctx, dynQuery, dynArgs...) 
+ var i Folder + err := row.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.WorkspaceID, + &i.ParentID, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ) + return &i, err +} + +const readGetFolderByID = `-- name: ReadGetFolderByID :one +SELECT + id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at +FROM + folders +WHERE + id = $1 + AND trashed_at IS NULL +` + +func (q *Queries) ReadGetFolderByID(ctx context.Context, id uuid.UUID) (*Folder, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetFolderByID") + defer span.End() + row := q.db.QueryRow(ctx, readGetFolderByID, id) + var i Folder + err := row.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.WorkspaceID, + &i.ParentID, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ) + return &i, err +} + +const readGetRecursiveFolderByParentID = `-- name: ReadGetRecursiveFolderByParentID :many + +WITH RECURSIVE subfolders AS ( + SELECT + id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at, + 1 AS depth + FROM + folders + WHERE + parent_id = $1::uuid + AND CASE + WHEN $2::bool = FALSE + THEN trashed_at IS NULL + ELSE TRUE + END + UNION ALL + SELECT + f.id, f.name, f.icon, f.workspace_id, f.parent_id, f.created_at, f.updated_at, f.trashed_by, f.trashed_at, + s.depth + 1 AS depth + FROM + folders AS f + INNER JOIN subfolders s ON f.parent_id = s.id + WHERE + s.depth < COALESCE($3::int, 9999) + AND CASE + WHEN $2::bool = FALSE + THEN f.trashed_at IS NULL + ELSE TRUE + END +) +SELECT + id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at, depth +FROM + subfolders +` + +type ReadGetRecursiveFolderByParentIDParams struct { + ParentID uuid.UUID + IncludeTrashed bool + Depth *int32 +} + +type ReadGetRecursiveFolderByParentIDRow struct { + ID uuid.UUID + Name string + Icon *string + WorkspaceID uuid.UUID + ParentID *uuid.UUID + CreatedAt time.Time + UpdatedAt time.Time + TrashedBy *string + 
TrashedAt *time.Time + Depth int32 +} + +// TODO: Should give sqlc dynamic a try, if it run nicely, then it would be more performant +func (q *Queries) ReadGetRecursiveFolderByParentID(ctx context.Context, arg *ReadGetRecursiveFolderByParentIDParams) ([]*ReadGetRecursiveFolderByParentIDRow, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetRecursiveFolderByParentID") + defer span.End() + rows, err := q.db.Query(ctx, readGetRecursiveFolderByParentID, arg.ParentID, arg.IncludeTrashed, arg.Depth) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*ReadGetRecursiveFolderByParentIDRow + for rows.Next() { + var i ReadGetRecursiveFolderByParentIDRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.WorkspaceID, + &i.ParentID, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + &i.Depth, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetRootFolderIDsByWorkspaceID = `-- name: ReadGetRootFolderIDsByWorkspaceID :many +SELECT + id +FROM + folders +WHERE + workspace_id = $1 + AND parent_id IS NULL +` + +func (q *Queries) ReadGetRootFolderIDsByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) ([]uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetRootFolderIDsByWorkspaceID") + defer span.End() + rows, err := q.db.Query(ctx, readGetRootFolderIDsByWorkspaceID, workspaceID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []uuid.UUID + for rows.Next() { + var id uuid.UUID + if err := rows.Scan(&id); err != nil { + return nil, err + } + items = append(items, id) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetTrashedFolderByWorkspaceID = `-- name: ReadGetTrashedFolderByWorkspaceID :many +SELECT + id, name, icon, workspace_id, parent_id, created_at, updated_at, trashed_by, trashed_at +FROM + 
folders +WHERE + workspace_id = $1 + AND trashed_at IS NOT NULL +` + +func (q *Queries) ReadGetTrashedFolderByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) ([]*Folder, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetTrashedFolderByWorkspaceID") + defer span.End() + rows, err := q.db.Query(ctx, readGetTrashedFolderByWorkspaceID, workspaceID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*Folder + for rows.Next() { + var i Folder + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.WorkspaceID, + &i.ParentID, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/internal/note/infra/persistence/pgsqlc/note.sql b/internal/note/infra/persistence/pgsqlc/note.sql index a65c0c21..354bb299 100644 --- a/internal/note/infra/persistence/pgsqlc/note.sql +++ b/internal/note/infra/persistence/pgsqlc/note.sql @@ -94,14 +94,15 @@ ON CONFLICT (id) DO UPDATE SET trashed_by = EXCLUDED.trashed_by, trashed_at = EXCLUDED.trashed_at; --- name: GetNote :one +-- name: GetNoteByID :one SELECT * FROM notes WHERE id = sqlc.arg('id') - AND trashed_at IS NULL; +FOR UPDATE -- :if @for_update +; -- name: GetNotes :many SELECT @@ -109,27 +110,17 @@ SELECT FROM notes WHERE - id = ANY(sqlc.arg('ids')::uuid[]) - AND trashed_at IS NULL; - --- name: GetNotesByFolderIDs :many -SELECT - * -FROM - notes -WHERE - CASE - WHEN CARDINALITY(sqlc.arg('folder_ids')::uuid[]) > 0 - THEN folder_id = ANY(sqlc.arg('folder_ids')::uuid[]) - ELSE FALSE - END - AND CASE - WHEN sqlc.arg('include_trashed')::bool = FALSE - THEN trashed_at IS NULL - ELSE TRUE - END -ORDER BY - created_at DESC; + id = ANY(sqlc.narg('ids')::uuid[]) -- :if @ids + AND folder_id IN ( + SELECT id FROM folders WHERE workspace_id = sqlc.narg('workspace_id')::uuid + ) -- :if @workspace_id + AND ( -- :if @trashed_by 
+ trashed_by = sqlc.narg('trashed_by')::text + OR trashed_by IS NULL + ) + AND trashed_by IS NOT NULL -- :if @trashed_only +FOR UPDATE -- :if @for_update +; -- name: GetWorkspaceIDByNoteID :one SELECT @@ -142,35 +133,17 @@ INNER JOIN WHERE n.id = sqlc.arg('id'); --- name: GetNotesInWorkspace :many +-- name: GetWorkspaceIDsByNoteIDs :many SELECT - n.* -FROM - notes AS n -INNER JOIN - folders f - ON n.folder_id = f.id -WHERE - f.workspace_id = sqlc.arg('workspace_id') - AND CASE - WHEN sqlc.narg('trashed_by')::text IS NOT NULL - THEN n.trashed_by = sqlc.narg('trashed_by')::text - ELSE n.trashed_at IS NULL - END; - --- name: GetTrashedNotesByWorkspaceID :many -SELECT - n.* + n.id, + f.workspace_id FROM notes AS n INNER JOIN folders f ON n.folder_id = f.id WHERE - f.workspace_id = sqlc.arg('workspace_id') - AND n.trashed_at IS NOT NULL -ORDER BY - n.trashed_at DESC; + n.id = ANY(sqlc.arg('ids')::uuid[]); -- name: CountNotesInWorkspaceByIDs :one SELECT diff --git a/internal/note/infra/persistence/pgsqlc/note.sql.go b/internal/note/infra/persistence/pgsqlc/note.sql.go index 47a8f3dd..ed6087bb 100644 --- a/internal/note/infra/persistence/pgsqlc/note.sql.go +++ b/internal/note/infra/persistence/pgsqlc/note.sql.go @@ -10,6 +10,7 @@ import ( "time" "github.com/google/uuid" + "go.opentelemetry.io/otel" ) const countNotesInWorkspaceByIDs = `-- name: CountNotesInWorkspaceByIDs :one @@ -31,24 +32,36 @@ type CountNotesInWorkspaceByIDsParams struct { } func (q *Queries) CountNotesInWorkspaceByIDs(ctx context.Context, arg *CountNotesInWorkspaceByIDsParams) (int64, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "CountNotesInWorkspaceByIDs") + defer span.End() row := q.db.QueryRow(ctx, countNotesInWorkspaceByIDs, arg.WorkspaceID, arg.IDs) var count int64 err := row.Scan(&count) return count, err } -const getNote = `-- name: GetNote :one +const getNoteByID = `-- name: GetNoteByID :one SELECT id, name, icon, folder_id, tags, size, created_at, updated_at, trashed_by, 
trashed_at FROM notes WHERE id = $1 - AND trashed_at IS NULL +FOR UPDATE -- :if $2 ` -func (q *Queries) GetNote(ctx context.Context, id uuid.UUID) (*Note, error) { - row := q.db.QueryRow(ctx, getNote, id) +var _getNoteByIDDynQ = dynCompile(getNoteByID) + +type GetNoteByIDParams struct { + ID uuid.UUID + ForUpdate bool +} + +func (q *Queries) GetNoteByID(ctx context.Context, arg GetNoteByIDParams) (*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetNoteByID") + defer span.End() + dynQuery, dynArgs := _getNoteByIDDynQ.Build([]any{arg.ID, arg.ForUpdate}) + row := q.db.QueryRow(ctx, dynQuery, dynArgs...) var i Note err := row.Scan( &i.ID, @@ -71,68 +84,33 @@ SELECT FROM notes WHERE - id = ANY($1::uuid[]) - AND trashed_at IS NULL + id = ANY($1::uuid[]) -- :if $1 + AND folder_id IN ( + SELECT id FROM folders WHERE workspace_id = $2::uuid + ) -- :if $2 + AND ( -- :if $3 + trashed_by = $3::text -- :if $3 + OR trashed_by IS NULL -- :if $3 + ) -- :if $3 + AND trashed_by IS NOT NULL -- :if $4 +FOR UPDATE -- :if $5 ` -func (q *Queries) GetNotes(ctx context.Context, ids []uuid.UUID) ([]*Note, error) { - rows, err := q.db.Query(ctx, getNotes, ids) - if err != nil { - return nil, err - } - defer rows.Close() - var items []*Note - for rows.Next() { - var i Note - if err := rows.Scan( - &i.ID, - &i.Name, - &i.Icon, - &i.FolderID, - &i.Tags, - &i.Size, - &i.CreatedAt, - &i.UpdatedAt, - &i.TrashedBy, - &i.TrashedAt, - ); err != nil { - return nil, err - } - items = append(items, &i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - -const getNotesByFolderIDs = `-- name: GetNotesByFolderIDs :many -SELECT - id, name, icon, folder_id, tags, size, created_at, updated_at, trashed_by, trashed_at -FROM - notes -WHERE - CASE - WHEN CARDINALITY($1::uuid[]) > 0 - THEN folder_id = ANY($1::uuid[]) - ELSE FALSE - END - AND CASE - WHEN $2::bool = FALSE - THEN trashed_at IS NULL - ELSE TRUE - END -ORDER BY - created_at DESC -` +var 
_getNotesDynQ = dynCompile(getNotes) -type GetNotesByFolderIDsParams struct { - FolderIds []uuid.UUID - IncludeTrashed bool +type GetNotesParams struct { + IDs *[]uuid.UUID + WorkspaceID *uuid.UUID + TrashedBy *string + TrashedOnly bool + ForUpdate bool } -func (q *Queries) GetNotesByFolderIDs(ctx context.Context, arg *GetNotesByFolderIDsParams) ([]*Note, error) { - rows, err := q.db.Query(ctx, getNotesByFolderIDs, arg.FolderIds, arg.IncludeTrashed) +func (q *Queries) GetNotes(ctx context.Context, arg *GetNotesParams) ([]*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetNotes") + defer span.End() + dynQuery, dynArgs := _getNotesDynQ.Build([]any{arg.IDs, arg.WorkspaceID, arg.TrashedBy, arg.TrashedOnly, arg.ForUpdate}) + rows, err := q.db.Query(ctx, dynQuery, dynArgs...) if err != nil { return nil, err } @@ -162,95 +140,57 @@ func (q *Queries) GetNotesByFolderIDs(ctx context.Context, arg *GetNotesByFolder return items, nil } -const getNotesInWorkspace = `-- name: GetNotesInWorkspace :many +const getWorkspaceIDByNoteID = `-- name: GetWorkspaceIDByNoteID :one SELECT - n.id, n.name, n.icon, n.folder_id, n.tags, n.size, n.created_at, n.updated_at, n.trashed_by, n.trashed_at + f.workspace_id FROM notes AS n INNER JOIN folders f ON n.folder_id = f.id WHERE - f.workspace_id = $1 - AND CASE - WHEN $2::text IS NOT NULL - THEN n.trashed_by = $2::text - ELSE n.trashed_at IS NULL - END + n.id = $1 ` -type GetNotesInWorkspaceParams struct { - WorkspaceID uuid.UUID - TrashedBy *string -} - -func (q *Queries) GetNotesInWorkspace(ctx context.Context, arg *GetNotesInWorkspaceParams) ([]*Note, error) { - rows, err := q.db.Query(ctx, getNotesInWorkspace, arg.WorkspaceID, arg.TrashedBy) - if err != nil { - return nil, err - } - defer rows.Close() - var items []*Note - for rows.Next() { - var i Note - if err := rows.Scan( - &i.ID, - &i.Name, - &i.Icon, - &i.FolderID, - &i.Tags, - &i.Size, - &i.CreatedAt, - &i.UpdatedAt, - &i.TrashedBy, - &i.TrashedAt, - ); err != nil { 
- return nil, err - } - items = append(items, &i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil +func (q *Queries) GetWorkspaceIDByNoteID(ctx context.Context, id uuid.UUID) (uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetWorkspaceIDByNoteID") + defer span.End() + row := q.db.QueryRow(ctx, getWorkspaceIDByNoteID, id) + var workspace_id uuid.UUID + err := row.Scan(&workspace_id) + return workspace_id, err } -const getTrashedNotesByWorkspaceID = `-- name: GetTrashedNotesByWorkspaceID :many +const getWorkspaceIDsByNoteIDs = `-- name: GetWorkspaceIDsByNoteIDs :many SELECT - n.id, n.name, n.icon, n.folder_id, n.tags, n.size, n.created_at, n.updated_at, n.trashed_by, n.trashed_at + n.id, + f.workspace_id FROM notes AS n INNER JOIN folders f ON n.folder_id = f.id WHERE - f.workspace_id = $1 - AND n.trashed_at IS NOT NULL -ORDER BY - n.trashed_at DESC + n.id = ANY($1::uuid[]) ` -func (q *Queries) GetTrashedNotesByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) ([]*Note, error) { - rows, err := q.db.Query(ctx, getTrashedNotesByWorkspaceID, workspaceID) +type GetWorkspaceIDsByNoteIDsRow struct { + ID uuid.UUID + WorkspaceID uuid.UUID +} + +func (q *Queries) GetWorkspaceIDsByNoteIDs(ctx context.Context, ids []uuid.UUID) ([]*GetWorkspaceIDsByNoteIDsRow, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetWorkspaceIDsByNoteIDs") + defer span.End() + rows, err := q.db.Query(ctx, getWorkspaceIDsByNoteIDs, ids) if err != nil { return nil, err } defer rows.Close() - var items []*Note + var items []*GetWorkspaceIDsByNoteIDsRow for rows.Next() { - var i Note - if err := rows.Scan( - &i.ID, - &i.Name, - &i.Icon, - &i.FolderID, - &i.Tags, - &i.Size, - &i.CreatedAt, - &i.UpdatedAt, - &i.TrashedBy, - &i.TrashedAt, - ); err != nil { + var i GetWorkspaceIDsByNoteIDsRow + if err := rows.Scan(&i.ID, &i.WorkspaceID); err != nil { return nil, err } items = append(items, &i) @@ -261,25 +201,6 @@ func (q *Queries) 
GetTrashedNotesByWorkspaceID(ctx context.Context, workspaceID return items, nil } -const getWorkspaceIDByNoteID = `-- name: GetWorkspaceIDByNoteID :one -SELECT - f.workspace_id -FROM - notes AS n -INNER JOIN - folders f - ON n.folder_id = f.id -WHERE - n.id = $1 -` - -func (q *Queries) GetWorkspaceIDByNoteID(ctx context.Context, id uuid.UUID) (uuid.UUID, error) { - row := q.db.QueryRow(ctx, getWorkspaceIDByNoteID, id) - var workspace_id uuid.UUID - err := row.Scan(&workspace_id) - return workspace_id, err -} - type InsertTempNotesParams struct { ID uuid.UUID Name string @@ -301,6 +222,8 @@ WHERE ` func (q *Queries) PermanentlyDeleteNoteByID(ctx context.Context, id uuid.UUID) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "PermanentlyDeleteNoteByID") + defer span.End() _, err := q.db.Exec(ctx, permanentlyDeleteNoteByID, id) return err } @@ -313,6 +236,8 @@ WHERE ` func (q *Queries) PermanentlyDeleteNotesByIDs(ctx context.Context, ids []uuid.UUID) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "PermanentlyDeleteNotesByIDs") + defer span.End() _, err := q.db.Exec(ctx, permanentlyDeleteNotesByIDs, ids) return err } @@ -355,6 +280,8 @@ ON CONFLICT (id) DO UPDATE SET ` func (q *Queries) SaveFromTempNotes(ctx context.Context) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "SaveFromTempNotes") + defer span.End() _, err := q.db.Exec(ctx, saveFromTempNotes) return err } @@ -409,6 +336,8 @@ type SaveNoteParams struct { } func (q *Queries) SaveNote(ctx context.Context, arg *SaveNoteParams) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "SaveNote") + defer span.End() _, err := q.db.Exec(ctx, saveNote, arg.ID, arg.Name, diff --git a/internal/note/infra/persistence/pgsqlc/note_link.sql b/internal/note/infra/persistence/pgsqlc/note_link.sql index d099e22a..ea717b39 100644 --- a/internal/note/infra/persistence/pgsqlc/note_link.sql +++ b/internal/note/infra/persistence/pgsqlc/note_link.sql @@ -40,29 +40,15 @@ SELECT FROM note_links WHERE - 
CASE - WHEN sqlc.narg('source_id')::uuid IS NOT NULL THEN source_id = sqlc.narg('source_id')::uuid - ELSE TRUE - END - AND CASE - WHEN CARDINALITY(sqlc.arg('source_ids')::uuid[]) > 0 THEN source_id = ANY(sqlc.arg('source_ids')::uuid[]) - ELSE TRUE - END; + source_id = sqlc.arg('source_id')::uuid; --- name: GetNoteBacklinks :many +-- name: GetNotesOutgoingLinks :many SELECT - source_id + source_id, + ARRAY_AGG(target_id) AS target_ids FROM note_links WHERE - target_id = sqlc.arg('target_id'); - --- name: GetNoteLinksInWorkspace :many -SELECT - nl.* -FROM note_links AS nl -JOIN notes AS sn ON nl.source_id = sn.id -JOIN folders AS sf ON sn.folder_id = sf.id -WHERE sf.workspace_id = sqlc.arg('workspace_id')::uuid - AND sn.trashed_at IS NULL - AND sf.trashed_at IS NULL; + source_id = ANY(sqlc.arg('source_ids')::uuid[]) +GROUP BY + source_id; diff --git a/internal/note/infra/persistence/pgsqlc/note_link.sql.go b/internal/note/infra/persistence/pgsqlc/note_link.sql.go index 3d8cfc18..6442178f 100644 --- a/internal/note/infra/persistence/pgsqlc/note_link.sql.go +++ b/internal/note/infra/persistence/pgsqlc/note_link.sql.go @@ -9,6 +9,7 @@ import ( "context" "github.com/google/uuid" + "go.opentelemetry.io/otel" ) const deleteObsoleteNoteLinks = `-- name: DeleteObsoleteNoteLinks :exec @@ -28,63 +29,36 @@ WHERE ` func (q *Queries) DeleteObsoleteNoteLinks(ctx context.Context) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "DeleteObsoleteNoteLinks") + defer span.End() _, err := q.db.Exec(ctx, deleteObsoleteNoteLinks) return err } -const getNoteBacklinks = `-- name: GetNoteBacklinks :many +const getNoteOutgoingLinks = `-- name: GetNoteOutgoingLinks :many SELECT - source_id + target_id FROM note_links WHERE - target_id = $1 + source_id = $1::uuid ` -func (q *Queries) GetNoteBacklinks(ctx context.Context, targetID uuid.UUID) ([]uuid.UUID, error) { - rows, err := q.db.Query(ctx, getNoteBacklinks, targetID) +func (q *Queries) GetNoteOutgoingLinks(ctx context.Context, 
sourceID uuid.UUID) ([]uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetNoteOutgoingLinks") + defer span.End() + rows, err := q.db.Query(ctx, getNoteOutgoingLinks, sourceID) if err != nil { return nil, err } defer rows.Close() var items []uuid.UUID for rows.Next() { - var source_id uuid.UUID - if err := rows.Scan(&source_id); err != nil { - return nil, err - } - items = append(items, source_id) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - -const getNoteLinksInWorkspace = `-- name: GetNoteLinksInWorkspace :many -SELECT - nl.source_id, nl.target_id -FROM note_links AS nl -JOIN notes AS sn ON nl.source_id = sn.id -JOIN folders AS sf ON sn.folder_id = sf.id -WHERE sf.workspace_id = $1::uuid - AND sn.trashed_at IS NULL - AND sf.trashed_at IS NULL -` - -func (q *Queries) GetNoteLinksInWorkspace(ctx context.Context, workspaceID uuid.UUID) ([]*NoteLink, error) { - rows, err := q.db.Query(ctx, getNoteLinksInWorkspace, workspaceID) - if err != nil { - return nil, err - } - defer rows.Close() - var items []*NoteLink - for rows.Next() { - var i NoteLink - if err := rows.Scan(&i.SourceID, &i.TargetID); err != nil { + var target_id uuid.UUID + if err := rows.Scan(&target_id); err != nil { return nil, err } - items = append(items, &i) + items = append(items, target_id) } if err := rows.Err(); err != nil { return nil, err @@ -92,40 +66,38 @@ func (q *Queries) GetNoteLinksInWorkspace(ctx context.Context, workspaceID uuid. 
return items, nil } -const getNoteOutgoingLinks = `-- name: GetNoteOutgoingLinks :many +const getNotesOutgoingLinks = `-- name: GetNotesOutgoingLinks :many SELECT - target_id + source_id, + ARRAY_AGG(target_id) AS target_ids FROM note_links WHERE - CASE - WHEN $1::uuid IS NOT NULL THEN source_id = $1::uuid - ELSE TRUE - END - AND CASE - WHEN CARDINALITY($2::uuid[]) > 0 THEN source_id = ANY($2::uuid[]) - ELSE TRUE - END + source_id = ANY($1::uuid[]) +GROUP BY + source_id ` -type GetNoteOutgoingLinksParams struct { - SourceID *uuid.UUID - SourceIDs []uuid.UUID +type GetNotesOutgoingLinksRow struct { + SourceID uuid.UUID + TargetIDs interface{} } -func (q *Queries) GetNoteOutgoingLinks(ctx context.Context, arg *GetNoteOutgoingLinksParams) ([]uuid.UUID, error) { - rows, err := q.db.Query(ctx, getNoteOutgoingLinks, arg.SourceID, arg.SourceIDs) +func (q *Queries) GetNotesOutgoingLinks(ctx context.Context, sourceIds []uuid.UUID) ([]*GetNotesOutgoingLinksRow, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetNotesOutgoingLinks") + defer span.End() + rows, err := q.db.Query(ctx, getNotesOutgoingLinks, sourceIds) if err != nil { return nil, err } defer rows.Close() - var items []uuid.UUID + var items []*GetNotesOutgoingLinksRow for rows.Next() { - var target_id uuid.UUID - if err := rows.Scan(&target_id); err != nil { + var i GetNotesOutgoingLinksRow + if err := rows.Scan(&i.SourceID, &i.TargetIDs); err != nil { return nil, err } - items = append(items, target_id) + items = append(items, &i) } if err := rows.Err(); err != nil { return nil, err @@ -152,6 +124,8 @@ ON CONFLICT DO NOTHING ` func (q *Queries) SaveFromTempNoteLinks(ctx context.Context) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "SaveFromTempNoteLinks") + defer span.End() _, err := q.db.Exec(ctx, saveFromTempNoteLinks) return err } diff --git a/internal/note/infra/persistence/pgsqlc/note_link_read.sql b/internal/note/infra/persistence/pgsqlc/note_link_read.sql new file mode 100644 index 
00000000..7e93e267 --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/note_link_read.sql @@ -0,0 +1,26 @@ +-- name: ReadGetNoteBacklinks :many +SELECT + source_id +FROM + note_links +WHERE + target_id = sqlc.arg('target_id'); + +-- name: ReadGetNoteOutgoingLinks :many +SELECT + target_id +FROM + note_links +WHERE + source_id = sqlc.arg('source_id')::uuid +; + +-- name: ReadGetNoteLinksInWorkspace :many +SELECT + nl.* +FROM note_links AS nl +JOIN notes AS sn ON nl.source_id = sn.id +JOIN folders AS sf ON sn.folder_id = sf.id +WHERE sf.workspace_id = sqlc.arg('workspace_id')::uuid + AND sn.trashed_at IS NULL + AND sf.trashed_at IS NULL; diff --git a/internal/note/infra/persistence/pgsqlc/note_link_read.sql.go b/internal/note/infra/persistence/pgsqlc/note_link_read.sql.go new file mode 100644 index 00000000..35f64870 --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/note_link_read.sql.go @@ -0,0 +1,108 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 +// source: note_link_read.sql + +package pgsqlc + +import ( + "context" + + "github.com/google/uuid" + "go.opentelemetry.io/otel" +) + +const readGetNoteBacklinks = `-- name: ReadGetNoteBacklinks :many +SELECT + source_id +FROM + note_links +WHERE + target_id = $1 +` + +func (q *Queries) ReadGetNoteBacklinks(ctx context.Context, targetID uuid.UUID) ([]uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNoteBacklinks") + defer span.End() + rows, err := q.db.Query(ctx, readGetNoteBacklinks, targetID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []uuid.UUID + for rows.Next() { + var source_id uuid.UUID + if err := rows.Scan(&source_id); err != nil { + return nil, err + } + items = append(items, source_id) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetNoteLinksInWorkspace = `-- name: ReadGetNoteLinksInWorkspace :many +SELECT + nl.source_id, nl.target_id +FROM note_links AS nl 
+JOIN notes AS sn ON nl.source_id = sn.id +JOIN folders AS sf ON sn.folder_id = sf.id +WHERE sf.workspace_id = $1::uuid + AND sn.trashed_at IS NULL + AND sf.trashed_at IS NULL +` + +func (q *Queries) ReadGetNoteLinksInWorkspace(ctx context.Context, workspaceID uuid.UUID) ([]*NoteLink, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNoteLinksInWorkspace") + defer span.End() + rows, err := q.db.Query(ctx, readGetNoteLinksInWorkspace, workspaceID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*NoteLink + for rows.Next() { + var i NoteLink + if err := rows.Scan(&i.SourceID, &i.TargetID); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetNoteOutgoingLinks = `-- name: ReadGetNoteOutgoingLinks :many +SELECT + target_id +FROM + note_links +WHERE + source_id = $1::uuid +` + +func (q *Queries) ReadGetNoteOutgoingLinks(ctx context.Context, sourceID uuid.UUID) ([]uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNoteOutgoingLinks") + defer span.End() + rows, err := q.db.Query(ctx, readGetNoteOutgoingLinks, sourceID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []uuid.UUID + for rows.Next() { + var target_id uuid.UUID + if err := rows.Scan(&target_id); err != nil { + return nil, err + } + items = append(items, target_id) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/internal/note/infra/persistence/pgsqlc/note_read.sql b/internal/note/infra/persistence/pgsqlc/note_read.sql new file mode 100644 index 00000000..eb11809a --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/note_read.sql @@ -0,0 +1,71 @@ +-- name: ReadGetNoteByID :one +SELECT + * +FROM + notes +WHERE + id = sqlc.arg('id'); + +-- name: ReadGetNotesByIDs :many +SELECT + * +FROM + notes +WHERE + id = ANY(sqlc.arg('ids')::uuid[]); + +-- TODO: Check 
if backlinks or outgoing link need those params +-- name: ReadGetNotes :many +SELECT + * +FROM + notes +WHERE + id = ANY(sqlc.narg('ids')::uuid[]) -- :if @ids + AND folder_id IN ( + SELECT id FROM folders WHERE workspace_id = sqlc.narg('workspace_id')::uuid + ) -- :if @workspace_id + AND ( -- :if @trashed_by + trashed_by = sqlc.narg('trashed_by')::text + OR trashed_by IS NULL + ) + AND trashed_by IS NULL -- :if @non_trashed_only + AND trashed_by IS NOT NULL -- :if @trashed_only +; + +-- name: ReadGetNotesByFolderIDs :many +SELECT + * +FROM + notes +WHERE + folder_id = ANY(sqlc.arg('folder_ids')::uuid[]) + AND trashed_at IS NULL -- :if @exclude_trash +; + +-- name: ReadGetNotesInWorkspace :many +SELECT + n.* +FROM + notes AS n +INNER JOIN + folders f + ON n.folder_id = f.id +WHERE + f.workspace_id = sqlc.arg('workspace_id') + AND n.trashed_at IS NULL -- :if @exclude_trash +; + +-- name: ReadGetTrashedNotesByWorkspaceID :many +SELECT + n.* +FROM + notes AS n +INNER JOIN + folders f + ON n.folder_id = f.id +WHERE + f.workspace_id = sqlc.arg('workspace_id') + AND n.trashed_at IS NOT NULL +ORDER BY + n.trashed_at DESC; diff --git a/internal/note/infra/persistence/pgsqlc/note_read.sql.go b/internal/note/infra/persistence/pgsqlc/note_read.sql.go new file mode 100644 index 00000000..c995133e --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/note_read.sql.go @@ -0,0 +1,300 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 +// source: note_read.sql + +package pgsqlc + +import ( + "context" + + "github.com/google/uuid" + "go.opentelemetry.io/otel" +) + +const readGetNoteByID = `-- name: ReadGetNoteByID :one +SELECT + id, name, icon, folder_id, tags, size, created_at, updated_at, trashed_by, trashed_at +FROM + notes +WHERE + id = $1 +` + +func (q *Queries) ReadGetNoteByID(ctx context.Context, id uuid.UUID) (*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNoteByID") + defer span.End() + row := q.db.QueryRow(ctx, readGetNoteByID, id) + var i Note + err := row.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.FolderID, + &i.Tags, + &i.Size, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ) + return &i, err +} + +const readGetNotes = `-- name: ReadGetNotes :many +SELECT + id, name, icon, folder_id, tags, size, created_at, updated_at, trashed_by, trashed_at +FROM + notes +WHERE + id = ANY($1::uuid[]) -- :if $1 + AND folder_id IN ( + SELECT id FROM folders WHERE workspace_id = $2::uuid + ) -- :if $2 + AND ( -- :if $3 + trashed_by = $3::text -- :if $3 + OR trashed_by IS NULL -- :if $3 + ) -- :if $3 + AND trashed_by IS NULL -- :if $4 + AND trashed_by IS NOT NULL -- :if $5 +` + +var _readGetNotesDynQ = dynCompile(readGetNotes) + +type ReadGetNotesParams struct { + IDs *[]uuid.UUID + WorkspaceID *uuid.UUID + TrashedBy *string + NonTrashedOnly bool + TrashedOnly bool +} + +// TODO: Check if backlinks or outgoing link need those params +func (q *Queries) ReadGetNotes(ctx context.Context, arg *ReadGetNotesParams) ([]*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNotes") + defer span.End() + dynQuery, dynArgs := _readGetNotesDynQ.Build([]any{arg.IDs, arg.WorkspaceID, arg.TrashedBy, arg.NonTrashedOnly, arg.TrashedOnly}) + rows, err := q.db.Query(ctx, dynQuery, dynArgs...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []*Note + for rows.Next() { + var i Note + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.FolderID, + &i.Tags, + &i.Size, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetNotesByFolderIDs = `-- name: ReadGetNotesByFolderIDs :many +SELECT + id, name, icon, folder_id, tags, size, created_at, updated_at, trashed_by, trashed_at +FROM + notes +WHERE + folder_id = ANY($1::uuid[]) + AND trashed_at IS NULL -- :if $2 +` + +var _readGetNotesByFolderIDsDynQ = dynCompile(readGetNotesByFolderIDs) + +type ReadGetNotesByFolderIDsParams struct { + FolderIds []uuid.UUID + ExcludeTrash bool +} + +func (q *Queries) ReadGetNotesByFolderIDs(ctx context.Context, arg ReadGetNotesByFolderIDsParams) ([]*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNotesByFolderIDs") + defer span.End() + dynQuery, dynArgs := _readGetNotesByFolderIDsDynQ.Build([]any{arg.FolderIds, arg.ExcludeTrash}) + rows, err := q.db.Query(ctx, dynQuery, dynArgs...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []*Note + for rows.Next() { + var i Note + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.FolderID, + &i.Tags, + &i.Size, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetNotesByIDs = `-- name: ReadGetNotesByIDs :many +SELECT + id, name, icon, folder_id, tags, size, created_at, updated_at, trashed_by, trashed_at +FROM + notes +WHERE + id = ANY($1::uuid[]) +` + +func (q *Queries) ReadGetNotesByIDs(ctx context.Context, ids []uuid.UUID) ([]*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNotesByIDs") + defer span.End() + rows, err := q.db.Query(ctx, readGetNotesByIDs, ids) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*Note + for rows.Next() { + var i Note + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.FolderID, + &i.Tags, + &i.Size, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetNotesInWorkspace = `-- name: ReadGetNotesInWorkspace :many +SELECT + n.id, n.name, n.icon, n.folder_id, n.tags, n.size, n.created_at, n.updated_at, n.trashed_by, n.trashed_at +FROM + notes AS n +INNER JOIN + folders f + ON n.folder_id = f.id +WHERE + f.workspace_id = $1 + AND n.trashed_at IS NULL -- :if $2 +` + +var _readGetNotesInWorkspaceDynQ = dynCompile(readGetNotesInWorkspace) + +type ReadGetNotesInWorkspaceParams struct { + WorkspaceID uuid.UUID + ExcludeTrash bool +} + +func (q *Queries) ReadGetNotesInWorkspace(ctx context.Context, arg ReadGetNotesInWorkspaceParams) ([]*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetNotesInWorkspace") + defer 
span.End() + dynQuery, dynArgs := _readGetNotesInWorkspaceDynQ.Build([]any{arg.WorkspaceID, arg.ExcludeTrash}) + rows, err := q.db.Query(ctx, dynQuery, dynArgs...) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*Note + for rows.Next() { + var i Note + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.FolderID, + &i.Tags, + &i.Size, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const readGetTrashedNotesByWorkspaceID = `-- name: ReadGetTrashedNotesByWorkspaceID :many +SELECT + n.id, n.name, n.icon, n.folder_id, n.tags, n.size, n.created_at, n.updated_at, n.trashed_by, n.trashed_at +FROM + notes AS n +INNER JOIN + folders f + ON n.folder_id = f.id +WHERE + f.workspace_id = $1 + AND n.trashed_at IS NOT NULL +ORDER BY + n.trashed_at DESC +` + +func (q *Queries) ReadGetTrashedNotesByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) ([]*Note, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetTrashedNotesByWorkspaceID") + defer span.End() + rows, err := q.db.Query(ctx, readGetTrashedNotesByWorkspaceID, workspaceID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*Note + for rows.Next() { + var i Note + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Icon, + &i.FolderID, + &i.Tags, + &i.Size, + &i.CreatedAt, + &i.UpdatedAt, + &i.TrashedBy, + &i.TrashedAt, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/internal/note/infra/persistence/pgsqlc/temp_tables.sql.go b/internal/note/infra/persistence/pgsqlc/temp_tables.sql.go index c8f5de3c..856f1617 100644 --- a/internal/note/infra/persistence/pgsqlc/temp_tables.sql.go +++ b/internal/note/infra/persistence/pgsqlc/temp_tables.sql.go @@ -7,6 +7,8 @@ package pgsqlc 
import ( "context" + + "go.opentelemetry.io/otel" ) const createTempTableFolders = `-- name: CreateTempTableFolders :exec @@ -24,6 +26,8 @@ CREATE TEMP TABLE temp_folders ( ` func (q *Queries) CreateTempTableFolders(ctx context.Context) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "CreateTempTableFolders") + defer span.End() _, err := q.db.Exec(ctx, createTempTableFolders) return err } @@ -37,6 +41,8 @@ CREATE TEMP TABLE temp_note_links ( ` func (q *Queries) CreateTempTableNoteLinks(ctx context.Context) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "CreateTempTableNoteLinks") + defer span.End() _, err := q.db.Exec(ctx, createTempTableNoteLinks) return err } @@ -57,6 +63,8 @@ CREATE TEMP TABLE temp_notes ( ` func (q *Queries) CreateTempTableNotes(ctx context.Context) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "CreateTempTableNotes") + defer span.End() _, err := q.db.Exec(ctx, createTempTableNotes) return err } diff --git a/internal/note/infra/persistence/pgsqlc/workspace.sql b/internal/note/infra/persistence/pgsqlc/workspace.sql index c4374734..c4865617 100644 --- a/internal/note/infra/persistence/pgsqlc/workspace.sql +++ b/internal/note/infra/persistence/pgsqlc/workspace.sql @@ -1,15 +1,24 @@ --- name: GetWorkspace :one +-- name: GetWorkspaceByID :one SELECT * FROM workspaces WHERE - CASE - WHEN sqlc.narg('slug')::text IS NOT NULL THEN slug = sqlc.narg('slug') - WHEN sqlc.narg('id')::uuid IS NOT NULL THEN id = sqlc.narg('id') - ELSE FALSE - END - AND deleted_at IS NULL; + id = sqlc.arg('id')::uuid + AND deleted_at IS NULL +FOR UPDATE -- :if @for_update +; + +-- name: GetWorkspaceBySlug :one +SELECT + * +FROM + workspaces +WHERE + slug = sqlc.arg('slug')::text + AND deleted_at IS NULL +FOR UPDATE -- :if @for_update +; -- name: GetWorkspaceIDBySlug :one SELECT diff --git a/internal/note/infra/persistence/pgsqlc/workspace.sql.go b/internal/note/infra/persistence/pgsqlc/workspace.sql.go index 43d7cc4c..13140c28 100644 --- 
a/internal/note/infra/persistence/pgsqlc/workspace.sql.go +++ b/internal/note/infra/persistence/pgsqlc/workspace.sql.go @@ -10,6 +10,7 @@ import ( "time" "github.com/google/uuid" + "go.opentelemetry.io/otel" ) const checkSlugExists = `-- name: CheckSlugExists :one @@ -25,33 +26,72 @@ SELECT EXISTS( ` func (q *Queries) CheckSlugExists(ctx context.Context, slug string) (bool, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "CheckSlugExists") + defer span.End() row := q.db.QueryRow(ctx, checkSlugExists, slug) var exists bool err := row.Scan(&exists) return exists, err } -const getWorkspace = `-- name: GetWorkspace :one +const getWorkspaceByID = `-- name: GetWorkspaceByID :one SELECT id, slug, name, created_at, updated_at, deleted_at FROM workspaces WHERE - CASE - WHEN $1::text IS NOT NULL THEN slug = $1 - WHEN $2::uuid IS NOT NULL THEN id = $2 - ELSE FALSE - END + id = $1::uuid AND deleted_at IS NULL +FOR UPDATE -- :if $2 ` -type GetWorkspaceParams struct { - Slug *string - ID *uuid.UUID +var _getWorkspaceByIDDynQ = dynCompile(getWorkspaceByID) + +type GetWorkspaceByIDParams struct { + ID uuid.UUID + ForUpdate bool +} + +func (q *Queries) GetWorkspaceByID(ctx context.Context, arg GetWorkspaceByIDParams) (*Workspace, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetWorkspaceByID") + defer span.End() + dynQuery, dynArgs := _getWorkspaceByIDDynQ.Build([]any{arg.ID, arg.ForUpdate}) + row := q.db.QueryRow(ctx, dynQuery, dynArgs...) 
+ var i Workspace + err := row.Scan( + &i.ID, + &i.Slug, + &i.Name, + &i.CreatedAt, + &i.UpdatedAt, + &i.DeletedAt, + ) + return &i, err +} + +const getWorkspaceBySlug = `-- name: GetWorkspaceBySlug :one +SELECT + id, slug, name, created_at, updated_at, deleted_at +FROM + workspaces +WHERE + slug = $1::text + AND deleted_at IS NULL +FOR UPDATE -- :if $2 +` + +var _getWorkspaceBySlugDynQ = dynCompile(getWorkspaceBySlug) + +type GetWorkspaceBySlugParams struct { + Slug string + ForUpdate bool } -func (q *Queries) GetWorkspace(ctx context.Context, arg *GetWorkspaceParams) (*Workspace, error) { - row := q.db.QueryRow(ctx, getWorkspace, arg.Slug, arg.ID) +func (q *Queries) GetWorkspaceBySlug(ctx context.Context, arg GetWorkspaceBySlugParams) (*Workspace, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetWorkspaceBySlug") + defer span.End() + dynQuery, dynArgs := _getWorkspaceBySlugDynQ.Build([]any{arg.Slug, arg.ForUpdate}) + row := q.db.QueryRow(ctx, dynQuery, dynArgs...) var i Workspace err := row.Scan( &i.ID, @@ -75,6 +115,8 @@ WHERE ` func (q *Queries) GetWorkspaceIDBySlug(ctx context.Context, slug string) (uuid.UUID, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "GetWorkspaceIDBySlug") + defer span.End() row := q.db.QueryRow(ctx, getWorkspaceIDBySlug, slug) var id uuid.UUID err := row.Scan(&id) @@ -115,6 +157,8 @@ type SaveWorkspaceParams struct { } func (q *Queries) SaveWorkspace(ctx context.Context, arg *SaveWorkspaceParams) error { + ctx, span := otel.Tracer("Queries").Start(ctx, "SaveWorkspace") + defer span.End() _, err := q.db.Exec(ctx, saveWorkspace, arg.ID, arg.Slug, diff --git a/internal/note/infra/persistence/pgsqlc/workspace_read.sql b/internal/note/infra/persistence/pgsqlc/workspace_read.sql new file mode 100644 index 00000000..4bb91c4e --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/workspace_read.sql @@ -0,0 +1,19 @@ +-- name: ReadGetWorkspaceBySlug :one +SELECT + * +FROM + workspaces +WHERE + slug = 
sqlc.arg('slug')::text + AND deleted_at IS NULL; + +-- name: ReadCheckSlugExists :one +SELECT EXISTS( + SELECT + 1 + FROM + workspaces + WHERE + slug = sqlc.arg('slug')::text + AND deleted_at IS NULL +) AS exists; diff --git a/internal/note/infra/persistence/pgsqlc/workspace_read.sql.go b/internal/note/infra/persistence/pgsqlc/workspace_read.sql.go new file mode 100644 index 00000000..8652e8cd --- /dev/null +++ b/internal/note/infra/persistence/pgsqlc/workspace_read.sql.go @@ -0,0 +1,59 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 +// source: workspace_read.sql + +package pgsqlc + +import ( + "context" + + "go.opentelemetry.io/otel" +) + +const readCheckSlugExists = `-- name: ReadCheckSlugExists :one +SELECT EXISTS( + SELECT + 1 + FROM + workspaces + WHERE + slug = $1::text + AND deleted_at IS NULL +) AS exists +` + +func (q *Queries) ReadCheckSlugExists(ctx context.Context, slug string) (bool, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadCheckSlugExists") + defer span.End() + row := q.db.QueryRow(ctx, readCheckSlugExists, slug) + var exists bool + err := row.Scan(&exists) + return exists, err +} + +const readGetWorkspaceBySlug = `-- name: ReadGetWorkspaceBySlug :one +SELECT + id, slug, name, created_at, updated_at, deleted_at +FROM + workspaces +WHERE + slug = $1::text + AND deleted_at IS NULL +` + +func (q *Queries) ReadGetWorkspaceBySlug(ctx context.Context, slug string) (*Workspace, error) { + ctx, span := otel.Tracer("Queries").Start(ctx, "ReadGetWorkspaceBySlug") + defer span.End() + row := q.db.QueryRow(ctx, readGetWorkspaceBySlug, slug) + var i Workspace + err := row.Scan( + &i.ID, + &i.Slug, + &i.Name, + &i.CreatedAt, + &i.UpdatedAt, + &i.DeletedAt, + ) + return &i, err +} diff --git a/internal/note/infra/persistence/wire.go b/internal/note/infra/persistence/wire.go index a4483515..496101fb 100644 --- a/internal/note/infra/persistence/wire.go +++ b/internal/note/infra/persistence/wire.go @@ -1,46 +1,54 @@ 
package persistence import ( - "database/sql" - - "github.com/go-jet/jet/v2/qrm" "github.com/goforj/wire" "github.com/jackc/pgx/v5/pgxpool" "github.com/notopia-uit/notopia/internal/note/app" "github.com/notopia-uit/notopia/internal/note/domain" - "github.com/notopia-uit/notopia/internal/note/infra/persistence/pg" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgreadmodel" + "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgrepo" "github.com/notopia-uit/notopia/internal/note/infra/persistence/pgsqlc" ) -var PostresReadModelProviderSet = wire.NewSet( - pg.ProvideReadModel, - wire.Bind(new(app.CheckWorkspaceSlugExistsReadModel), new(*pg.ReadModel)), - wire.Bind(new(app.GetNoteGraphReadModel), new(*pg.ReadModel)), - wire.Bind(new(app.GetNoteLinksReadModel), new(*pg.ReadModel)), - wire.Bind(new(app.GetNoteReadModel), new(*pg.ReadModel)), - wire.Bind(new(app.GetWorkspaceBySlugReadModel), new(*pg.ReadModel)), - wire.Bind(new(app.GetWorkspaceGraphReadModel), new(*pg.ReadModel)), - wire.Bind(new(app.GetWorkspaceTreeReadModel), new(*pg.ReadModel)), - wire.Bind(new(app.ShowTrashReadModel), new(*pg.ReadModel)), +var PGReadModelProviderSet = wire.NewSet( + pgreadmodel.ProvideCheckWorkspaceSlugExists, + pgreadmodel.ProvideNote, + pgreadmodel.ProvideNoteGraph, + pgreadmodel.ProvideNoteLinks, + pgreadmodel.ProvideShowTrash, + pgreadmodel.ProvideWorkspaceBySlug, + pgreadmodel.ProvideWorkspaceGraph, + pgreadmodel.ProvideWorkspaceTree, + wire.Bind(new(app.CheckWorkspaceSlugExistsReadModel), new(*pgreadmodel.CheckWorkspaceSlugExists)), + wire.Bind(new(app.GetNoteGraphReadModel), new(*pgreadmodel.NoteGraph)), + wire.Bind(new(app.GetNoteLinksReadModel), new(*pgreadmodel.NoteLinks)), + wire.Bind(new(app.GetNoteReadModel), new(*pgreadmodel.Note)), + wire.Bind(new(app.GetWorkspaceGraphReadModel), new(*pgreadmodel.WorkspaceGraph)), + wire.Bind(new(app.GetWorkspaceTreeReadModel), new(*pgreadmodel.WorkspaceTree)), + wire.Bind(new(app.ShowTrashReadModel), 
new(*pgreadmodel.ShowTrash)), + wire.Bind(new(app.WorkspaceBySlugReadModel), new(*pgreadmodel.WorkspaceBySlug)), +) + +var PGRepoProviderSet = wire.NewSet( + pgrepo.ProvideFolder, + pgrepo.ProvideNote, + pgrepo.ProvideUnitOfWork, + pgrepo.ProvideWorkspace, + pgrepo.ProvideRunInTx, + wire.Bind(new(domain.FolderRepo), new(*pgrepo.Folder)), + wire.Bind(new(domain.NoteRepo), new(*pgrepo.Note)), + wire.Bind(new(domain.UnitOfWork), new(*pgrepo.UnitOfWork)), + wire.Bind(new(domain.WorkspaceRepo), new(*pgrepo.Workspace)), ) var PostgresProviderSet = wire.NewSet( - PostresReadModelProviderSet, + PGRepoProviderSet, + PGReadModelProviderSet, ProvideGooseProvider, ProvidePg, - pg.ProvideFolder, - pg.ProvideNote, - pg.ProvidePgPool, - pg.ProvideQueries, - pg.ProvideStdlib, - pg.ProvideUnitOfWork, - pg.ProvideWorkspace, - wire.Bind(new(app.Persistence), new(*Pg)), - wire.Bind(new(domain.FolderRepo), new(*pg.Folder)), - wire.Bind(new(domain.NoteRepo), new(*pg.Note)), - wire.Bind(new(domain.UnitOfWork), new(*pg.UnitOfWork)), - wire.Bind(new(domain.WorkspaceRepo), new(*pg.Workspace)), + ProvidePgPool, + ProvideSQLCQueries, + ProvidePgxPoolStdlib, wire.Bind(new(pgsqlc.DBTX), new(*pgxpool.Pool)), - wire.Bind(new(qrm.DB), new(*sql.DB)), ) diff --git a/internal/note/infra/pubsub/common.go b/internal/note/infra/pubsub/common.go deleted file mode 100644 index 5bb67ef6..00000000 --- a/internal/note/infra/pubsub/common.go +++ /dev/null @@ -1,48 +0,0 @@ -package pubsub - -import ( - "fmt" - "log/slog" - - "github.com/ThreeDotsLabs/watermill" - "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka" - "github.com/ThreeDotsLabs/watermill/components/cqrs" - "github.com/ThreeDotsLabs/watermill/message" - commonconfig "github.com/notopia-uit/notopia/pkg/common/config" -) - -func NewWatermillLogger(logger *slog.Logger) watermill.LoggerAdapter { - return watermill.NewSlogLogger(logger) -} - -var ProvideWatermillLogger = NewWatermillLogger - -func NewIntegrationMarshaler() 
cqrs.CommandEventMarshaler { - return cqrs.JSONMarshaler{} -} - -var ProvideIntegrationMarshaler = NewIntegrationMarshaler - -type KafkaPublisher struct { - message.Publisher -} - -func NewKafkaPublisher( - cfg commonconfig.Kafka, - logger watermill.LoggerAdapter, - tracer kafka.SaramaTracer, -) (*KafkaPublisher, error) { - publisher, err := kafka.NewPublisher( - kafka.PublisherConfig{ - Brokers: cfg.Brokers, - Tracer: tracer, - }, - logger, - ) - if err != nil { - return nil, fmt.Errorf("failed to create Kafka publisher: %w", err) - } - return &KafkaPublisher{Publisher: publisher}, nil -} - -var ProvideKafkaPublisher = NewKafkaPublisher diff --git a/internal/note/infra/pubsub/integration.go b/internal/note/infra/pubsub/integration.go deleted file mode 100644 index 7a12a74c..00000000 --- a/internal/note/infra/pubsub/integration.go +++ /dev/null @@ -1,73 +0,0 @@ -package pubsub - -import ( - "fmt" - - "github.com/ThreeDotsLabs/watermill" - "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka" - "github.com/ThreeDotsLabs/watermill/components/cqrs" - "github.com/ThreeDotsLabs/watermill/message" - "github.com/ThreeDotsLabs/watermill/message/router/middleware" - "github.com/notopia-uit/notopia/internal/note/app" - commonconfig "github.com/notopia-uit/notopia/pkg/common/config" -) - -func NewIntegrationPubSub( - cfg *commonconfig.Kafka, - logger watermill.LoggerAdapter, - publisher *KafkaPublisher, - tracer kafka.SaramaTracer, - marshaler cqrs.CommandEventMarshaler, -) (*app.IntegrationPubSub, error) { - router, err := message.NewRouter(message.RouterConfig{}, logger) - if err != nil { - return nil, fmt.Errorf("failed to create router: %w", err) - } - router.AddMiddleware( - middleware.CorrelationID, - middleware.Recoverer, - ) - - eventBus, err := cqrs.NewEventBusWithConfig(publisher, cqrs.EventBusConfig{ - GeneratePublishTopic: func(params cqrs.GenerateEventPublishTopicParams) (string, error) { - return "events.integration." 
+ params.EventName, nil - }, - Marshaler: marshaler, - Logger: logger, - }) - if err != nil { - return nil, fmt.Errorf("failed to create event bus: %w", err) - } - - eventProcessor, err := cqrs.NewEventProcessorWithConfig( - router, - cqrs.EventProcessorConfig{ - GenerateSubscribeTopic: func(params cqrs.EventProcessorGenerateSubscribeTopicParams) (string, error) { - return "events.integration." + params.EventName, nil - }, - SubscriberConstructor: func(params cqrs.EventProcessorSubscriberConstructorParams) (message.Subscriber, error) { - return kafka.NewSubscriber( - kafka.SubscriberConfig{ - Brokers: cfg.Brokers, - ConsumerGroup: cfg.ConsumerGroup + "." + params.HandlerName, - Tracer: tracer, - }, - logger, - ) - }, - Marshaler: marshaler, - Logger: logger, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create event processor: %w", err) - } - - return app.NewIntegrationPubSub( - eventBus, - eventProcessor, - router, - ), nil -} - -var ProvideIntegrationPubSub = NewIntegrationPubSub diff --git a/internal/note/infra/pubsub/outbox.go b/internal/note/infra/pubsub/outbox.go deleted file mode 100644 index bdd8147f..00000000 --- a/internal/note/infra/pubsub/outbox.go +++ /dev/null @@ -1 +0,0 @@ -package pubsub diff --git a/internal/note/infra/pubsub/wire.go b/internal/note/infra/pubsub/wire.go deleted file mode 100644 index b80f705e..00000000 --- a/internal/note/infra/pubsub/wire.go +++ /dev/null @@ -1,18 +0,0 @@ -package pubsub - -import ( - "github.com/goforj/wire" - "github.com/notopia-uit/notopia/internal/note/app" -) - -var ProviderSet = wire.NewSet( - ProvideIntegrationMarshaler, - ProvideIntegrationPubSub, - ProvideKafkaPublisher, - ProvideRedisClient, - ProvideWatermillLogger, - ProvideWorkspaceEvent, - ProvideWorkspaceEventHubPubSub, - ProvideWorkspaceEventInternalPubSub, - wire.Bind(new(app.WorkspaceEventPubSub), new(*WorkspaceEvent)), -) diff --git a/internal/note/infra/pubsub/workspaceevent.go b/internal/note/infra/pubsub/workspaceevent.go 
deleted file mode 100644 index 5d9a0be8..00000000 --- a/internal/note/infra/pubsub/workspaceevent.go +++ /dev/null @@ -1,264 +0,0 @@ -package pubsub - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "log/slog" - "time" - - "github.com/ThreeDotsLabs/watermill" - "github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream" - "github.com/ThreeDotsLabs/watermill/components/cqrs" - "github.com/ThreeDotsLabs/watermill/message" - "github.com/ThreeDotsLabs/watermill/message/router/middleware" - "github.com/ThreeDotsLabs/watermill/pubsub/gochannel" - "github.com/google/uuid" - "github.com/notopia-uit/notopia/internal/note/app" - "github.com/notopia-uit/notopia/internal/note/errs" - "github.com/redis/go-redis/v9" -) - -const ( - MetadataWorkspaceIDKey = "workspaceId" - metadataUserIDKey = "userId" - metadataEventTypeKey = "eventType" -) - -type WorkspaceEventInternalPubSub struct { - router *message.Router - publisher message.Publisher - subscriber message.Subscriber - topic string - redisClient *RedisClient -} - -// TODO: If have time, try https://github.com/stong1994/watermill-rediszset, because we only need pubsub, not stream -// This would reduce memory overhead and be more efficient for ephemeral workspace events. 
-func NewWorkspaceEventInternalPubSub( - logger watermill.LoggerAdapter, - marshaler cqrs.CommandEventMarshaler, - redisClient *RedisClient, -) (*WorkspaceEventInternalPubSub, error) { - topic := "events:workspaces" - publisher, err := redisstream.NewPublisher(redisstream.PublisherConfig{ - Client: (*redis.Client)(redisClient), - DefaultMaxlen: 10000, - }, logger) - if err != nil { - return nil, fmt.Errorf("failed to create Redis publisher: %w", err) - } - subscriber, err := redisstream.NewSubscriber(redisstream.SubscriberConfig{ - Client: (*redis.Client)(redisClient), - FanOutOldestId: "$", - DisableIndefiniteInitialBlock: true, - BlockTime: 2 * time.Second, - }, logger, - ) - if err != nil { - return nil, fmt.Errorf("failed to create Redis subscriber: %w", err) - } - - router, err := message.NewRouter(message.RouterConfig{}, logger) - if err != nil { - return nil, fmt.Errorf("failed to create internal message router: %w", err) - } - router.AddMiddleware(middleware.CorrelationID, middleware.Recoverer) - - return &WorkspaceEventInternalPubSub{ - router: router, - publisher: publisher, - subscriber: subscriber, - redisClient: redisClient, - topic: topic, - }, nil -} - -var ProvideWorkspaceEventInternalPubSub = NewWorkspaceEventInternalPubSub - -type WorkspaceEventHubPubSub struct { - pubSub *gochannel.GoChannel -} - -func NewWorkspaceEventHubPubSub( - logger watermill.LoggerAdapter, -) *WorkspaceEventHubPubSub { - pubSub := gochannel.NewGoChannel( - gochannel.Config{ - OutputChannelBuffer: 100, - }, - logger, - ) - return &WorkspaceEventHubPubSub{ - pubSub: pubSub, - } -} - -var ProvideWorkspaceEventHubPubSub = NewWorkspaceEventHubPubSub - -type WorkspaceEvent struct { - internalPubSub *WorkspaceEventInternalPubSub - hubPubSub *WorkspaceEventHubPubSub -} - -var _ app.WorkspaceEventPubSub = (*WorkspaceEvent)(nil) - -func NewWorkspaceEvent( - internalPubSub *WorkspaceEventInternalPubSub, - hubPubSub *WorkspaceEventHubPubSub, -) *WorkspaceEvent { - 
internalPubSub.router.AddConsumerHandler( - "handler", - internalPubSub.topic, - internalPubSub.subscriber, - func(msg *message.Message) error { - workspaceID := msg.Metadata.Get(MetadataWorkspaceIDKey) - return hubPubSub.pubSub.Publish(workspaceID, msg.Copy()) - }, - ) - return &WorkspaceEvent{ - internalPubSub: internalPubSub, - hubPubSub: hubPubSub, - } -} - -var ProvideWorkspaceEvent = NewWorkspaceEvent - -func (w *WorkspaceEvent) Publish(ctx context.Context, workspaceID uuid.UUID, userID string, events ...app.WorkspaceEvent) errs.Error { - msgs := make([]*message.Message, 0, len(events)) - for _, event := range events { - payload, err := json.Marshal(event) - if err != nil { - return errs.NewWorkspaceEventPubSubFailedToCreateMessage( - userID, - workspaceID, - err, - ) - } - msg := message.NewMessage(watermill.NewUUID(), payload) - msg.Metadata.Set(MetadataWorkspaceIDKey, fmt.Sprintf("%v", workspaceID)) - msg.Metadata.Set(metadataUserIDKey, userID) - msg.Metadata.Set(metadataEventTypeKey, event.GetEvent()) - msg.SetContext(ctx) - msgs = append(msgs, msg) - } - err := w.internalPubSub.publisher.Publish(w.internalPubSub.topic, msgs...) 
- if err != nil { - return errs.NewWorkspaceEventPubSubPublishFailed( - userID, - workspaceID, - err, - ) - } - return nil -} - -func (w *WorkspaceEvent) Subscribe( - ctx context.Context, - workspaceID uuid.UUID, - userID string, -) (<-chan app.WorkspaceEvent, errs.Error) { - eventCh := make(chan app.WorkspaceEvent, 10) - - msgCh, err := w.hubPubSub.pubSub.Subscribe(ctx, fmt.Sprintf("%v", workspaceID)) - if err != nil { - return nil, errs.NewWorkspaceEventPubSubSubscribeFailed( - userID, - workspaceID, - err, - ) - } - - go func() { - defer close(eventCh) - - for { - select { - case <-ctx.Done(): - return - case msg, ok := <-msgCh: - if !ok { - return - } - if msg.Metadata.Get(metadataUserIDKey) == userID { - msg.Ack() - continue - } - eventType := msg.Metadata.Get(metadataEventTypeKey) - if eventType == "" { - slog.ErrorContext( - ctx, "missing event type in message metadata", - slog.String("workspace_id", workspaceID.String()), - slog.String("user_id", userID), - ) - msg.Ack() - continue - } - event, ok := app.NewEmptyWorkspaceEventFromType(eventType) - if !ok { - slog.ErrorContext( - ctx, "unknown event type in message metadata", - slog.String("event_type", eventType), - slog.String("workspace_id", workspaceID.String()), - slog.String("user_id", userID), - ) - msg.Ack() - continue - } - if err := json.Unmarshal(msg.Payload, event); err != nil { - slog.ErrorContext(ctx, "failed to unmarshal event", slog.Any("error", err)) - msg.Ack() - continue - } - select { - case eventCh <- event: - msg.Ack() - case <-ctx.Done(): - return - default: - slog.WarnContext(ctx, "dropping event", slog.String("workspace_id", workspaceID.String()), slog.String("user_id", userID)) - msg.Ack() - } - } - } - }() - - return eventCh, nil -} - -func (w *WorkspaceEvent) Run(ctx context.Context) error { - return w.internalPubSub.router.Run(ctx) -} - -func (w *WorkspaceEvent) Close() error { - var errs []error - - if err := w.internalPubSub.router.Close(); err != nil { - errs = append(errs, 
err) - } - - if err := w.internalPubSub.publisher.Close(); err != nil { - errs = append(errs, err) - } - - if err := w.internalPubSub.subscriber.Close(); err != nil { - errs = append(errs, err) - } - - if err := w.hubPubSub.pubSub.Close(); err != nil { - errs = append(errs, err) - } - - if len(errs) > 0 { - return errors.Join(append([]error{fmt.Errorf("failed to close workspace event pubsub")}, errs...)...) - } - return nil -} - -func (w *WorkspaceEvent) Check(ctx context.Context) error { - if statusCmd := w.internalPubSub.redisClient.Ping(ctx); statusCmd.Err() != nil { - return fmt.Errorf("failed to ping Redis: %w", statusCmd.Err()) - } - return nil -} diff --git a/internal/note/infra/service/authorization.go b/internal/note/infra/service/authorization.go index e213719e..2685a41b 100644 --- a/internal/note/infra/service/authorization.go +++ b/internal/note/infra/service/authorization.go @@ -16,11 +16,6 @@ import ( "google.golang.org/grpc/credentials/insecure" ) -func toAuthorizationServiceError(err error) errs.Error { - // NOTE: Lazy to convert all possible errors - return errs.NewAuthorizationInternal(err) -} - func authorizationUnaryClientErrorInterceptor() grpc.UnaryClientInterceptor { return func( ctx context.Context, @@ -31,7 +26,10 @@ func authorizationUnaryClientErrorInterceptor() grpc.UnaryClientInterceptor { opts ...grpc.CallOption, ) error { err := invoker(ctx, method, req, reply, cc, opts...) 
- return toAuthorizationServiceError(err) + if err != nil { + return errs.NewAuthorizationInternal(err) + } + return nil } } @@ -72,26 +70,26 @@ func NewAuthorization( var ProvideAuthorization = NewAuthorization -func (a *Authorization) HasWorkspacePermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspacePermission) (bool, errs.Error) { +func (a *Authorization) HasWorkspacePermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspacePermission) (bool, error) { return false, errs.NewUnimplemented() } -func (a *Authorization) HasWorkspaceItemPermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspaceItemPermission) (bool, errs.Error) { +func (a *Authorization) HasWorkspaceItemPermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspaceItemPermission) (bool, error) { return false, errs.NewUnimplemented() } -func (a *Authorization) HasWorkspaceNotePermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspaceItemPermission) (bool, errs.Error) { +func (a *Authorization) HasWorkspaceNotePermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspaceItemPermission) (bool, error) { return false, errs.NewUnimplemented() } -func (a *Authorization) HasWorkspaceFolderPermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspaceItemPermission) (bool, errs.Error) { +func (a *Authorization) HasWorkspaceFolderPermission(ctx context.Context, userID string, workspaceID uuid.UUID, permission app.WorkspaceItemPermission) (bool, error) { return false, errs.NewUnimplemented() } -func (a *Authorization) CreateWorkspaceWithOwnership(ctx context.Context, userID string, workspaceID uuid.UUID, ownerID uuid.UUID) errs.Error { +func (a *Authorization) CreateWorkspaceWithOwnership(ctx context.Context, ownerID string, workspaceID uuid.UUID) error { return 
errs.NewUnimplemented() } -func (a *Authorization) GetWorkspaceMembers(ctx context.Context, userID string, workspaceID uuid.UUID) ([]*app.WorkspaceMemberInfo, errs.Error) { +func (a *Authorization) GetWorkspaceMembers(ctx context.Context, userID string, workspaceID uuid.UUID) ([]*app.WorkspaceMemberInfo, error) { return nil, errs.NewUnimplemented() } diff --git a/internal/note/infra/wire.go b/internal/note/infra/wire.go index ddd5700d..36d28983 100644 --- a/internal/note/infra/wire.go +++ b/internal/note/infra/wire.go @@ -1,14 +1,24 @@ package infra import ( + "github.com/ThreeDotsLabs/watermill-sql/v4/pkg/sql" "github.com/goforj/wire" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/notopia-uit/notopia/internal/note/infra/common" + "github.com/notopia-uit/notopia/internal/note/infra/integrationpublisher" + "github.com/notopia-uit/notopia/internal/note/infra/outbox" "github.com/notopia-uit/notopia/internal/note/infra/persistence" - "github.com/notopia-uit/notopia/internal/note/infra/pubsub" "github.com/notopia-uit/notopia/internal/note/infra/service" + "github.com/notopia-uit/notopia/internal/note/infra/workspaceevent" ) var ProviderSet = wire.NewSet( - service.ProviderSet, + wire.Bind(new(sql.Conn), new(*pgxpool.Pool)), + + common.ProviderSet, + integrationpublisher.ProviderSet, + outbox.ProviderSet, persistence.PostgresProviderSet, - pubsub.ProviderSet, + service.ProviderSet, + workspaceevent.ProviderSet, ) diff --git a/internal/note/infra/pubsub/redis.go b/internal/note/infra/workspaceevent/redis.go similarity index 83% rename from internal/note/infra/pubsub/redis.go rename to internal/note/infra/workspaceevent/redis.go index 59b3c430..150ffd27 100644 --- a/internal/note/infra/pubsub/redis.go +++ b/internal/note/infra/workspaceevent/redis.go @@ -1,4 +1,4 @@ -package pubsub +package workspaceevent import ( "context" @@ -8,7 +8,9 @@ import ( "github.com/redis/go-redis/v9" ) -type RedisClient redis.Client +type RedisClient struct { + *redis.Client +} func 
NewRedisClient( ctx context.Context, @@ -24,7 +26,7 @@ func NewRedisClient( logger.Error("failed to shutdown Redis client", slog.Any("error", err)) } } - return (*RedisClient)(client), cleanup + return &RedisClient{client}, cleanup } var ProvideRedisClient = NewRedisClient diff --git a/internal/note/infra/workspaceevent/wire.go b/internal/note/infra/workspaceevent/wire.go new file mode 100644 index 00000000..1537313d --- /dev/null +++ b/internal/note/infra/workspaceevent/wire.go @@ -0,0 +1,14 @@ +package workspaceevent + +import ( + "github.com/goforj/wire" + "github.com/notopia-uit/notopia/internal/note/app" +) + +var ProviderSet = wire.NewSet( + ProvideRedisClient, + ProvideWorkspaceEventHub, + wire.Bind(new(app.WorkspaceEventPublisher), new(*WorkspaceEventHub)), + wire.Bind(new(app.WorkspaceEventSubscriber), new(*WorkspaceEventHub)), + wire.Bind(new(app.WorkspaceEventHub), new(*WorkspaceEventHub)), +) diff --git a/internal/note/infra/workspaceevent/workspaceevent.go b/internal/note/infra/workspaceevent/workspaceevent.go new file mode 100644 index 00000000..fb82063b --- /dev/null +++ b/internal/note/infra/workspaceevent/workspaceevent.go @@ -0,0 +1,272 @@ +package workspaceevent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "time" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream" + "github.com/ThreeDotsLabs/watermill/message" + "github.com/ThreeDotsLabs/watermill/message/router/middleware" + "github.com/ThreeDotsLabs/watermill/pubsub/gochannel" + "github.com/google/uuid" + "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/config" + "github.com/notopia-uit/notopia/internal/note/errs" +) + +type WorkspaceEventHub struct { + redisPublisher message.Publisher + redisSubscriber message.Subscriber + internalPubSub *gochannel.GoChannel + router *message.Router + + topic string + MetadataWorkspaceIDKey string + metadataUserIDKey string + 
metadataEventTypeKey string +} + +var _ app.WorkspaceEventHub = (*WorkspaceEventHub)(nil) + +func NewWorkspaceEventHub( + workspaceEventCfg *config.WorkspaceEvent, + logger watermill.LoggerAdapter, + redisClient *RedisClient, +) (*WorkspaceEventHub, error) { + // TODO: If have time, try https://github.com/stong1994/watermill-rediszset, because we only need pubsub, not stream + // This would reduce memory overhead and be more efficient for ephemeral workspace events. + // Single Redis subscriber for ALL workspace events + publisher, err := redisstream.NewPublisher( + redisstream.PublisherConfig{ + Client: redisClient, + DefaultMaxlen: 10000, + }, + logger, + ) + if err != nil { + return nil, fmt.Errorf("failed to create workspace event hub redis publisher: %w", err) + } + + subscriber, err := redisstream.NewSubscriber( + redisstream.SubscriberConfig{ + Client: redisClient, + FanOutOldestId: "$", + DisableIndefiniteInitialBlock: true, + BlockTime: 2 * time.Second, + }, + logger, + ) + if err != nil { + return nil, fmt.Errorf("failed to create workspace event hub redis subscriber: %w", err) + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, fmt.Errorf("failed to create workspace event hub router: %w", err) + } + router.AddMiddleware(middleware.CorrelationID, middleware.Recoverer) + + return &WorkspaceEventHub{ + redisPublisher: publisher, + redisSubscriber: subscriber, + internalPubSub: gochannel.NewGoChannel(gochannel.Config{OutputChannelBuffer: 100}, logger), + router: router, + + topic: workspaceEventCfg.MessageGeneralTopic, + metadataUserIDKey: workspaceEventCfg.MessageMetadataUserIDKey, + metadataEventTypeKey: workspaceEventCfg.MessageMetadataEventTypeKey, + MetadataWorkspaceIDKey: workspaceEventCfg.MessageMetadataWorkspaceIDKey, + }, nil +} + +var ProvideWorkspaceEventHub = NewWorkspaceEventHub + +func (h *WorkspaceEventHub) setupRouting() { + h.router.AddConsumerHandler( + "workspace-router", + h.topic, + 
h.redisSubscriber, + func(msg *message.Message) error { + workspaceID := msg.Metadata.Get(h.MetadataWorkspaceIDKey) + if workspaceID == "" { + slog.ErrorContext(msg.Context(), "missing workspace ID in message metadata") + return nil + } + + internalTopic := fmt.Sprintf("workspace:%s", workspaceID) + if err := h.internalPubSub.Publish(internalTopic, msg.Copy()); err != nil { + slog.ErrorContext(msg.Context(), "failed to publish to internal topic", slog.Any("error", err)) + } + + return nil + }, + ) +} + +func (h *WorkspaceEventHub) Publish( + ctx context.Context, + workspaceID uuid.UUID, + userID string, + events ...app.WorkspaceEvent, +) error { + if len(events) == 0 { + return nil + } + + msgs := make([]*message.Message, 0, len(events)) + + for _, event := range events { + payload, err := json.Marshal(event) + if err != nil { + return errs.NewWorkspaceEventPubSubFailedToCreateMessage( + userID, + workspaceID, + err, + ) + } + + msg := message.NewMessage(watermill.NewUUID(), payload) + msg.SetContext(ctx) + msg.Metadata.Set(h.MetadataWorkspaceIDKey, workspaceID.String()) + msg.Metadata.Set(h.metadataUserIDKey, userID) + msg.Metadata.Set(h.metadataEventTypeKey, event.GetEvent()) + msgs = append(msgs, msg) + } + + if err := h.redisPublisher.Publish(h.topic, msgs...); err != nil { + return errs.NewWorkspaceEventPubSubPublishFailed( + userID, + workspaceID, + err, + ) + } + + return nil +} + +// FIXME: This should check if a user have permission to subcribe to +// But, currently the logic is under infra, I will somehow bring partially up to app layer +func (h *WorkspaceEventHub) Subscribe( + ctx context.Context, + workspaceID uuid.UUID, + userID string, +) (<-chan app.WorkspaceEvent, error) { + eventCh := make(chan app.WorkspaceEvent, 10) + workspaceKey := fmt.Sprintf("workspace:%s", workspaceID) + + msgCh, err := h.internalPubSub.Subscribe(ctx, workspaceKey) + if err != nil { + return nil, errs.NewWorkspaceEventPubSubSubscribeFailed(userID, workspaceID, err) + } + + 
go func() { + defer close(eventCh) + for { + select { + case <-ctx.Done(): + return + case msg, ok := <-msgCh: + if !ok { + return + } + h.processMessage(ctx, msg, eventCh, userID, workspaceID) + } + } + }() + + return eventCh, nil +} + +func (h *WorkspaceEventHub) processMessage( + ctx context.Context, + msg *message.Message, + eventCh chan app.WorkspaceEvent, + userID string, + workspaceID uuid.UUID, +) { + // Skip own events + if msg.Metadata.Get(h.metadataUserIDKey) == userID { + msg.Ack() + return + } + + eventType := msg.Metadata.Get(h.metadataEventTypeKey) + if eventType == "" { + slog.ErrorContext(ctx, "missing event type in message metadata", + slog.String("workspace_id", workspaceID.String()), + slog.String("user_id", userID), + ) + msg.Ack() + return + } + + event, ok := app.NewEmptyWorkspaceEventFromType(eventType) + if !ok { + slog.ErrorContext(ctx, "unknown event type", + slog.String("event_type", eventType), + slog.String("workspace_id", workspaceID.String()), + slog.String("user_id", userID), + ) + msg.Ack() + return + } + + if err := json.Unmarshal(msg.Payload, event); err != nil { + slog.ErrorContext(ctx, "failed to unmarshal event", + slog.Any("error", err), + slog.String("workspace_id", workspaceID.String()), + ) + msg.Ack() + return + } + + select { + case eventCh <- event: + msg.Ack() + case <-ctx.Done(): + return + default: + slog.WarnContext(ctx, "dropping event (subscriber buffer full)", + slog.String("workspace_id", workspaceID.String()), + slog.String("user_id", userID), + slog.String("event_type", eventType), + ) + msg.Ack() + } +} + +func (h *WorkspaceEventHub) Run(ctx context.Context) error { + h.setupRouting() + return h.router.Run(ctx) +} + +func (h *WorkspaceEventHub) Close() error { + var errs []error + + if err := h.router.Close(); err != nil { + errs = append(errs, fmt.Errorf("router close: %w", err)) + } + + if err := h.redisPublisher.Close(); err != nil { + errs = append(errs, fmt.Errorf("publisher close: %w", err)) + } + + // 
NOTE: actually we don't need to close the Redis subscriber and the go pubsub because router already closed them + if err := h.redisSubscriber.Close(); err != nil { + errs = append(errs, fmt.Errorf("subscriber close: %w", err)) + } + + if err := h.internalPubSub.Close(); err != nil { + errs = append(errs, fmt.Errorf("internal pubsub close: %w", err)) + } + + if len(errs) > 0 { + return fmt.Errorf("failed to close workspace event hub: %v", errors.Join(errs...)) + } + return nil +} diff --git a/internal/note/server.go b/internal/note/server.go index 08dcf88f..600593ce 100644 --- a/internal/note/server.go +++ b/internal/note/server.go @@ -2,50 +2,60 @@ package note import ( "context" + "fmt" "log/slog" - "github.com/notopia-uit/notopia/internal/note/app" + "github.com/notopia-uit/notopia/internal/note/controller/event" "github.com/notopia-uit/notopia/internal/note/controller/grpc" "github.com/notopia-uit/notopia/internal/note/controller/health" "github.com/notopia-uit/notopia/internal/note/controller/http" - "github.com/notopia-uit/notopia/internal/note/controller/integrationevent" + "github.com/notopia-uit/notopia/internal/note/infra/outbox" + "github.com/notopia-uit/notopia/internal/note/infra/persistence" + "github.com/notopia-uit/notopia/internal/note/infra/workspaceevent" "github.com/notopia-uit/notopia/pkg/otel" "golang.org/x/sync/errgroup" ) type Server struct { - http *http.HTTP - grpc *grpc.GRPC - integrationevent *integrationevent.IntegrationEvent - health *health.Health - app *app.Server - logger *slog.Logger + persistence *persistence.Pg + http *http.HTTP + grpc *grpc.GRPC + event *event.Event + workspaceEventHub *workspaceevent.WorkspaceEventHub + outbox *outbox.Outbox + health *health.Health + logger *slog.Logger } +// TODO: we have to start the workspace event also func NewServer( + persistence *persistence.Pg, http *http.HTTP, grpc *grpc.GRPC, - integrationevent *integrationevent.IntegrationEvent, + event *event.Event, + workspaceEventHub 
*workspaceevent.WorkspaceEventHub, + outbox *outbox.Outbox, health *health.Health, - app *app.Server, logger *slog.Logger, - globalOtel otel.Global, + globalOtel otel.Global, // This have to be here for deps ) *Server { slog.SetDefault(logger) return &Server{ - http: http, - grpc: grpc, - integrationevent: integrationevent, - health: health, - app: app, - logger: logger, + persistence: persistence, + http: http, + grpc: grpc, + event: event, + workspaceEventHub: workspaceEventHub, + outbox: outbox, + health: health, + logger: logger, } } func (s *Server) Run(ctx context.Context) error { - if err := s.app.Start(ctx); err != nil { - return err + if err := s.persistence.RunMigrations(ctx); err != nil { + return fmt.Errorf("failed to run migrations: %w", err) } g, ctx := errgroup.WithContext(ctx) @@ -57,7 +67,10 @@ func (s *Server) Run(ctx context.Context) error { s.logger.ErrorContext(ctx, "failed to shutdown http server", slog.Any("error", err)) } }() - return s.http.Run() + if err := s.http.Run(); err != nil { + return fmt.Errorf("failed to run http server: %w", err) + } + return nil }) g.Go(func() error { @@ -65,7 +78,10 @@ func (s *Server) Run(ctx context.Context) error { <-ctx.Done() s.grpc.Stop() }() - return s.grpc.Run() + if err := s.grpc.Run(); err != nil { + return fmt.Errorf("failed to run grpc server: %w", err) + } + return nil }) g.Go(func() error { @@ -75,12 +91,31 @@ func (s *Server) Run(ctx context.Context) error { s.logger.ErrorContext(ctx, "failed to shutdown health server", slog.Any("error", err)) } }() - return s.health.Run() + if err := s.health.Run(); err != nil { + return fmt.Errorf("failed to run health server: %w", err) + } + return nil }) g.Go(func() error { - <-ctx.Done() - return s.app.Stop(context.Background()) + // This has context passed down, so we don't really to close/stop it + if err := s.event.Run(ctx); err != nil { + return fmt.Errorf("failed to run integration event listener: %w", err) + } + return nil + }) + g.Go(func() error { + 
if err := s.workspaceEventHub.Run(ctx); err != nil { + return fmt.Errorf("failed to run workspace event hub: %w", err) + } + return nil + }) + g.Go(func() error { + // This has context passed down, so we don't really to close/stop it + if err := s.outbox.Run(ctx); err != nil { + return fmt.Errorf("failed to run outbox forwarder: %w", err) + } + return nil }) return g.Wait() diff --git a/internal/note/wire.go b/internal/note/wire.go index 31de3589..320e1c94 100644 --- a/internal/note/wire.go +++ b/internal/note/wire.go @@ -3,9 +3,9 @@ package note import ( "github.com/goforj/wire" "github.com/notopia-uit/notopia/internal/note/app" - components "github.com/notopia-uit/notopia/internal/note/component" + "github.com/notopia-uit/notopia/internal/note/component" "github.com/notopia-uit/notopia/internal/note/config" - controller "github.com/notopia-uit/notopia/internal/note/controller" + "github.com/notopia-uit/notopia/internal/note/controller" "github.com/notopia-uit/notopia/internal/note/domain" "github.com/notopia-uit/notopia/internal/note/infra" "github.com/notopia-uit/notopia/pkg/logging" @@ -15,7 +15,7 @@ import ( var ProviderSet = wire.NewSet( ProvideServer, app.ProviderSet, - components.ProviderSet, + component.ProviderSet, config.ProviderSet, controller.ProviderSet, domain.ProviderSet, diff --git a/packages/api-gen/src/index.ts b/packages/api-gen/src/index.ts index c06819a5..501d29d2 100644 --- a/packages/api-gen/src/index.ts +++ b/packages/api-gen/src/index.ts @@ -1,4 +1,4 @@ // This file is auto-generated by @hey-api/openapi-ts export { checkWorkspaceSlugExists, commitDocument, createFolder, createNote, createWorkspace, deleteRevision, deleteWorkspace, getDocumentAttachmentUploadUrl, getNote, getNoteGraph, getNoteLinks, getRevisions, getRevisionWithContent, getWorkspace, getWorkspaceEvents, getWorkspaceGraph, getWorkspaceMembers, getWorkspaceTree, moveWorkspaceItems, type Options, permanentlyDeleteFolder, permanentlyDeleteNote, permanentlyDeleteWorkspaceItems, 
publishNote, publishWorkspace, renameFolder, renameNote, renameRevision, renameWorkspace, restoreTrashedWorkspaceItems, showTrash, trashWorkspaceItems, unpublishNote, unpublishWorkspace, updateWorkspaceMembers } from './sdk.gen'; -export { type CheckWorkspaceSlugExistsData, type CheckWorkspaceSlugExistsError, type CheckWorkspaceSlugExistsErrors, type CheckWorkspaceSlugExistsResponses, type ClientOptions, type CommitDocumentData, type CommitDocumentError, type CommitDocumentErrors, type CommitDocumentResponse, type CommitDocumentResponses, type CreateFolderData, type CreateFolderError, type CreateFolderErrors, type CreateFolderResponses, type CreateNoteData, type CreateNoteError, type CreateNoteErrors, type CreateNoteResponses, type CreateWorkspaceData, type CreateWorkspaceError, type CreateWorkspaceErrors, type CreateWorkspaceResponses, type DeleteRevisionData, type DeleteRevisionError, type DeleteRevisionErrors, type DeleteRevisionResponse, type DeleteRevisionResponses, type DeleteWorkspaceData, type DeleteWorkspaceError, type DeleteWorkspaceErrors, type DeleteWorkspaceResponse, type DeleteWorkspaceResponses, type DocumentDocumentContent, type DocumentDocumentContentWritable, type DocumentDocumentIdPath, type DocumentDocumentIdQuery, type DocumentError, type DocumentLimitQuery, type DocumentName, type DocumentPageQuery, type DocumentPagination, type DocumentRevision, type DocumentRevisionIdPath, type DocumentRevisionWithContent, type DocumentRevisionWithContentWritable, type DocumentRevisionWritable, type GetDocumentAttachmentUploadUrlData, type GetDocumentAttachmentUploadUrlError, type GetDocumentAttachmentUploadUrlErrors, type GetDocumentAttachmentUploadUrlResponse, type GetDocumentAttachmentUploadUrlResponses, type GetNoteData, type GetNoteError, type GetNoteErrors, type GetNoteGraphData, type GetNoteGraphError, type GetNoteGraphErrors, type GetNoteGraphResponse, type GetNoteGraphResponses, type GetNoteLinksData, type GetNoteLinksError, type GetNoteLinksErrors, 
type GetNoteLinksResponse, type GetNoteLinksResponses, type GetNoteResponse, type GetNoteResponses, type GetRevisionsData, type GetRevisionsError, type GetRevisionsErrors, type GetRevisionsResponse, type GetRevisionsResponses, type GetRevisionWithContentData, type GetRevisionWithContentError, type GetRevisionWithContentErrors, type GetRevisionWithContentResponse, type GetRevisionWithContentResponses, type GetWorkspaceData, type GetWorkspaceError, type GetWorkspaceErrors, type GetWorkspaceEventsData, type GetWorkspaceEventsError, type GetWorkspaceEventsErrors, type GetWorkspaceEventsResponse, type GetWorkspaceEventsResponses, type GetWorkspaceGraphData, type GetWorkspaceGraphError, type GetWorkspaceGraphErrors, type GetWorkspaceGraphResponse, type GetWorkspaceGraphResponses, type GetWorkspaceMembersData, type GetWorkspaceMembersError, type GetWorkspaceMembersErrors, type GetWorkspaceMembersResponse, type GetWorkspaceMembersResponses, type GetWorkspaceResponse, type GetWorkspaceResponses, type GetWorkspaceTreeData, type GetWorkspaceTreeError, type GetWorkspaceTreeErrors, type GetWorkspaceTreeResponse, type GetWorkspaceTreeResponses, type MoveWorkspaceItemsData, type MoveWorkspaceItemsError, type MoveWorkspaceItemsErrors, type MoveWorkspaceItemsResponse, type MoveWorkspaceItemsResponses, type NoteError, type NoteFolder, type NoteFolderIdPath, type NoteFolderWritable, type NoteGraph, type NoteHeartBeatWorkspaceEvent, type NoteIcon, type NoteId, type NoteName, type NoteNote, type NoteNoteIdPath, type NoteNoteLink, type NoteNoteLinkWritable, type NoteNotePropertiesId, type NoteNoteWritable, type NotePropertiesIcon, type NotePropertiesId, type NotePropertiesName, type NotePropertiesUpdatedAt, type NoteSlug, type NoteTrashed, NoteTrashedBy, type NoteTrashedFolder, type NoteTrashedFolderWritable, type NoteTrashedNote, type NoteTrashedNoteWritable, type NoteUpdatedAt, type NoteUserPropertiesId, type NoteUserPropertiesName, type NoteWorkspace, type NoteWorkspaceDeletedEvent, 
type NoteWorkspaceDeletedEventWritable, type NoteWorkspaceIdPath, type NoteWorkspaceItemsUpdatedEvent, type NoteWorkspaceItemsUpdatedEventWritable, type NoteWorkspaceMember, type NoteWorkspaceMembersUpdatedEvent, type NoteWorkspaceMembersUpdatedEventWritable, type NoteWorkspaceMemberWritable, type NoteWorkspacePropertiesName, NoteWorkspaceRole, type NoteWorkspaceSlugPath, type NoteWorkspaceTreeFolder, type NoteWorkspaceTreeFolderWritable, type NoteWorkspaceTreeNote, type NoteWorkspaceTreeNoteWritable, type NoteWorkspaceUpdatedEvent, type NoteWorkspaceUpdatedEventWritable, type NoteWorkspaceWritable, type PermanentlyDeleteFolderData, type PermanentlyDeleteFolderError, type PermanentlyDeleteFolderErrors, type PermanentlyDeleteFolderResponse, type PermanentlyDeleteFolderResponses, type PermanentlyDeleteNoteData, type PermanentlyDeleteNoteError, type PermanentlyDeleteNoteErrors, type PermanentlyDeleteNoteResponse, type PermanentlyDeleteNoteResponses, type PermanentlyDeleteWorkspaceItemsData, type PermanentlyDeleteWorkspaceItemsError, type PermanentlyDeleteWorkspaceItemsErrors, type PermanentlyDeleteWorkspaceItemsResponse, type PermanentlyDeleteWorkspaceItemsResponses, type PublishNoteData, type PublishNoteError, type PublishNoteErrors, type PublishNoteResponse, type PublishNoteResponses, type PublishWorkspaceData, type PublishWorkspaceError, type PublishWorkspaceErrors, type PublishWorkspaceResponse, type PublishWorkspaceResponses, type RenameFolderData, type RenameFolderError, type RenameFolderErrors, type RenameFolderResponse, type RenameFolderResponses, type RenameNoteData, type RenameNoteError, type RenameNoteErrors, type RenameNoteResponse, type RenameNoteResponses, type RenameRevisionData, type RenameRevisionError, type RenameRevisionErrors, type RenameRevisionResponse, type RenameRevisionResponses, type RenameWorkspaceData, type RenameWorkspaceError, type RenameWorkspaceErrors, type RenameWorkspaceResponse, type RenameWorkspaceResponses, type 
RestoreTrashedWorkspaceItemsData, type RestoreTrashedWorkspaceItemsError, type RestoreTrashedWorkspaceItemsErrors, type RestoreTrashedWorkspaceItemsResponse, type RestoreTrashedWorkspaceItemsResponses, type ShareDocument, type ShareDocumentCommittedEvent, type ShareDocumentContent, type ShareDocumentContentWritable, type ShareIcon, type ShareId, type ShareName, type ShareNote, type ShareNoteCreatedEvent, type ShareNoteCreatedEventWritable, type ShareNoteDeletedEvent, type ShareNoteDeletedEventWritable, type ShareNoteSearch, type ShareNoteSearchWritable, type ShareNoteUpdatedEvent, type ShareNoteUpdatedEventWritable, type ShareNoteWritable, type SharePropertiesId, ShareTrashedBy, type ShareUserDeletedEvent, type ShareUserPropertiesId, type ShowTrashData, type ShowTrashError, type ShowTrashErrors, type ShowTrashResponse, type ShowTrashResponses, type TrashWorkspaceItemsData, type TrashWorkspaceItemsError, type TrashWorkspaceItemsErrors, type TrashWorkspaceItemsResponse, type TrashWorkspaceItemsResponses, type UnpublishNoteData, type UnpublishNoteError, type UnpublishNoteErrors, type UnpublishNoteResponse, type UnpublishNoteResponses, type UnpublishWorkspaceData, type UnpublishWorkspaceError, type UnpublishWorkspaceErrors, type UnpublishWorkspaceResponse, type UnpublishWorkspaceResponses, type UpdateWorkspaceMembersData, type UpdateWorkspaceMembersError, type UpdateWorkspaceMembersErrors, type UpdateWorkspaceMembersResponse, type UpdateWorkspaceMembersResponses } from './types.gen'; +export { type CheckWorkspaceSlugExistsData, type CheckWorkspaceSlugExistsError, type CheckWorkspaceSlugExistsErrors, type CheckWorkspaceSlugExistsResponses, type ClientOptions, type CommitDocumentData, type CommitDocumentError, type CommitDocumentErrors, type CommitDocumentResponse, type CommitDocumentResponses, type CreateFolderData, type CreateFolderError, type CreateFolderErrors, type CreateFolderResponses, type CreateNoteData, type CreateNoteError, type CreateNoteErrors, type 
CreateNoteResponses, type CreateWorkspaceData, type CreateWorkspaceError, type CreateWorkspaceErrors, type CreateWorkspaceResponses, type DeleteRevisionData, type DeleteRevisionError, type DeleteRevisionErrors, type DeleteRevisionResponse, type DeleteRevisionResponses, type DeleteWorkspaceData, type DeleteWorkspaceError, type DeleteWorkspaceErrors, type DeleteWorkspaceResponse, type DeleteWorkspaceResponses, type DocumentDocumentContent, type DocumentDocumentContentWritable, type DocumentDocumentIdPath, type DocumentDocumentIdQuery, type DocumentError, type DocumentLimitQuery, type DocumentName, type DocumentPageQuery, type DocumentPagination, type DocumentRevision, type DocumentRevisionIdPath, type DocumentRevisionWithContent, type DocumentRevisionWithContentWritable, type DocumentRevisionWritable, type GetDocumentAttachmentUploadUrlData, type GetDocumentAttachmentUploadUrlError, type GetDocumentAttachmentUploadUrlErrors, type GetDocumentAttachmentUploadUrlResponse, type GetDocumentAttachmentUploadUrlResponses, type GetNoteData, type GetNoteError, type GetNoteErrors, type GetNoteGraphData, type GetNoteGraphError, type GetNoteGraphErrors, type GetNoteGraphResponse, type GetNoteGraphResponses, type GetNoteLinksData, type GetNoteLinksError, type GetNoteLinksErrors, type GetNoteLinksResponse, type GetNoteLinksResponses, type GetNoteResponse, type GetNoteResponses, type GetRevisionsData, type GetRevisionsError, type GetRevisionsErrors, type GetRevisionsResponse, type GetRevisionsResponses, type GetRevisionWithContentData, type GetRevisionWithContentError, type GetRevisionWithContentErrors, type GetRevisionWithContentResponse, type GetRevisionWithContentResponses, type GetWorkspaceData, type GetWorkspaceError, type GetWorkspaceErrors, type GetWorkspaceEventsData, type GetWorkspaceEventsError, type GetWorkspaceEventsErrors, type GetWorkspaceEventsResponse, type GetWorkspaceEventsResponses, type GetWorkspaceGraphData, type GetWorkspaceGraphError, type 
GetWorkspaceGraphErrors, type GetWorkspaceGraphResponse, type GetWorkspaceGraphResponses, type GetWorkspaceMembersData, type GetWorkspaceMembersError, type GetWorkspaceMembersErrors, type GetWorkspaceMembersResponse, type GetWorkspaceMembersResponses, type GetWorkspaceResponse, type GetWorkspaceResponses, type GetWorkspaceTreeData, type GetWorkspaceTreeError, type GetWorkspaceTreeErrors, type GetWorkspaceTreeResponse, type GetWorkspaceTreeResponses, type MoveWorkspaceItemsData, type MoveWorkspaceItemsError, type MoveWorkspaceItemsErrors, type MoveWorkspaceItemsResponse, type MoveWorkspaceItemsResponses, type NoteError, type NoteFolder, type NoteFolderIdPath, type NoteFolderWritable, type NoteGraph, type NoteHeartBeatWorkspaceEvent, type NoteIcon, type NoteId, type NoteName, type NoteNote, type NoteNoteIdPath, type NoteNoteLink, type NoteNoteLinkWritable, type NoteNotePropertiesId, type NoteNoteWritable, type NotePropertiesIcon, type NotePropertiesId, type NotePropertiesName, type NotePropertiesUpdatedAt, type NoteSlug, type NoteTrashed, NoteTrashedBy, type NoteTrashedFolder, type NoteTrashedFolderWritable, type NoteTrashedNote, type NoteTrashedNoteWritable, type NoteUpdatedAt, type NoteUserPropertiesId, type NoteUserPropertiesName, type NoteWorkspace, type NoteWorkspaceDeletedEvent, type NoteWorkspaceDeletedEventWritable, type NoteWorkspaceIdPath, type NoteWorkspaceItemsUpdatedEvent, type NoteWorkspaceItemsUpdatedEventWritable, type NoteWorkspaceMember, type NoteWorkspaceMembersUpdatedEvent, type NoteWorkspaceMembersUpdatedEventWritable, type NoteWorkspaceMemberWritable, type NoteWorkspacePropertiesName, NoteWorkspaceRole, type NoteWorkspaceSlugPath, type NoteWorkspaceTreeFolder, type NoteWorkspaceTreeFolderWritable, type NoteWorkspaceTreeNote, type NoteWorkspaceTreeNoteWritable, type NoteWorkspaceUpdatedEvent, type NoteWorkspaceUpdatedEventWritable, type NoteWorkspaceWritable, type PermanentlyDeleteFolderData, type PermanentlyDeleteFolderError, type 
PermanentlyDeleteFolderErrors, type PermanentlyDeleteFolderResponse, type PermanentlyDeleteFolderResponses, type PermanentlyDeleteNoteData, type PermanentlyDeleteNoteError, type PermanentlyDeleteNoteErrors, type PermanentlyDeleteNoteResponse, type PermanentlyDeleteNoteResponses, type PermanentlyDeleteWorkspaceItemsData, type PermanentlyDeleteWorkspaceItemsError, type PermanentlyDeleteWorkspaceItemsErrors, type PermanentlyDeleteWorkspaceItemsResponse, type PermanentlyDeleteWorkspaceItemsResponses, type PublishNoteData, type PublishNoteError, type PublishNoteErrors, type PublishNoteResponse, type PublishNoteResponses, type PublishWorkspaceData, type PublishWorkspaceError, type PublishWorkspaceErrors, type PublishWorkspaceResponse, type PublishWorkspaceResponses, type RenameFolderData, type RenameFolderError, type RenameFolderErrors, type RenameFolderResponse, type RenameFolderResponses, type RenameNoteData, type RenameNoteError, type RenameNoteErrors, type RenameNoteResponse, type RenameNoteResponses, type RenameRevisionData, type RenameRevisionError, type RenameRevisionErrors, type RenameRevisionResponse, type RenameRevisionResponses, type RenameWorkspaceData, type RenameWorkspaceError, type RenameWorkspaceErrors, type RenameWorkspaceResponse, type RenameWorkspaceResponses, type RestoreTrashedWorkspaceItemsData, type RestoreTrashedWorkspaceItemsError, type RestoreTrashedWorkspaceItemsErrors, type RestoreTrashedWorkspaceItemsResponse, type RestoreTrashedWorkspaceItemsResponses, type ShareDocument, type ShareDocumentCommittedEvent, type ShareDocumentContent, type ShareDocumentContentWritable, type ShareId, type ShareNoteCreatedEvent, type ShareNoteDeletedEvent, type ShareNoteSearch, type ShareNoteUpdatedEvent, type ShareUserDeletedEvent, type ShowTrashData, type ShowTrashError, type ShowTrashErrors, type ShowTrashResponse, type ShowTrashResponses, type TrashWorkspaceItemsData, type TrashWorkspaceItemsError, type TrashWorkspaceItemsErrors, type 
TrashWorkspaceItemsResponse, type TrashWorkspaceItemsResponses, type UnpublishNoteData, type UnpublishNoteError, type UnpublishNoteErrors, type UnpublishNoteResponse, type UnpublishNoteResponses, type UnpublishWorkspaceData, type UnpublishWorkspaceError, type UnpublishWorkspaceErrors, type UnpublishWorkspaceResponse, type UnpublishWorkspaceResponses, type UpdateWorkspaceMembersData, type UpdateWorkspaceMembersError, type UpdateWorkspaceMembersErrors, type UpdateWorkspaceMembersResponse, type UpdateWorkspaceMembersResponses } from './types.gen'; diff --git a/packages/api-gen/src/types.gen.ts b/packages/api-gen/src/types.gen.ts index 0cdc35de..52996c0a 100644 --- a/packages/api-gen/src/types.gen.ts +++ b/packages/api-gen/src/types.gen.ts @@ -5,23 +5,24 @@ export type ClientOptions = { }; export type ShareDocumentCommittedEvent = ShareDocument & { + userId: string; tags: Array; outgoingLinkIds: Array; }; export type ShareNoteCreatedEvent = { - id: ShareId; - name: ShareName; - icon?: ShareIcon; + id: string; + name: string; + icon?: string | null; }; export type ShareNoteDeletedEvent = { - id: ShareId; + id: string; }; export type ShareNoteSearch = { - id: ShareId; - name: ShareName; + id: string; + name: string; /** * Plain text content */ @@ -29,10 +30,17 @@ export type ShareNoteSearch = { tags?: Array; }; -export type ShareNoteUpdatedEvent = ShareNote; +export type ShareNoteUpdatedEvent = { + id: string; + name: string; + icon: string | null; + folderId: string; + tags: Array; + updatedAt: Date; +}; export type ShareUserDeletedEvent = { - id: ShareUserPropertiesId; + id: ShareId; }; /** @@ -45,41 +53,10 @@ export type ShareDocument = { content: ShareDocumentContent; }; -export type ShareId = string; - -/** - * Can be empty string when creating but will be set to "Untitled Note" internally - */ -export type ShareName = string; - -export type ShareIcon = string | null; - -export type SharePropertiesId = string; - -export const ShareTrashedBy = { PURPOSE: 'purpose', 
PARENT: 'parent' } as const; - -export type ShareTrashedBy = typeof ShareTrashedBy[keyof typeof ShareTrashedBy]; - -export type ShareNote = { - readonly id: string; - /** - * Can be empty string when creating but will be set to "Untitled Note" internally - */ - name: string; - icon: string | null; - folderId: SharePropertiesId; - readonly tags: Array; - readonly updatedAt: Date; - readonly trashed: { - trashedBy: ShareTrashedBy; - trashedAt: Date; - } | null; -}; - /** * User ID from Authentik (need to change subject mode to User's ID instead of hashed) */ -export type ShareUserPropertiesId = string; +export type ShareId = string; export type DocumentError = { /** @@ -346,39 +323,11 @@ export type NoteWorkspaceTreeFolder = { export type NotePropertiesUpdatedAt = Date; -export type ShareNoteCreatedEventWritable = { - name: ShareName; - icon?: ShareIcon; -}; - -export type ShareNoteDeletedEventWritable = { - [key: string]: unknown; -}; - -export type ShareNoteSearchWritable = { - name: ShareName; - /** - * Plain text content - */ - plainTextContent?: string; - tags?: Array; -}; - -export type ShareNoteUpdatedEventWritable = ShareNoteWritable; - /** * BlockNote model */ export type ShareDocumentContentWritable = Array; -export type ShareNoteWritable = { - /** - * Can be empty string when creating but will be set to "Untitled Note" internally - */ - name: string; - icon: string | null; -}; - export type DocumentRevisionWritable = { name: string | null; }; diff --git a/packages/api-gen/src/zod.gen.ts b/packages/api-gen/src/zod.gen.ts index 0f6a2d4d..46a6ef71 100644 --- a/packages/api-gen/src/zod.gen.ts +++ b/packages/api-gen/src/zod.gen.ts @@ -2,73 +2,55 @@ import * as z from 'zod'; -/** - * BlockNote model - */ -export const zShareDocumentContent = z.array(z.unknown()); - -export const zShareDocument = z.object({ +export const zShareNoteCreatedEvent = z.object({ id: z.uuid(), - content: zShareDocumentContent + name: z.string(), + icon: z.string().nullish() }); -export 
const zShareDocumentCommittedEvent = zShareDocument.and(z.object({ - tags: z.array(z.string()), - outgoingLinkIds: z.array(z.uuid()) -})); - -export const zShareId = z.uuid().readonly(); - export const zShareNoteDeletedEvent = z.object({ - id: zShareId + id: z.uuid() }); -/** - * Can be empty string when creating but will be set to "Untitled Note" internally - */ -export const zShareName = z.string().min(1).max(255); - export const zShareNoteSearch = z.object({ - id: zShareId, - name: zShareName, + id: z.uuid(), + name: z.string(), plainTextContent: z.string().optional(), tags: z.array(z.string()).optional() }); -export const zShareIcon = z.string().nullable(); - -export const zShareNoteCreatedEvent = z.object({ - id: zShareId, - name: zShareName, - icon: zShareIcon.optional() +export const zShareNoteUpdatedEvent = z.object({ + id: z.uuid(), + name: z.string(), + icon: z.string().nullable(), + folderId: z.uuid(), + tags: z.array(z.string()), + updatedAt: z.iso.datetime() }); -export const zSharePropertiesId = z.uuid().readonly(); - -export const zShareTrashedBy = z.enum(['purpose', 'parent']); +/** + * BlockNote model + */ +export const zShareDocumentContent = z.array(z.unknown()); -export const zShareNote = z.object({ - id: z.uuid().readonly(), - name: z.string().min(1).max(255), - icon: z.string().nullable(), - folderId: zSharePropertiesId, - tags: z.array(z.string()).readonly(), - updatedAt: z.iso.datetime().readonly(), - trashed: z.object({ - trashedBy: zShareTrashedBy, - trashedAt: z.iso.datetime() - }).readonly().nullable() +export const zShareDocument = z.object({ + id: z.uuid(), + content: zShareDocumentContent }); -export const zShareNoteUpdatedEvent = zShareNote; +export const zShareDocumentCommittedEvent = zShareDocument.and(z.object({ + userId: z.string(), + tags: z.array(z.string()), + outgoingLinkIds: z.array(z.uuid()) +})); /** * User ID from Authentik (need to change subject mode to User's ID instead of hashed) */ -export const 
zShareUserPropertiesId = z.string(); +export const zShareId = z.string(); export const zShareUserDeletedEvent = z.object({ - id: zShareUserPropertiesId + id: zShareId }); export const zDocumentError = z.object({ @@ -278,31 +260,11 @@ export const zNoteWorkspaceTreeFolder = z.object({ updatedAt: zNotePropertiesUpdatedAt }); -export const zShareNoteCreatedEventWritable = z.object({ - name: zShareName, - icon: zShareIcon.optional() -}); - -export const zShareNoteDeletedEventWritable = z.record(z.string(), z.unknown()); - -export const zShareNoteSearchWritable = z.object({ - name: zShareName, - plainTextContent: z.string().optional(), - tags: z.array(z.string()).optional() -}); - /** * BlockNote model */ export const zShareDocumentContentWritable = z.array(z.unknown()); -export const zShareNoteWritable = z.object({ - name: z.string().min(1).max(255), - icon: z.string().nullable() -}); - -export const zShareNoteUpdatedEventWritable = zShareNoteWritable; - export const zDocumentRevisionWritable = z.object({ name: z.string().min(1).max(255).nullable() }); diff --git a/pkg/api/share/share.gen.go b/pkg/api/share/share.gen.go index 4558e1cb..bd3613c5 100644 --- a/pkg/api/share/share.gen.go +++ b/pkg/api/share/share.gen.go @@ -9,24 +9,6 @@ import ( openapi_types "github.com/oapi-codegen/runtime/types" ) -// Defines values for TrashedBy. -const ( - Parent TrashedBy = "parent" - Purpose TrashedBy = "purpose" -) - -// Valid indicates whether the value is a known member of the TrashedBy enum. -func (e TrashedBy) Valid() bool { - switch e { - case Parent: - return true - case Purpose: - return true - default: - return false - } -} - // Document defines model for Document. 
type Document struct { // Content BlockNote model @@ -41,50 +23,28 @@ type DocumentCommittedEvent struct { Id openapi_types.UUID `json:"id"` OutgoingLinkIds []openapi_types.UUID `json:"outgoingLinkIds"` Tags []string `json:"tags"` + UserId string `json:"userId"` } // DocumentContent BlockNote model type DocumentContent = []map[string]interface{} -// Note defines model for Note. -type Note struct { - FolderId *PropertiesId `json:"folderId,omitempty"` - Icon *string `json:"icon"` - Id *openapi_types.UUID `json:"id,omitempty"` - - // Name Can be empty string when creating but will be set to "Untitled Note" internally - Name string `json:"name"` - Tags *[]string `json:"tags,omitempty"` - Trashed *NoteTrashed `json:"trashed,omitempty"` - UpdatedAt *time.Time `json:"updatedAt,omitempty"` -} - -// NoteTrashed defines model for . -type NoteTrashed struct { - TrashedAt time.Time `json:"trashedAt"` - TrashedBy TrashedBy `json:"trashedBy"` -} - // NoteCreatedEvent defines model for NoteCreatedEvent. type NoteCreatedEvent struct { - Icon *Icon `json:"icon,omitempty"` - Id *Id `json:"id,omitempty"` - - // Name Can be empty string when creating but will be set to "Untitled Note" internally - Name Name `json:"name"` + Icon *string `json:"icon,omitempty"` + Id openapi_types.UUID `json:"id"` + Name string `json:"name"` } // NoteDeletedEvent defines model for NoteDeletedEvent. type NoteDeletedEvent struct { - Id *Id `json:"id,omitempty"` + Id openapi_types.UUID `json:"id"` } // NoteSearch defines model for NoteSearch. type NoteSearch struct { - Id *Id `json:"id,omitempty"` - - // Name Can be empty string when creating but will be set to "Untitled Note" internally - Name Name `json:"name"` + Id openapi_types.UUID `json:"id"` + Name string `json:"name"` // PlainTextContent Plain text content PlainTextContent *string `json:"plainTextContent,omitempty"` @@ -92,28 +52,20 @@ type NoteSearch struct { } // NoteUpdatedEvent defines model for NoteUpdatedEvent. 
-type NoteUpdatedEvent = Note - -// TrashedBy defines model for TrashedBy. -type TrashedBy string +type NoteUpdatedEvent struct { + FolderId openapi_types.UUID `json:"folderId"` + Icon *string `json:"icon"` + Id openapi_types.UUID `json:"id"` + Name string `json:"name"` + Tags []string `json:"tags"` + UpdatedAt time.Time `json:"updatedAt"` +} // UserDeletedEvent defines model for UserDeletedEvent. type UserDeletedEvent struct { // Id User ID from Authentik (need to change subject mode to User's ID instead of hashed) - Id UserPropertiesId `json:"id"` + Id Id `json:"id"` } -// UserPropertiesId User ID from Authentik (need to change subject mode to User's ID instead of hashed) -type UserPropertiesId = string - -// Icon defines model for icon. -type Icon = string - -// Id defines model for id. -type Id = openapi_types.UUID - -// Name Can be empty string when creating but will be set to "Untitled Note" internally -type Name = string - -// PropertiesId defines model for properties-id. -type PropertiesId = openapi_types.UUID +// Id User ID from Authentik (need to change subject mode to User's ID instead of hashed) +type Id = string