diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index 0eb7767..dc99a30 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -22,6 +22,16 @@ jobs:
- name: Build
run: dotnet build --configuration Release --no-restore
+
+ - uses: supabase/setup-cli@v1
+ with:
+ version: latest
+
+      - name: Start Supabase
+ run: supabase start
+
+ - name: Test
+ run: dotnet test --no-restore
#- name: Add hosts entries
# run: |
diff --git a/.gitignore b/.gitignore
index fcac666..b2fb0f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -406,3 +406,8 @@ ASALocalRun/
# Local History for Visual Studio
.localhistory/
/RealtimeTests/.runsettings
+
+# supabase stuff
+supabase/.branches
+supabase/.temp
+supabase/.env
\ No newline at end of file
diff --git a/Realtime/Binding.cs b/Realtime/Binding.cs
new file mode 100644
index 0000000..0aec757
--- /dev/null
+++ b/Realtime/Binding.cs
@@ -0,0 +1,15 @@
+using Supabase.Realtime.Interfaces;
+using Supabase.Realtime.PostgresChanges;
+
+namespace Supabase.Realtime;
+
+public class Binding
+{
+ public PostgresChangesOptions? Options { get; set; }
+
+ public IRealtimeChannel.PostgresChangesHandler? Handler { get; set; }
+
+ public PostgresChangesOptions.ListenType? ListenType { get; set; }
+
+ public int? Id { get; set; }
+}
\ No newline at end of file
diff --git a/Realtime/Channel/Push.cs b/Realtime/Channel/Push.cs
index 02148b0..38c3999 100644
--- a/Realtime/Channel/Push.cs
+++ b/Realtime/Channel/Push.cs
@@ -145,8 +145,13 @@ internal void StartTimeout()
///
private void HandleSocketMessageReceived(IRealtimeSocket sender, SocketResponse message)
{
- if (message.Ref != Ref) return;
-
+        // TODO: verify whether the realtime server still sends the reply below after receiving a track presence event.
+        // {"ref":"bd07efe5-ca06-4257-b080-79779c6f76c4","event":"phx_reply","payload":{"status":"ok","response":{}},"topic":"realtime:online-users"}
+        // That reply was previously used to stop the timeout handler.
+        // All tests still pass on versions prior to 2.34.21.
+ var isPresenceDiff = message is { Event: Constants.EventType.PresenceDiff };
+ if (!isPresenceDiff && message.Ref != Ref) return;
+
CancelTimeout();
Response = message;
NotifyMessageReceived(message);
diff --git a/Realtime/PostgresChanges/PostgresChangesOptions.cs b/Realtime/PostgresChanges/PostgresChangesOptions.cs
index 973fa33..6f4a2e9 100644
--- a/Realtime/PostgresChanges/PostgresChangesOptions.cs
+++ b/Realtime/PostgresChanges/PostgresChangesOptions.cs
@@ -1,4 +1,5 @@
-using Newtonsoft.Json;
+using System;
+using Newtonsoft.Json;
using Supabase.Core.Attributes;
using System.Collections.Generic;
@@ -78,7 +79,7 @@ public enum ListenType
///
[JsonProperty("event")]
public string Event => Core.Helpers.GetMappedToAttr(_listenType).Mapping!;
-
+
private readonly ListenType _listenType;
///
@@ -97,4 +98,37 @@ public PostgresChangesOptions(string schema, string? table = null, ListenType ev
Filter = filter;
Parameters = parameters;
}
+
+ private bool Equals(PostgresChangesOptions other)
+ {
+ return _listenType == other._listenType && Schema == other.Schema && Table == other.Table && Filter == other.Filter;
+ }
+
+ ///
+    /// Checks whether the specified object is equal to the current instance.
+ ///
+ ///
+ ///
+ public override bool Equals(object? obj)
+ {
+ if (obj is null) return false;
+ if (obj.GetType() != GetType()) return false;
+ return Equals((PostgresChangesOptions)obj);
+ }
+
+ ///
+    /// Generates a hash code consistent with Equals (listen type, schema, table and filter).
+ ///
+ ///
+ public override int GetHashCode()
+ {
+ unchecked
+ {
+ var hashCode = (int)_listenType;
+ hashCode = (hashCode * 397) ^ Schema.GetHashCode();
+ hashCode = (hashCode * 397) ^ (Table != null ? Table.GetHashCode() : 0);
+ hashCode = (hashCode * 397) ^ (Filter != null ? Filter.GetHashCode() : 0);
+ return hashCode;
+ }
+ }
}
\ No newline at end of file
diff --git a/Realtime/PostgresChanges/PostgresChangesResponse.cs b/Realtime/PostgresChanges/PostgresChangesResponse.cs
index 35434db..2cf44f1 100644
--- a/Realtime/PostgresChanges/PostgresChangesResponse.cs
+++ b/Realtime/PostgresChanges/PostgresChangesResponse.cs
@@ -1,4 +1,5 @@
-using Newtonsoft.Json;
+using System.Collections.Generic;
+using Newtonsoft.Json;
using Supabase.Postgrest.Models;
using Supabase.Realtime.Socket;
@@ -73,4 +74,7 @@ public class PostgresChangesPayload where T : class
///
[JsonProperty("data")]
public SocketResponsePayload? Data { get; set; }
+
+ [JsonProperty("ids")]
+ public List Ids { get; set; }
}
\ No newline at end of file
diff --git a/Realtime/RealtimeChannel.cs b/Realtime/RealtimeChannel.cs
index ed429a3..c38ea8f 100644
--- a/Realtime/RealtimeChannel.cs
+++ b/Realtime/RealtimeChannel.cs
@@ -1,5 +1,6 @@
using System;
using System.Collections.Generic;
+using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading.Tasks;
using System.Timers;
@@ -154,14 +155,13 @@ public class RealtimeChannel : IRealtimeChannel
private readonly List _messageReceivedHandlers = new();
private readonly List _errorEventHandlers = new();
- private readonly Dictionary> _postgresChangesHandlers =
- new();
-
private bool CanPush => IsJoined && Socket.IsConnected;
private bool _hasJoinedOnce;
private readonly Timer _rejoinTimer;
private bool _isRejoining;
+ private List _bindings = [];
+
///
/// Initializes a Channel - must call `Subscribe()` to receive events.
///
@@ -330,11 +330,7 @@ private void NotifyMessageReceived(SocketResponse message)
///
public void AddPostgresChangeHandler(ListenType listenType, PostgresChangesHandler postgresChangeHandler)
{
- if (!_postgresChangesHandlers.ContainsKey(listenType))
- _postgresChangesHandlers[listenType] = new List();
-
- if (!_postgresChangesHandlers[listenType].Contains(postgresChangeHandler))
- _postgresChangesHandlers[listenType].Add(postgresChangeHandler);
+ BindPostgresChangesHandler(listenType, postgresChangeHandler);
}
///
@@ -344,16 +340,16 @@ public void AddPostgresChangeHandler(ListenType listenType, PostgresChangesHandl
///
public void RemovePostgresChangeHandler(ListenType listenType, PostgresChangesHandler postgresChangeHandler)
{
- if (_postgresChangesHandlers.ContainsKey(listenType) &&
- _postgresChangesHandlers[listenType].Contains(postgresChangeHandler))
- _postgresChangesHandlers[listenType].Remove(postgresChangeHandler);
+ RemovePostgresChangesFromBinding(listenType, postgresChangeHandler);
}
///
/// Clears all postgres changes listeners.
///
- public void ClearPostgresChangeHandlers() =>
- _postgresChangesHandlers.Clear();
+ public void ClearPostgresChangeHandlers()
+ {
+ _bindings.Clear();
+ }
///
/// Adds an error event handler.
@@ -407,15 +403,7 @@ private void NotifyPostgresChanges(EventType eventType, PostgresChangesResponse
_ => ListenType.All
};
- // Invoke the wildcard listener (but only once)
- if (listenType != ListenType.All &&
- _postgresChangesHandlers.TryGetValue(ListenType.All, out var changesHandler))
- foreach (var handler in changesHandler.ToArray())
- handler.Invoke(this, response);
-
- if (_postgresChangesHandlers.TryGetValue(listenType, out var postgresChangesHandler))
- foreach (var handler in postgresChangesHandler.ToArray())
- handler.Invoke(this, response);
+ InvokeProperlyHandlerFromBind(listenType, response);
}
///
@@ -428,6 +416,8 @@ private void NotifyPostgresChanges(EventType eventType, PostgresChangesResponse
public IRealtimeChannel Register(PostgresChangesOptions postgresChangesOptions)
{
PostgresChangesOptions.Add(postgresChangesOptions);
+
+ BindPostgresChangesOptions(postgresChangesOptions);
return this;
}
@@ -673,6 +663,8 @@ private void HandleJoinResponse(IRealtimePush s
Options.SerializerSettings);
if (obj?.Payload == null) return;
+ obj.Payload.Response?.change?.ForEach(BindIdPostgresChanges);
+
switch (obj.Payload.Status)
{
// A response was received from the channel
@@ -764,4 +756,113 @@ internal void HandleSocketMessage(SocketResponse message)
break;
}
}
+
+ ///
+ /// Create a Binding and add to a list
+ ///
+ ///
+ private void BindPostgresChangesOptions(PostgresChangesOptions options)
+ {
+ var founded = _bindings.FirstOrDefault(b => options.Equals(b.Options));
+ if (founded != null) return;
+
+ _bindings.Add(
+ new Binding
+ {
+ Options = options,
+ }
+ );
+ }
+
+ ///
+ /// Try to bind a PostgresChangesHandler to a PostgresChangesOptions
+ ///
+ ///
+ ///
+ private void BindPostgresChangesHandler(ListenType listenType, PostgresChangesHandler handler)
+ {
+ var founded = _bindings.FirstOrDefault(b =>
+ b.Options?.Event == Core.Helpers.GetMappedToAttr(listenType).Mapping &&
+ b.Handler == null
+ );
+ if (founded != null)
+ {
+ founded.Handler = handler;
+ founded.ListenType = listenType;
+ return;
+ }
+
+ BindPostgresChangesHandlerGeneric(listenType, handler);
+
+ }
+
+ private void BindPostgresChangesHandlerGeneric(ListenType listenType, PostgresChangesHandler handler)
+ {
+ var founded = _bindings.FirstOrDefault(b =>
+ (b.Options?.Event == Core.Helpers.GetMappedToAttr(listenType).Mapping || b.Options?.Event == "*") &&
+ b.Handler == null
+ );
+ if (founded == null) return;
+
+ founded.Handler = handler;
+ founded.ListenType = listenType;
+ }
+
+ ///
+ /// Filter the binding list and try to add an id from socket to its binding
+ ///
+ ///
+ private void BindIdPostgresChanges(PhoenixPostgresChangeResponse joinResponse)
+ {
+ var founded = _bindings.FirstOrDefault(b => b.Options != null &&
+ b.Options.Event == joinResponse.eventName &&
+ b.Options.Table == joinResponse.table &&
+ b.Options.Schema == joinResponse.schema &&
+ b.Options.Filter == joinResponse.filter);
+ if (founded == null) return;
+ founded.Id = joinResponse?.id;
+ }
+
+ ///
+ /// Try to invoke the handler properly based on event type and socket response
+ ///
+ ///
+ ///
+ private void InvokeProperlyHandlerFromBind(ListenType eventType, PostgresChangesResponse response)
+ {
+ var all = _bindings.FirstOrDefault(b =>
+ {
+ if (b.Options == null && response.Payload == null && b.Handler == null) return false;
+
+ return response.Payload != null && response.Payload.Ids.Contains(b.Id) && eventType != ListenType.All &&
+ b.ListenType == ListenType.All;
+ });
+
+ if (all != null)
+ {
+ all.Handler?.Invoke(this, response);
+ return;
+ }
+
+        // Invoke every matching specific handler if possible
+ _bindings.ForEach(binding =>
+ {
+ if (binding.ListenType != eventType) return;
+ if (binding.Options == null || response.Payload == null || binding.Handler == null) return;
+
+ if (response.Payload.Ids.Contains(binding.Id)) binding.Handler.Invoke(this, response);
+ });
+ }
+
+ ///
+ /// Remove handler from binding
+ ///
+ ///
+ ///
+ private void RemovePostgresChangesFromBinding(ListenType eventType, PostgresChangesHandler handler)
+ {
+ var binding = _bindings.FirstOrDefault(b => b.Handler == handler && b.ListenType == eventType);
+ if (binding == null) return;
+ _bindings.Remove(binding);
+ }
}
\ No newline at end of file
diff --git a/Realtime/RealtimeSocket.cs b/Realtime/RealtimeSocket.cs
index 08aba29..9589e4f 100644
--- a/Realtime/RealtimeSocket.cs
+++ b/Realtime/RealtimeSocket.cs
@@ -2,6 +2,7 @@
using System.Collections.Generic;
using System.Linq;
using System.Net.WebSockets;
+using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Newtonsoft.Json;
@@ -88,6 +89,7 @@ public RealtimeSocket(string endpoint, ClientOptions options)
_connection = new WebsocketClient(new Uri(EndpointUrl), () =>
{
var socket = new ClientWebSocket();
+ if (RuntimeInformation.IsOSPlatform(OSPlatform.Create("BROWSER"))) return socket;
foreach (var header in Headers)
socket.Options.SetRequestHeader(header.Key, header.Value);
diff --git a/Realtime/Socket/Responses/PhoenixPostgresChangeResponse.cs b/Realtime/Socket/Responses/PhoenixPostgresChangeResponse.cs
new file mode 100644
index 0000000..435b958
--- /dev/null
+++ b/Realtime/Socket/Responses/PhoenixPostgresChangeResponse.cs
@@ -0,0 +1,21 @@
+using Newtonsoft.Json;
+
+namespace Supabase.Realtime.Socket.Responses;
+
+public class PhoenixPostgresChangeResponse
+{
+ [JsonProperty("id")]
+ public int? id { get; set; }
+
+ [JsonProperty("event")]
+ public string? eventName { get; set; }
+
+ [JsonProperty("filter")]
+ public string? filter { get; set; }
+
+ [JsonProperty("schema")]
+ public string? schema { get; set; }
+
+ [JsonProperty("table")]
+ public string? table { get; set; }
+}
\ No newline at end of file
diff --git a/Realtime/Socket/Responses/PhoenixResponse.cs b/Realtime/Socket/Responses/PhoenixResponse.cs
index 8acf2d0..096997a 100644
--- a/Realtime/Socket/Responses/PhoenixResponse.cs
+++ b/Realtime/Socket/Responses/PhoenixResponse.cs
@@ -11,7 +11,7 @@ public class PhoenixResponse
/// The response.
///
[JsonProperty("response")]
- public object? Response;
+ public PostgresChangeResponse? Response;
///
/// The status.
diff --git a/Realtime/Socket/Responses/PostgresChangeResponse.cs b/Realtime/Socket/Responses/PostgresChangeResponse.cs
new file mode 100644
index 0000000..c905311
--- /dev/null
+++ b/Realtime/Socket/Responses/PostgresChangeResponse.cs
@@ -0,0 +1,10 @@
+using System.Collections.Generic;
+using Newtonsoft.Json;
+
+namespace Supabase.Realtime.Socket.Responses;
+
+public class PostgresChangeResponse
+{
+ [JsonProperty("postgres_changes")]
+ public List change { get; set; }
+}
\ No newline at end of file
diff --git a/RealtimeTests/ChannelPostgresChangesTests.cs b/RealtimeTests/ChannelPostgresChangesTests.cs
index 6ea3a77..f767665 100644
--- a/RealtimeTests/ChannelPostgresChangesTests.cs
+++ b/RealtimeTests/ChannelPostgresChangesTests.cs
@@ -1,5 +1,6 @@
using System;
using System.Collections.Generic;
+using System.Diagnostics;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.VisualStudio.TestTools.UnitTesting;
@@ -71,6 +72,89 @@ public async Task ChannelReceivesInsertCallback()
Assert.IsTrue(check);
}
+ [TestMethod("Channel: Receives Filtered Insert Callback")]
+ public async Task ChannelReceivesInsertCallbackFiltered()
+ {
+ var tsc = new TaskCompletionSource();
+
+ var channel = _socketClient!.Channel("realtime", "public", "todos", "details",
+ "Client receives filtered insert callback? ✅");
+
+ channel.AddPostgresChangeHandler(ListenType.Inserts, (_, changes) =>
+ {
+ var oldModel = changes.Model();
+
+ Assert.AreEqual("Client receives filtered insert callback? ✅", oldModel?.Details);
+
+ tsc.SetResult(true);
+ });
+
+ await channel.Subscribe();
+ await _restClient!.Table()
+ .Insert(new Todo { UserId = 1, Details = "Client receives insert callback? ✅" });
+
+ await _restClient!.Table()
+ .Insert(new Todo { UserId = 2, Details = "Client receives filtered insert callback? ✅" });
+
+ var check = await tsc.Task;
+ Assert.IsTrue(check);
+ }
+
+    [TestMethod("Channel: Receives Two Callbacks (Filtered and Unfiltered)")]
+ public async Task ChannelReceivesTwoCallbacks()
+ {
+ var tsc = new TaskCompletionSource();
+
+ var response = await _restClient!.Table()
+ .Insert(new Todo { UserId = 1, Details = "Client receives insert callback? ✅" });
+ await _restClient!.Table()
+ .Insert(new Todo { UserId = 2, Details = "Client receives filtered insert callback? ✅" });
+
+ var model = response.Models.First();
+ var oldDetails = model.Details;
+ var newDetails = $"I'm an updated item ✏️ - {DateTime.Now}";
+
+ var channel = _socketClient!.Channel("realtime", "public", "todos");
+ channel.AddPostgresChangeHandler(ListenType.Updates, (_, changes) =>
+ {
+ var oldModel = changes.OldModel();
+
+ Assert.AreEqual(oldDetails, oldModel?.Details);
+
+ var updated = changes.Model();
+ Assert.AreEqual(newDetails, updated?.Details);
+
+ if (updated != null)
+ {
+ Assert.AreEqual(model.Id, updated.Id);
+ Assert.AreEqual(model.UserId, updated.UserId);
+ }
+
+ tsc.SetResult(true);
+ });
+
+ const string filter = "Client receives filtered insert callback? ✅";
+ channel.Register(new PostgresChangesOptions("public", "todos", ListenType.Inserts, $"details=eq.{filter}"));
+ channel.AddPostgresChangeHandler(ListenType.Inserts, (_, changes) =>
+ {
+ var insertedModel = changes.Model();
+
+ Assert.AreEqual("Client receives filtered insert callback? ✅", insertedModel?.Details);
+
+ tsc.SetResult(true);
+ });
+
+ await channel.Subscribe();
+
+ await _restClient.Table()
+ .Set(x => x.Details!, newDetails)
+ .Match(model)
+ .Update();
+
+ var check = await tsc.Task;
+ Assert.IsTrue(check);
+ }
+
[TestMethod("Channel: Receives Update Callback")]
public async Task ChannelReceivesUpdateCallback()
{
@@ -134,6 +218,38 @@ public async Task ChannelReceivesDeleteCallback()
Assert.IsTrue(check);
}
+    [TestMethod("Channel: Receives Filtered Delete Callback")]
+ public async Task ChannelReceivesFilteredDeleteCallback()
+ {
+ var tsc = new TaskCompletionSource();
+ var channel = _socketClient!.Channel("realtime", "public", "todos");
+
+ var todo1 = await _restClient!.Table().Insert(new Todo
+ { UserId = 1, Details = "Client receives callbacks 1? ✅" });
+ var todo2 = await _restClient!.Table().Insert(new Todo
+ { UserId = 2, Details = "Client receives callbacks 2? ✅" });
+ await _restClient!.Table().Insert(new Todo
+ { UserId = 3, Details = "Client receives callbacks 3? ✅" });
+
+ channel.Register(new PostgresChangesOptions("public", "todos", ListenType.Deletes, $"details=eq.{todo1.Model?.Details}"));
+ channel.AddPostgresChangeHandler(ListenType.Deletes, (_, removed) =>
+ {
+ var result = removed.OldModel();
+ Assert.AreEqual(result?.Details, todo1.Model?.Details);
+ Assert.AreNotEqual(result?.Details, todo2.Model?.Details);
+
+ tsc.SetResult(true);
+ });
+
+ await channel.Subscribe();
+
+ await _restClient.Table().Match(todo1.Models.First()).Delete();
+ await _restClient.Table().Match(todo2.Models.First()).Delete();
+
+ var check = await tsc.Task;
+ Assert.IsTrue(check);
+ }
+
[TestMethod("Channel: Receives '*' Callback")]
public async Task ChannelReceivesWildcardCallback()
{
@@ -176,4 +292,55 @@ public async Task ChannelReceivesWildcardCallback()
Assert.IsTrue(updateTsc.Task.Result);
Assert.IsTrue(deleteTsc.Task.Result);
}
+
+ [TestMethod("Channel: Receives Several Same Callback")]
+ public async Task ChannelReceivesSeveralSameCallback()
+ {
+ var insertTask1 = new TaskCompletionSource();
+ var insertTask2 = new TaskCompletionSource();
+ var insertTask3 = new TaskCompletionSource();
+ const string filter1 = "Client receives callbacks 1? ✅";
+ const string filter2 = "Client receives callbacks 2? ✅";
+
+ var channel = _socketClient!.Channel("realtime", "public", "todos");
+
+ var count = 0;
+ channel.Register(new PostgresChangesOptions("public", "todos", ListenType.Inserts));
+ channel.AddPostgresChangeHandler(ListenType.Inserts, (_, added) =>
+ {
+ count++;
+ if (count == 3) insertTask1.TrySetResult(true);
+ });
+
+ channel.Register(new PostgresChangesOptions("public", "todos", ListenType.Inserts, $"details=eq.{filter1}"));
+ channel.AddPostgresChangeHandler(ListenType.Inserts, (_, added) =>
+ {
+ var model = added.Model();
+
+ insertTask2.SetResult(model?.Details == filter1);
+ });
+
+
+ channel.Register(new PostgresChangesOptions("public", "todos", ListenType.Inserts, $"details=eq.{filter2}"));
+ channel.AddPostgresChangeHandler(ListenType.Inserts, (_, added) =>
+ {
+ var model = added.Model();
+
+ insertTask3.SetResult(model?.Details == filter2);
+ });
+
+
+ await channel.Subscribe();
+
+ await _restClient!.Table().Insert(new Todo { UserId = 1, Details = "Client receives wildcard callbacks? ✅" });
+ await _restClient!.Table().Insert(new Todo { UserId = 1, Details = filter1 });
+ await _restClient!.Table().Insert(new Todo { UserId = 1, Details = filter2 });
+
+ await Task.WhenAll(insertTask1.Task, insertTask2.Task, insertTask3.Task);
+
+ Assert.IsTrue(insertTask1.Task.Result);
+ Assert.IsTrue(insertTask2.Task.Result);
+ Assert.IsTrue(insertTask3.Task.Result);
+ }
+
}
\ No newline at end of file
diff --git a/RealtimeTests/Helpers.cs b/RealtimeTests/Helpers.cs
index 276c49e..ffd05f8 100644
--- a/RealtimeTests/Helpers.cs
+++ b/RealtimeTests/Helpers.cs
@@ -7,11 +7,10 @@ namespace RealtimeTests;
internal static class Helpers
{
- private const string ApiKey =
- "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiIiLCJpYXQiOjE2NzEyMzc4NzMsImV4cCI6MjAwMjc3Mzk5MywiYXVkIjoiIiwic3ViIjoiIiwicm9sZSI6ImF1dGhlbnRpY2F0ZWQifQ.qoYdljDZ9rjfs1DKj5_OqMweNtj7yk20LZKlGNLpUO8";
+ private const string ApiKey = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0";
- private const string SocketEndpoint = "ws://realtime-dev.localhost:4000/socket";
- private const string RestEndpoint = "http://localhost:3000";
+ private const string SocketEndpoint = "ws://127.0.0.1:54321/realtime/v1";
+ private const string RestEndpoint = "http://localhost:54321/rest/v1";
public static Supabase.Postgrest.Client RestClient() => new(RestEndpoint, new Supabase.Postgrest.ClientOptions());
diff --git a/supabase/config.toml b/supabase/config.toml
new file mode 100644
index 0000000..7567704
--- /dev/null
+++ b/supabase/config.toml
@@ -0,0 +1,290 @@
+# For detailed configuration reference documentation, visit:
+# https://supabase.com/docs/guides/local-development/cli/config
+# A string used to distinguish different Supabase projects on the same host. Defaults to the
+# working directory name when running `supabase init`.
+project_id = "realtime-csharp"
+
+[api]
+enabled = true
+# Port to use for the API URL.
+port = 54321
+# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
+# endpoints. `public` and `graphql_public` schemas are included by default.
+schemas = ["public", "graphql_public"]
+# Extra schemas to add to the search_path of every request.
+extra_search_path = ["public", "extensions"]
+# The maximum number of rows returns from a view, table, or stored procedure. Limits payload size
+# for accidental or malicious requests.
+max_rows = 1000
+
+[api.tls]
+# Enable HTTPS endpoints locally using a self-signed certificate.
+enabled = false
+
+[db]
+# Port to use for the local database URL.
+port = 54322
+# Port used by db diff command to initialize the shadow database.
+shadow_port = 54320
+# The database major version to use. This has to be the same as your remote database's. Run `SHOW
+# server_version;` on the remote database to check.
+major_version = 15
+
+[db.pooler]
+enabled = false
+# Port to use for the local connection pooler.
+port = 54329
+# Specifies when a server connection can be reused by other clients.
+# Configure one of the supported pooler modes: `transaction`, `session`.
+pool_mode = "transaction"
+# How many server connections to allow per user/database pair.
+default_pool_size = 20
+# Maximum number of client connections allowed.
+max_client_conn = 100
+
+# [db.vault]
+# secret_key = "env(SECRET_VALUE)"
+
+[db.seed]
+# If enabled, seeds the database after migrations during a db reset.
+enabled = true
+# Specifies an ordered list of seed files to load during db reset.
+# Supports glob patterns relative to supabase directory: "./seeds/*.sql"
+sql_paths = ["./seed.sql"]
+
+[realtime]
+enabled = true
+# Bind realtime via either IPv4 or IPv6. (default: IPv4)
+# ip_version = "IPv6"
+# The maximum length in bytes of HTTP request headers. (default: 4096)
+# max_header_length = 4096
+
+[studio]
+enabled = true
+# Port to use for Supabase Studio.
+port = 54323
+# External URL of the API server that frontend connects to.
+api_url = "http://127.0.0.1"
+# OpenAI API Key to use for Supabase AI in the Supabase Studio.
+openai_api_key = "env(OPENAI_API_KEY)"
+
+# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
+# are monitored, and you can view the emails that would have been sent from the web interface.
+[inbucket]
+enabled = true
+# Port to use for the email testing server web interface.
+port = 54324
+# Uncomment to expose additional ports for testing user applications that send emails.
+# smtp_port = 54325
+# pop3_port = 54326
+# admin_email = "admin@email.com"
+# sender_name = "Admin"
+
+[storage]
+enabled = true
+# The maximum file size allowed (e.g. "5MB", "500KB").
+file_size_limit = "50MiB"
+
+# Image transformation API is available to Supabase Pro plan.
+# [storage.image_transformation]
+# enabled = true
+
+# Uncomment to configure local storage buckets
+# [storage.buckets.images]
+# public = false
+# file_size_limit = "50MiB"
+# allowed_mime_types = ["image/png", "image/jpeg"]
+# objects_path = "./images"
+
+[auth]
+enabled = true
+# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
+# in emails.
+site_url = "http://127.0.0.1:3000"
+# A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
+additional_redirect_urls = ["https://127.0.0.1:3000"]
+# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
+jwt_expiry = 3600
+# If disabled, the refresh token will never expire.
+enable_refresh_token_rotation = true
+# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds.
+# Requires enable_refresh_token_rotation = true.
+refresh_token_reuse_interval = 10
+# Allow/disallow new user signups to your project.
+enable_signup = true
+# Allow/disallow anonymous sign-ins to your project.
+enable_anonymous_sign_ins = false
+# Allow/disallow testing manual linking of accounts
+enable_manual_linking = false
+# Passwords shorter than this value will be rejected as weak. Minimum 6, recommended 8 or more.
+minimum_password_length = 6
+# Passwords that do not meet the following requirements will be rejected as weak. Supported values
+# are: `letters_digits`, `lower_upper_letters_digits`, `lower_upper_letters_digits_symbols`
+password_requirements = ""
+
+# Configure one of the supported captcha providers: `hcaptcha`, `turnstile`.
+# [auth.captcha]
+# enabled = true
+# provider = "hcaptcha"
+# secret = ""
+
+[auth.email]
+# Allow/disallow new user signups via email to your project.
+enable_signup = true
+# If enabled, a user will be required to confirm any email change on both the old, and new email
+# addresses. If disabled, only the new email is required to confirm.
+double_confirm_changes = true
+# If enabled, users need to confirm their email address before signing in.
+enable_confirmations = false
+# If enabled, users will need to reauthenticate or have logged in recently to change their password.
+secure_password_change = false
+# Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email.
+max_frequency = "1s"
+# Number of characters used in the email OTP.
+otp_length = 6
+# Number of seconds before the email OTP expires (defaults to 1 hour).
+otp_expiry = 3600
+
+# Use a production-ready SMTP server
+# [auth.email.smtp]
+# enabled = true
+# host = "smtp.sendgrid.net"
+# port = 587
+# user = "apikey"
+# pass = "env(SENDGRID_API_KEY)"
+# admin_email = "admin@email.com"
+# sender_name = "Admin"
+
+# Uncomment to customize email template
+# [auth.email.template.invite]
+# subject = "You have been invited"
+# content_path = "./supabase/templates/invite.html"
+
+[auth.sms]
+# Allow/disallow new user signups via SMS to your project.
+enable_signup = false
+# If enabled, users need to confirm their phone number before signing in.
+enable_confirmations = false
+# Template for sending OTP to users
+template = "Your code is {{ .Code }}"
+# Controls the minimum amount of time that must pass before sending another sms otp.
+max_frequency = "5s"
+
+# Use pre-defined map of phone number to OTP for testing.
+# [auth.sms.test_otp]
+# 4152127777 = "123456"
+
+# Configure logged in session timeouts.
+# [auth.sessions]
+# Force log out after the specified duration.
+# timebox = "24h"
+# Force log out if the user has been inactive longer than the specified duration.
+# inactivity_timeout = "8h"
+
+# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used.
+# [auth.hook.custom_access_token]
+# enabled = true
+# uri = "pg-functions:////"
+
+# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`.
+[auth.sms.twilio]
+enabled = false
+account_sid = ""
+message_service_sid = ""
+# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead:
+auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)"
+
+# Multi-factor-authentication is available to Supabase Pro plan.
+[auth.mfa]
+# Control how many MFA factors can be enrolled at once per user.
+max_enrolled_factors = 10
+
+# Control MFA via App Authenticator (TOTP)
+[auth.mfa.totp]
+enroll_enabled = false
+verify_enabled = false
+
+# Configure MFA via Phone Messaging
+[auth.mfa.phone]
+enroll_enabled = false
+verify_enabled = false
+otp_length = 6
+template = "Your code is {{ .Code }}"
+max_frequency = "5s"
+
+# Configure MFA via WebAuthn
+# [auth.mfa.web_authn]
+# enroll_enabled = true
+# verify_enabled = true
+
+# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`,
+# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`,
+# `twitter`, `slack`, `spotify`, `workos`, `zoom`.
+[auth.external.apple]
+enabled = false
+client_id = ""
+# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead:
+secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)"
+# Overrides the default auth redirectUrl.
+redirect_uri = ""
+# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure,
+# or any other third-party OIDC providers.
+url = ""
+# If enabled, the nonce check will be skipped. Required for local sign in with Google auth.
+skip_nonce_check = false
+
+# Use Firebase Auth as a third-party provider alongside Supabase Auth.
+[auth.third_party.firebase]
+enabled = false
+# project_id = "my-firebase-project"
+
+# Use Auth0 as a third-party provider alongside Supabase Auth.
+[auth.third_party.auth0]
+enabled = false
+# tenant = "my-auth0-tenant"
+# tenant_region = "us"
+
+# Use AWS Cognito (Amplify) as a third-party provider alongside Supabase Auth.
+[auth.third_party.aws_cognito]
+enabled = false
+# user_pool_id = "my-user-pool-id"
+# user_pool_region = "us-east-1"
+
+[edge_runtime]
+enabled = true
+# Configure one of the supported request policies: `oneshot`, `per_worker`.
+# Use `oneshot` for hot reload, or `per_worker` for load testing.
+policy = "oneshot"
+# Port to attach the Chrome inspector for debugging edge functions.
+inspector_port = 8083
+
+# Use these configurations to customize your Edge Function.
+# [functions.MY_FUNCTION_NAME]
+# enabled = true
+# verify_jwt = true
+# import_map = "./functions/MY_FUNCTION_NAME/deno.json"
+# Uncomment to specify a custom file path to the entrypoint.
+# Supported file extensions are: .ts, .js, .mjs, .jsx, .tsx
+# entrypoint = "./functions/MY_FUNCTION_NAME/index.ts"
+# Specifies static files to be bundled with the function. Supports glob patterns.
+# For example, if you want to serve static HTML pages in your function:
+# static_files = [ "./functions/MY_FUNCTION_NAME/*.html" ]
+
+[analytics]
+enabled = true
+port = 54327
+# Configure one of the supported backends: `postgres`, `bigquery`.
+backend = "postgres"
+
+# Experimental features may be deprecated any time
+[experimental]
+# Configures Postgres storage engine to use OrioleDB (S3)
+orioledb_version = ""
+# Configures S3 bucket URL, eg. .s3-.amazonaws.com
+s3_host = "env(S3_HOST)"
+# Configures S3 bucket region, eg. us-east-1
+s3_region = "env(S3_REGION)"
+# Configures AWS_ACCESS_KEY_ID for S3 bucket
+s3_access_key = "env(S3_ACCESS_KEY)"
+# Configures AWS_SECRET_ACCESS_KEY for S3 bucket
+s3_secret_key = "env(S3_SECRET_KEY)"
diff --git a/supabase/migrations/20250224164421_init.sql b/supabase/migrations/20250224164421_init.sql
new file mode 100644
index 0000000..718e09b
--- /dev/null
+++ b/supabase/migrations/20250224164421_init.sql
@@ -0,0 +1,185 @@
+-- Create a second schema
+CREATE SCHEMA personal;
+
+-- USERS
+CREATE TYPE public.user_status AS ENUM ('ONLINE', 'OFFLINE');
+CREATE TABLE public.users
+(
+ username text primary key,
+ inserted_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ updated_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ favorite_numbers int[] DEFAULT null,
+ data jsonb DEFAULT null,
+ age_range int4range DEFAULT null,
+ status user_status DEFAULT 'ONLINE':: public.user_status,
+ catchphrase tsvector DEFAULT null
+);
+ALTER TABLE public.users
+ REPLICA IDENTITY FULL; -- Send "previous data" to supabase
+COMMENT
+ ON COLUMN public.users.data IS 'For unstructured data and prototyping.';
+
+CREATE TYPE public.todo_status AS ENUM ('NOT STARTED', 'STARTED', 'COMPLETED');
+create table public.todos
+(
+ id bigint generated by default as identity not null,
+ name text null,
+ notes text null,
+ done boolean null default false,
+ details text null,
+ inserted_at timestamp without time zone null default now(),
+ numbers int[] null,
+ user_id text null,
+ status public.todo_status not null default 'NOT STARTED'::todo_status,
+ constraint todos_pkey primary key (id)
+) tablespace pg_default;
+
+ALTER publication supabase_realtime add table public.todos;
+alter table public.todos replica identity full;
+
+-- CHANNELS
+CREATE TABLE public.channels
+(
+ id bigint GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
+ inserted_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ updated_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ data jsonb DEFAULT null,
+ slug text
+);
+ALTER TABLE public.channels
+    REPLICA IDENTITY FULL; -- Send "previous data" to supabase
+COMMENT
+ ON COLUMN public.channels.data IS 'For unstructured data and prototyping.';
+
+-- MESSAGES
+CREATE TABLE public.messages
+(
+ id bigint GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
+ inserted_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ updated_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ data jsonb DEFAULT null,
+ message text,
+ username text REFERENCES users NOT NULL,
+ channel_id bigint REFERENCES channels NOT NULL
+);
+ALTER TABLE public.messages
+ REPLICA IDENTITY FULL; -- Send "previous data" to supabase
+COMMENT
+ ON COLUMN public.messages.data IS 'For unstructured data and prototyping.';
+
+create table "public"."kitchen_sink"
+(
+ "id" serial primary key,
+ "string_value" varchar(255) null,
+ "bool_value" BOOL DEFAULT false,
+ "unique_value" varchar(255) UNIQUE,
+ "int_value" INT null,
+ "float_value" FLOAT null,
+ "double_value" DOUBLE PRECISION null,
+ "datetime_value" timestamp null,
+ "datetime_value_1" timestamp null,
+ "datetime_pos_infinite_value" timestamp null,
+ "datetime_neg_infinite_value" timestamp null,
+ "list_of_strings" TEXT[] null,
+ "list_of_datetimes" DATE[] null,
+ "list_of_ints" INT[] null,
+ "list_of_floats" FLOAT[] null,
+ "int_range" INT4RANGE null
+);
+
+CREATE TABLE public.movie
+(
+ id serial primary key,
+ created_at timestamp without time zone NOT NULL DEFAULT now(),
+ name character varying(255) NULL
+);
+
+CREATE TABLE public.person
+(
+ id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
+ created_at timestamp without time zone NOT NULL DEFAULT now(),
+ first_name character varying(255) NULL,
+ last_name character varying(255) NULL
+);
+
+CREATE TABLE public.profile
+(
+ profile_id int PRIMARY KEY references person (id),
+ email character varying(255) null,
+ created_at timestamp without time zone NOT NULL DEFAULT now()
+);
+
+CREATE TABLE public.movie_person
+(
+ id int generated by default as identity,
+ movie_id int references movie (id),
+ person_id int references person (id),
+ primary key (id, movie_id, person_id)
+);
+
+insert into "public"."movie" ("created_at", "id", "name")
+values ('2022-08-20 00:29:45.400188', 1, 'Top Gun: Maverick');
+insert into "public"."movie" ("created_at", "id", "name")
+values ('2022-08-22 00:29:45.400188', 2, 'Mad Max: Fury Road');
+insert into "public"."movie" ("created_at", "id", "name")
+values ('2022-08-28 00:29:45.400188', 3, 'Guns Away');
+
+
+insert into "public"."person" ("created_at", "first_name", "id", "last_name")
+values ('2022-08-20 00:30:02.120528', 'Tom', 1, 'Cruise');
+insert into "public"."person" ("created_at", "first_name", "id", "last_name")
+values ('2022-08-20 00:30:02.120528', 'Tom', 2, 'Holland');
+insert into "public"."person" ("created_at", "first_name", "id", "last_name")
+values ('2022-08-20 00:30:33.72443', 'Bob', 3, 'Saggett');
+insert into "public"."person" ("created_at", "first_name", "id", "last_name")
+values ('2022-08-20 00:30:33.72443', 'Random', 4, 'Actor');
+
+
+insert into "public"."profile" ("created_at", "email", "profile_id")
+values ('2022-08-20 00:30:33.72443', 'tom.cruise@supabase.io', 1);
+insert into "public"."profile" ("created_at", "email", "profile_id")
+values ('2022-08-20 00:30:33.72443', 'tom.holland@supabase.io', 2);
+insert into "public"."profile" ("created_at", "email", "profile_id")
+values ('2022-08-20 00:30:33.72443', 'bob.saggett@supabase.io', 3);
+
+insert into "public"."movie_person" ("id", "movie_id", "person_id")
+values (1, 1, 1);
+insert into "public"."movie_person" ("id", "movie_id", "person_id")
+values (2, 2, 2);
+insert into "public"."movie_person" ("id", "movie_id", "person_id")
+values (3, 1, 3);
+insert into "public"."movie_person" ("id", "movie_id", "person_id")
+values (4, 3, 4);
+
+
+-- STORED FUNCTION
+CREATE FUNCTION public.get_status(name_param text)
+ RETURNS user_status AS
+$$
+SELECT status
+from users
+WHERE username = name_param;
+$$
+ LANGUAGE SQL IMMUTABLE;
+
+-- SECOND SCHEMA USERS -- NOTE(review): the status column below casts its default to public.user_status, so the personal.user_status type created here is never used; confirm intent
+CREATE TYPE personal.user_status AS ENUM ('ONLINE', 'OFFLINE');
+CREATE TABLE personal.users
+(
+ username text primary key,
+ inserted_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ updated_at timestamp without time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
+ data jsonb DEFAULT null,
+ age_range int4range DEFAULT null,
+ status user_status DEFAULT 'ONLINE':: public.user_status
+);
+
+-- SECOND SCHEMA STORED FUNCTION -- NOTE(review): unqualified "users" resolves via search_path to public.users, not personal.users; confirm intent
+CREATE FUNCTION personal.get_status(name_param text)
+ RETURNS user_status AS
+$$
+SELECT status
+from users
+WHERE username = name_param;
+$$
+ LANGUAGE SQL IMMUTABLE;
\ No newline at end of file