Selaa lähdekoodia

feat: implement snowflake connector and action

Fixes https://emqx.atlassian.net/browse/EMQX-12024
Thales Macedo Garitezi 1 vuosi sitten
vanhempi
commit
2d3dca6794
35 muutettua tiedostoa jossa 3333 lisäystä ja 37 poistoa
  1. 30 7
      apps/emqx/test/emqx_common_test_helpers.erl
  2. 1 1
      apps/emqx/test/emqx_test_janitor.erl
  3. 1 0
      apps/emqx_bridge/src/emqx_action_info.erl
  4. 106 0
      apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl
  5. 94 0
      apps/emqx_bridge_snowflake/BSL.txt
  6. 22 0
      apps/emqx_bridge_snowflake/README.md
  7. 1 0
      apps/emqx_bridge_snowflake/docker-ct
  8. 94 0
      apps/emqx_bridge_snowflake/docs/dev-quick-ref.md
  9. 37 0
      apps/emqx_bridge_snowflake/mix.exs
  10. 16 0
      apps/emqx_bridge_snowflake/rebar.config
  11. 26 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake.app.src
  12. 21 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake.hrl
  13. 34 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_action_info.erl
  14. 241 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_action_schema.erl
  15. 27 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_app.erl
  16. 979 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_connector.erl
  17. 73 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_connector_info.erl
  18. 145 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_connector_schema.erl
  19. 53 0
      apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_sup.erl
  20. 1045 0
      apps/emqx_bridge_snowflake/test/emqx_bridge_snowflake_SUITE.erl
  21. 1 0
      apps/emqx_connector/src/emqx_connector_info.erl
  22. 9 1
      apps/emqx_connector_aggregator/mix.exs
  23. 13 0
      apps/emqx_connector_aggregator/src/emqx_connector_aggreg_buffer_ctx.erl
  24. 3 0
      apps/emqx_connector_aggregator/src/emqx_connector_aggregator.erl
  25. 1 0
      apps/emqx_machine/priv/reboot_lists.eterm
  26. 1 2
      apps/emqx_mix_utils/lib/mix/tasks/emqx.dialyzer.ex
  27. 6 0
      apps/emqx_utils/src/emqx_utils_sql.erl
  28. 51 0
      apps/emqx_utils/test/emqx_utils_sql_tests.erl
  29. 1 0
      changes/ee/feat-13745.en.md
  30. 1 0
      mix.exs
  31. 1 0
      rebar.config.erl
  32. 91 0
      rel/i18n/emqx_bridge_snowflake_action_schema.hocon
  33. 22 0
      rel/i18n/emqx_bridge_snowflake_connector_schema.hocon
  34. 44 26
      scripts/ct/run.sh
  35. 42 0
      scripts/install-snowflake-driver.sh

+ 30 - 7
apps/emqx/test/emqx_common_test_helpers.erl

@@ -95,7 +95,9 @@
     with_failure/5,
     enable_failure/4,
     heal_failure/4,
-    reset_proxy/2
+    reset_proxy/2,
+    create_proxy/3,
+    delete_proxy/3
 ]).
 
 %% TLS certs API
@@ -1101,7 +1103,7 @@ with_mock(Mod, FnName, MockedFn, Fun) ->
 %%-------------------------------------------------------------------------------
 
 reset_proxy(ProxyHost, ProxyPort) ->
-    Url = "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/reset",
+    Url = toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/reset",
     Body = <<>>,
     {ok, {{_, 204, _}, _, _}} = httpc:request(
         post,
@@ -1133,7 +1135,7 @@ heal_failure(FailureType, Name, ProxyHost, ProxyPort) ->
     end.
 
 switch_proxy(Switch, Name, ProxyHost, ProxyPort) ->
-    Url = "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name,
+    Url = toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/proxies/" ++ Name,
     Body =
         case Switch of
             off -> #{<<"enabled">> => false};
@@ -1149,7 +1151,7 @@ switch_proxy(Switch, Name, ProxyHost, ProxyPort) ->
 
 timeout_proxy(on, Name, ProxyHost, ProxyPort) ->
     Url =
-        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+        toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/proxies/" ++ Name ++
             "/toxics",
     NameBin = list_to_binary(Name),
     Body = #{
@@ -1169,7 +1171,7 @@ timeout_proxy(on, Name, ProxyHost, ProxyPort) ->
 timeout_proxy(off, Name, ProxyHost, ProxyPort) ->
     ToxicName = Name ++ "_timeout",
     Url =
-        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+        toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/proxies/" ++ Name ++
             "/toxics/" ++ ToxicName,
     Body = <<>>,
     {ok, {{_, 204, _}, _, _}} = httpc:request(
@@ -1181,7 +1183,7 @@ timeout_proxy(off, Name, ProxyHost, ProxyPort) ->
 
 latency_up_proxy(on, Name, ProxyHost, ProxyPort) ->
     Url =
-        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+        toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/proxies/" ++ Name ++
             "/toxics",
     NameBin = list_to_binary(Name),
     Body = #{
@@ -1204,7 +1206,7 @@ latency_up_proxy(on, Name, ProxyHost, ProxyPort) ->
 latency_up_proxy(off, Name, ProxyHost, ProxyPort) ->
     ToxicName = Name ++ "_latency_up",
     Url =
-        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+        toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/proxies/" ++ Name ++
             "/toxics/" ++ ToxicName,
     Body = <<>>,
     {ok, {{_, 204, _}, _, _}} = httpc:request(
@@ -1214,6 +1216,27 @@ latency_up_proxy(off, Name, ProxyHost, ProxyPort) ->
         [{body_format, binary}]
     ).
 
+create_proxy(ProxyHost, ProxyPort, Body) ->
+    Url = toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/proxies",
+    {ok, {{_, 201, _}, _, _}} = httpc:request(
+        post,
+        {Url, [], "application/json", emqx_utils_json:encode(Body)},
+        [],
+        [{body_format, binary}]
+    ).
+
+delete_proxy(ProxyHost, ProxyPort, ProxyName) ->
+    Url = toxiproxy_base_uri(ProxyHost, ProxyPort) ++ "/proxies/" ++ ProxyName,
+    {ok, {{_, 204, _}, _, _}} = httpc:request(
+        delete,
+        {Url, []},
+        [],
+        [{body_format, binary}]
+    ).
+
+toxiproxy_base_uri(ProxyHost, ProxyPort) ->
+    "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort).
+
 %%-------------------------------------------------------------------------------
 %% TLS certs
 %%-------------------------------------------------------------------------------

+ 1 - 1
apps/emqx/test/emqx_test_janitor.erl

@@ -91,7 +91,7 @@ do_terminate(Callbacks) ->
                 Failed
             catch
                 K:E:S ->
-                    ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]),
+                    ct:pal("error executing callback ~p:\n  ~p", [Fun, {K, E}]),
                     ct:pal("stacktrace: ~p", [S]),
                     [Fun | Failed]
             end

+ 1 - 0
apps/emqx_bridge/src/emqx_action_info.erl

@@ -117,6 +117,7 @@ hard_coded_action_info_modules_ee() ->
         emqx_bridge_redis_action_info,
         emqx_bridge_rocketmq_action_info,
         emqx_bridge_s3_upload_action_info,
+        emqx_bridge_snowflake_action_info,
         emqx_bridge_sqlserver_action_info,
         emqx_bridge_syskeeper_action_info,
         emqx_bridge_tdengine_action_info,

+ 106 - 0
apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl

@@ -182,6 +182,7 @@ add_source_hookpoint(Config) ->
     on_exit(fun() -> emqx_hooks:del(Hookpoint, {?MODULE, source_hookpoint_callback}) end),
     ok.
 
+%% action/source resource id
 resource_id(Config) ->
     #{
         kind := Kind,
@@ -1196,3 +1197,108 @@ t_start_action_or_source_with_disabled_connector(Config) ->
         []
     ),
     ok.
+
+%% For bridges that use `emqx_connector_aggregator' for aggregated mode uploads, verifies
+%% that the bridge can recover from a buffer file corruption, and does so while preserving
+%% uncompromised data.  `TCConfig' should contain keys that satisfy the usual functions of
+%% this module for creating connectors and actions.
+%%
+%%   * `aggreg_id' : identifier for the aggregator.  It's the name of the
+%%     `emqx_connector_aggregator' process defined when starting
+%%     `emqx_connector_aggreg_upload_sup'.
+%%   * `batch_size' : size of batch of messages to be sent before and after corruption.
+%%     Should be less than the configured maximum number of aggregated records.
+%%   * `rule_sql' : SQL statement for the rule that will send data to the action.
+%%   * `make_message_fn' : function taking `N', an integer, and producing a `#message{}'
+%%     that should match the rule topic.
+%%   * `prepare_fn' (optional) : function that is run before producing messages, but after
+%%     the connector, action and rule are created.  Receives a context map and should
+%%     return it.  Defaults to a no-op.
+%%   * `message_check_fn' : function that is run after the first batch is corrupted and
+%%     the second batch is uploaded.  Receives a context map containing the first batch
+%%     (which gets half corrupted) of messages in `messages_before' and the second batch
+%%     (after corruption) in `messages_after'.  Add any assertions and integrity checks
+%%     here.
+%%   * `trace_checkers' (optional) : either a function that receives the snabbkaffe trace
+%%     and performs its analysis, or a list of such functions.  Defaults to a no-op.
+t_aggreg_upload_restart_corrupted(TCConfig, Opts) ->
+    #{
+        aggreg_id := AggregId,
+        batch_size := BatchSize,
+        rule_sql := RuleSQL,
+        make_message_fn := MakeMessageFn,
+        message_check_fn := MessageCheckFn
+    } = Opts,
+    PrepareFn = maps:get(prepare_fn, Opts, fun(Ctx) -> Ctx end),
+    TraceCheckers = maps:get(trace_checkers, Opts, []),
+    #{type := ActionType} = get_common_values(TCConfig),
+    ?check_trace(
+        snk_timetrap(),
+        begin
+            %% Create a bridge with the sample configuration.
+            ?assertMatch({ok, _Bridge}, emqx_bridge_v2_testlib:create_bridge_api(TCConfig)),
+            {ok, _Rule} =
+                emqx_bridge_v2_testlib:create_rule_and_action_http(
+                    ActionType, <<"">>, TCConfig, #{
+                        sql => RuleSQL
+                    }
+                ),
+            Context0 = #{},
+            Context1 = PrepareFn(Context0),
+            Messages1 = lists:map(MakeMessageFn, lists:seq(1, BatchSize)),
+            Context2 = Context1#{messages_before => Messages1},
+            %% Ensure that they span multiple batch queries.
+            {ok, {ok, _}} =
+                ?wait_async_action(
+                    publish_messages_delayed(Messages1, 1),
+                    #{?snk_kind := connector_aggreg_records_written, action := AggregId}
+                ),
+            ct:pal("first batch's records have been written"),
+
+            %% Find out the buffer file.
+            {ok, #{filename := Filename}} = ?block_until(
+                #{?snk_kind := connector_aggreg_buffer_allocated, action := AggregId}
+            ),
+            ct:pal("new buffer allocated"),
+
+            %% Stop the bridge, corrupt the buffer file, and restart the bridge.
+            {ok, {{_, 204, _}, _, _}} = emqx_bridge_v2_testlib:disable_kind_http_api(TCConfig),
+            BufferFileSize = filelib:file_size(Filename),
+            ok = emqx_connector_aggregator_test_helpers:truncate_at(Filename, BufferFileSize div 2),
+            {ok, {{_, 204, _}, _, _}} = emqx_bridge_v2_testlib:enable_kind_http_api(TCConfig),
+
+            %% Send some more messages.
+            Messages2 = lists:map(MakeMessageFn, lists:seq(1, BatchSize)),
+            Context3 = Context2#{messages_after => Messages2},
+            ok = publish_messages_delayed(Messages2, 1),
+            ct:pal("published second batch"),
+
+            %% Wait until the delivery is completed.
+            {ok, _} = ?block_until(#{
+                ?snk_kind := connector_aggreg_delivery_completed, action := AggregId
+            }),
+            ct:pal("delivery completed"),
+
+            MessageCheckFn(Context3)
+        end,
+        TraceCheckers
+    ),
+    ok.
+
+snk_timetrap() ->
+    {CTTimetrap, _} = ct:get_timetrap_info(),
+    #{timetrap => max(0, CTTimetrap - 1_000)}.
+
+publish_messages_delayed(MessageEvents, Delay) ->
+    lists:foreach(
+        fun(Msg) ->
+            emqx:publish(Msg),
+            ct:sleep(Delay)
+        end,
+        MessageEvents
+    ).
+
+proplist_update(Proplist, K, Fn) ->
+    {K, OldV} = lists:keyfind(K, 1, Proplist),
+    NewV = Fn(OldV),
+    lists:keystore(K, 1, Proplist, {K, NewV}).

+ 94 - 0
apps/emqx_bridge_snowflake/BSL.txt

@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor:             Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work:        EMQX Enterprise Edition
+                      The Licensed Work is (c) 2024
+                      Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+                      modify, and create derivative work for research
+                      or education.
+Change Date:          2028-09-02
+Change License:       Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+“Business Source License” is a trademark of MariaDB Corporation Ab.
+
+-----------------------------------------------------------------------------
+
+Business Source License 1.1
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative
+works, redistribute, and make non-production use of the Licensed Work. The
+Licensor may make an Additional Use Grant, above, permitting limited
+production use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly
+available distribution of a specific version of the Licensed Work under this
+License, whichever comes first, the Licensor hereby grants you rights under
+the terms of the Change License, and the rights granted in the paragraph
+above terminate.
+
+If your use of the Licensed Work does not comply with the requirements
+currently in effect as described in this License, you must purchase a
+commercial license from the Licensor, its affiliated entities, or authorized
+resellers, or you must refrain from using the Licensed Work.
+
+All copies of the original and modified Licensed Work, and derivative works
+of the Licensed Work, are subject to this License. This License applies
+separately for each version of the Licensed Work and the Change Date may vary
+for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy
+of the Licensed Work. If you receive the Licensed Work in original or
+modified form from a third party, the terms and conditions set forth in this
+License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License’s text to license
+your works, and to refer to it using the trademark “Business Source License”,
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License’s text and the “Business
+Source License” name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where “compatible” means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text “None”.
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.

Tiedoston diff-näkymää rajattu, sillä se on liian suuri
+ 22 - 0
apps/emqx_bridge_snowflake/README.md


+ 1 - 0
apps/emqx_bridge_snowflake/docker-ct

@@ -0,0 +1 @@
+snowflake

+ 94 - 0
apps/emqx_bridge_snowflake/docs/dev-quick-ref.md

@@ -0,0 +1,94 @@
+## Basic helper functions
+
+```elixir
+Application.ensure_all_started(:odbc)
+user = "your_admin_user"
+pass = System.fetch_env!("SNOWFLAKE_PASSWORD")
+account = "orgid-accountid"
+server = "#{account}.snowflakecomputing.com"
+dsn = "snowflake"
+
+{:ok, conn} = (
+  "dsn=#{dsn};uid=#{user};pwd=#{pass};server=#{server};account=#{account};"
+  |> to_charlist()
+  |> :odbc.connect([])
+)
+
+query = fn conn, sql -> :odbc.sql_query(conn, sql |> to_charlist()) end
+```
+
+## Create user
+
+```sh
+openssl genrsa 2048 | openssl pkcs8 -topk8 -inform PEM -out snowflake_rsa_key.private.pem -nocrypt
+openssl rsa -in snowflake_rsa_key.private.pem -pubout -out snowflake_rsa_key.public.pem
+```
+
+```elixir
+test_user = "testuser"
+query.(conn, "create user #{test_user} password = 'TestUser99' must_change_password = false")
+# {:updated, :undefined}
+
+public_pem_contents_trimmed = File.read!("snowflake_rsa_key.public.pem") |> String.trim() |> String.split("\n") |> Enum.drop(1) |> Enum.drop(-1) |> Enum.join("\n")
+
+query.(conn, "alter user #{test_user} set rsa_public_key = '#{public_pem_contents_trimmed}'")
+# {:updated, :undefined}
+```
+
+## Create database objects
+
+```elixir
+database = "testdatabase"
+schema = "public"
+table = "test1"
+stage = "teststage0"
+pipe = "testpipe0"
+warehouse = "testwarehouse"
+snowpipe_role = "snowpipe1"
+snowpipe_user = "snowpipeuser"
+test_role = "testrole"
+fqn_table = "#{database}.#{schema}.#{table}"
+fqn_stage = "#{database}.#{schema}.#{stage}"
+fqn_pipe = "#{database}.#{schema}.#{pipe}"
+
+query.(conn, "use role accountadmin")
+
+# create database, table, stage, pipe, warehouse
+query.(conn, "create database if not exists #{database}")
+query.(conn, "create or replace table #{fqn_table} (clientid string, topic string, payload binary, publish_received_at timestamp_ltz)")
+query.(conn, "create stage if not exists #{fqn_stage} file_format = (type = csv parse_header = true) copy_options = (on_error = continue purge = true)")
+query.(conn, "create pipe if not exists #{fqn_pipe} as copy into #{fqn_table} from @#{fqn_stage} match_by_column_name = case_insensitive")
+query.(conn, "create or replace warehouse #{warehouse}")
+
+# Create a role for the Snowpipe privileges.
+query.(conn, "create or replace role #{snowpipe_role}")
+query.(conn, "create or replace role #{test_role}")
+# Grant the USAGE privilege on the database and schema that contain the pipe object.
+query.(conn, "grant usage on database #{database} to role #{snowpipe_role}")
+query.(conn, "grant usage on database #{database} to role #{test_role}")
+query.(conn, "grant usage on schema #{database}.#{schema} to role #{snowpipe_role}")
+query.(conn, "grant usage on schema #{database}.#{schema} to role #{test_role}")
+# Grant the INSERT and SELECT privileges on the target table.
+query.(conn, "grant insert, select on #{fqn_table} to role #{snowpipe_role}")
+# for cleaning up table after tests
+query.(conn, "grant insert, select, truncate, delete on #{fqn_table} to role #{test_role}")
+# Grant the USAGE privilege on the external stage.
+# must use read/write for internal stage
+# query.(conn, "grant usage on stage #{fqn_stage} to role #{snowpipe_role}")
+query.(conn, "grant read, write on stage #{fqn_stage} to role #{snowpipe_role}")
+# for cleaning up table after tests
+query.(conn, "grant read, write on stage #{fqn_stage} to role #{test_role}")
+# Grant the OPERATE and MONITOR privileges on the pipe object.
+query.(conn, "grant operate, monitor on pipe #{fqn_pipe} to role #{snowpipe_role}")
+# Grant the role to a user
+query.(conn, "create user if not exists #{snowpipe_user} password = 'TestUser99' must_change_password = false rsa_public_key = '#{public_pem_contents_trimmed}'")
+
+query.(conn, "grant usage on warehouse #{warehouse} to role #{test_role}")
+
+query.(conn, "grant role #{snowpipe_role} to user #{snowpipe_user}")
+query.(conn, "grant role #{snowpipe_role} to user #{test_user}")
+query.(conn, "grant role #{test_role} to user #{test_user}")
+# Set the role as the default role for the user
+query.(conn, "alter user #{snowpipe_user} set default_role = #{snowpipe_role}")
+query.(conn, "alter user testuser set default_role = #{test_role}")
+```

+ 37 - 0
apps/emqx_bridge_snowflake/mix.exs

@@ -0,0 +1,37 @@
+defmodule EMQXBridgeSnowflake.MixProject do
+  use Mix.Project
+  alias EMQXUmbrella.MixProject, as: UMP
+
+  def project do
+    [
+      app: :emqx_bridge_snowflake,
+      version: "0.1.0",
+      build_path: "../../_build",
+      erlc_options: UMP.erlc_options(),
+      erlc_paths: UMP.erlc_paths(),
+      deps_path: "../../deps",
+      lockfile: "../../mix.lock",
+      elixir: "~> 1.14",
+      start_permanent: Mix.env() == :prod,
+      deps: deps()
+    ]
+  end
+
+  def application do
+    [
+      extra_applications: [:odbc] ++ UMP.extra_applications(),
+      mod: {:emqx_bridge_snowflake_app, []}
+    ]
+  end
+
+  def deps() do
+    [
+      {:emqx_resource, in_umbrella: true},
+      {:emqx_connector_jwt, in_umbrella: true},
+      {:emqx_connector_aggregator, in_umbrella: true},
+      UMP.common_dep(:ehttpc),
+      UMP.common_dep(:ecpool),
+      UMP.common_dep(:gproc),
+    ]
+  end
+end

+ 16 - 0
apps/emqx_bridge_snowflake/rebar.config

@@ -0,0 +1,16 @@
+%% -*- mode: erlang; -*-
+
+{erl_opts, [
+    warn_unused_vars,
+    warn_shadow_vars,
+    warn_unused_import,
+    warn_obsolete_guard,
+    warnings_as_errors,
+    debug_info
+]}.
+
+{deps, [
+    {emqx_resource, {path, "../../apps/emqx_resource"}},
+    {emqx_connector_jwt, {path, "../../apps/emqx_connector_jwt"}},
+    {emqx_connector_aggregator, {path, "../../apps/emqx_connector_aggregator"}}
+]}.

+ 26 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake.app.src

@@ -0,0 +1,26 @@
+{application, emqx_bridge_snowflake, [
+    {description, "EMQX Enterprise Snowflake Bridge"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {applications, [
+        kernel,
+        stdlib,
+        odbc,
+        gproc,
+        ecpool,
+        ehttpc,
+        emqx_resource,
+        emqx_connector_jwt
+    ]},
+    {env, [
+        {emqx_action_info_modules, [
+            emqx_bridge_snowflake_action_info
+        ]},
+        {emqx_connector_info_modules, [
+            emqx_bridge_snowflake_connector_info
+        ]}
+    ]},
+    {mod, {emqx_bridge_snowflake_app, []}},
+    {modules, []},
+    {links, []}
+]}.

+ 21 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake.hrl

@@ -0,0 +1,21 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-ifndef(__EMQX_BRIDGE_SNOWFLAKE_HRL__).
+-define(__EMQX_BRIDGE_SNOWFLAKE_HRL__, true).
+
+-define(CONNECTOR_TYPE, snowflake).
+-define(CONNECTOR_TYPE_BIN, <<"snowflake">>).
+
+-define(ACTION_TYPE, snowflake).
+-define(ACTION_TYPE_BIN, <<"snowflake">>).
+
+-define(SERVER_OPTS, #{
+    default_port => 443
+}).
+
+-define(AGGREG_SUP, emqx_bridge_snowflake_sup).
+
+%% END ifndef(__EMQX_BRIDGE_SNOWFLAKE_HRL__)
+-endif.

+ 34 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_action_info.erl

@@ -0,0 +1,34 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_snowflake_action_info).
+
+-behaviour(emqx_action_info).
+
+-include("emqx_bridge_snowflake.hrl").
+
+%% `emqx_action_info' API
+-export([
+    action_type_name/0,
+    connector_type_name/0,
+    schema_module/0
+]).
+
+%%------------------------------------------------------------------------------
+%% Type declarations
+%%------------------------------------------------------------------------------
+
+%%------------------------------------------------------------------------------
+%% `emqx_action_info' API
+%%------------------------------------------------------------------------------
+
+action_type_name() -> ?ACTION_TYPE.
+
+connector_type_name() -> ?CONNECTOR_TYPE.
+
+schema_module() -> emqx_bridge_snowflake_action_schema.
+
+%%------------------------------------------------------------------------------
+%% Internal fns
+%%------------------------------------------------------------------------------

+ 241 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_action_schema.erl

@@ -0,0 +1,241 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_snowflake_action_schema).
+
+-include_lib("typerefl/include/types.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+-include("emqx_bridge_snowflake.hrl").
+
+%% `hocon_schema' API
+-export([
+    namespace/0,
+    roots/0,
+    fields/1,
+    desc/1
+]).
+
+%% `emqx_bridge_v2_schema' "unofficial" API
+-export([
+    bridge_v2_examples/1
+]).
+
+%% API
+-export([]).
+
+%%------------------------------------------------------------------------------
+%% Type declarations
+%%------------------------------------------------------------------------------
+
+%%-------------------------------------------------------------------------------------------------
+%% `hocon_schema' API
+%%-------------------------------------------------------------------------------------------------
+
+namespace() ->
+    "action_snowflake".
+
+roots() ->
+    [].
+
+fields(Field) when
+    Field == "get_bridge_v2";
+    Field == "put_bridge_v2";
+    Field == "post_bridge_v2"
+->
+    emqx_bridge_v2_schema:api_fields(Field, ?ACTION_TYPE, fields(?ACTION_TYPE));
+fields(action) ->
+    {?ACTION_TYPE,
+        mk(
+            hoconsc:map(name, hoconsc:ref(?MODULE, ?ACTION_TYPE)),
+            #{
+                desc => <<"Snowflake Action Config">>,
+                required => false
+            }
+        )};
+fields(?ACTION_TYPE) ->
+    emqx_bridge_v2_schema:make_producer_action_schema(
+        mk(
+            mkunion(mode, #{
+                %% To be implemented
+                %% <<"direct">> => ref(direct_parameters),
+                <<"aggregated">> => ref(aggreg_parameters)
+            }),
+            #{
+                required => true,
+                desc => ?DESC("parameters")
+            }
+        ),
+        #{resource_opts_ref => ref(action_resource_opts)}
+    );
+fields(aggreg_parameters) ->
+    [
+        {mode, mk(aggregated, #{required => true, desc => ?DESC("aggregated_mode")})},
+        {aggregation, mk(ref(aggregation), #{required => true, desc => ?DESC("aggregation")})},
+        {private_key, emqx_schema_secret:mk(#{required => true, desc => ?DESC("private_key")})},
+        {database, mk(binary(), #{required => true, desc => ?DESC("database")})},
+        {schema, mk(binary(), #{required => true, desc => ?DESC("schema")})},
+        {stage, mk(binary(), #{required => true, desc => ?DESC("stage")})},
+        {pipe, mk(binary(), #{required => true, desc => ?DESC("pipe")})},
+        {pipe_user, mk(binary(), #{required => true, desc => ?DESC("pipe_user")})},
+        {connect_timeout,
+            mk(emqx_schema:timeout_duration_ms(), #{
+                default => <<"15s">>, desc => ?DESC("connect_timeout")
+            })},
+        {pipelining, mk(pos_integer(), #{default => 100, desc => ?DESC("pipelining")})},
+        {pool_size, mk(pos_integer(), #{default => 8, desc => ?DESC("pool_size")})},
+        {max_retries, mk(non_neg_integer(), #{required => false, desc => ?DESC("max_retries")})},
+        {max_block_size,
+            mk(
+                emqx_schema:bytesize(),
+                #{
+                    default => <<"250mb">>,
+                    importance => ?IMPORTANCE_HIDDEN,
+                    required => true
+                }
+            )},
+        {min_block_size,
+            mk(
+                emqx_schema:bytesize(),
+                #{
+                    default => <<"100mb">>,
+                    importance => ?IMPORTANCE_HIDDEN,
+                    required => true
+                }
+            )}
+    ];
+fields(direct_parameters) ->
+    %% to be implemented
+    [{mode, mk(direct, #{required => true, desc => ?DESC("direct_mode")})}];
+fields(aggregation) ->
+    [
+        emqx_connector_aggregator_schema:container(),
+        {time_interval,
+            hoconsc:mk(
+                emqx_schema:duration_s(),
+                #{
+                    required => false,
+                    default => <<"1h">>,
+                    desc => ?DESC("aggregation_interval")
+                }
+            )},
+        {max_records,
+            hoconsc:mk(
+                pos_integer(),
+                #{
+                    required => false,
+                    default => 1_000_000,
+                    desc => ?DESC("aggregation_max_records")
+                }
+            )}
+    ];
+fields(action_resource_opts) ->
+    %% NOTE: This action should benefit from generous batching defaults.
+    emqx_bridge_v2_schema:action_resource_opts_fields([
+        {batch_size, #{default => 100}},
+        {batch_time, #{default => <<"10ms">>}}
+    ]).
+
+desc(Name) when
+    Name =:= ?ACTION_TYPE;
+    Name =:= aggreg_parameters;
+    Name =:= aggregation;
+    Name =:= parameters
+->
+    ?DESC(Name);
+desc(action_resource_opts) ->
+    ?DESC(emqx_resource_schema, "creation_opts");
+desc(_Name) ->
+    undefined.
+
+%%-------------------------------------------------------------------------------------------------
+%% `emqx_bridge_v2_schema' "unofficial" API
+%%-------------------------------------------------------------------------------------------------
+
+bridge_v2_examples(Method) ->
+    [
+        #{
+            ?ACTION_TYPE_BIN => #{
+                summary => <<"Snowflake Action">>,
+                value => action_example(Method)
+            }
+        }
+    ].
+
+action_example(post) ->
+    maps:merge(
+        action_example(put),
+        #{
+            type => ?ACTION_TYPE_BIN,
+            name => <<"my_action">>
+        }
+    );
+action_example(get) ->
+    maps:merge(
+        action_example(put),
+        #{
+            status => <<"connected">>,
+            node_status => [
+                #{
+                    node => <<"emqx@localhost">>,
+                    status => <<"connected">>
+                }
+            ]
+        }
+    );
+action_example(put) ->
+    #{
+        enable => true,
+        description => <<"my action">>,
+        connector => <<"my_connector">>,
+        parameters =>
+            #{
+                mode => <<"aggregated">>,
+                aggregation => #{
+                    container => #{type => <<"csv">>},
+                    max_records => 1000,
+                    time_interval => <<"60s">>
+                },
+                connect_timeout => <<"15s">>,
+                database => <<"testdatabase">>,
+                pipe => <<"testpipe">>,
+                pipe_user => <<"pipeuser">>,
+                schema => <<"public">>,
+                stage => <<"teststage">>,
+                private_key => <<"file:///path/to/secret.pem">>,
+                max_retries => 3,
+                pipelining => 100,
+                pool_size => 16
+            },
+        resource_opts =>
+            #{
+                batch_time => <<"60s">>,
+                batch_size => 10_000,
+                health_check_interval => <<"30s">>,
+                inflight_window => 100,
+                query_mode => <<"sync">>,
+                request_ttl => <<"45s">>,
+                worker_pool_size => 16
+            }
+    }.
+
+%%------------------------------------------------------------------------------
+%% Internal fns
+%%------------------------------------------------------------------------------
+
+ref(Name) -> hoconsc:ref(?MODULE, Name).
+mk(Type, Meta) -> hoconsc:mk(Type, Meta).
+
+mkunion(Field, Schemas) ->
+    hoconsc:union(fun(Arg) -> scunion(Field, Schemas, Arg) end).
+
+scunion(_Field, Schemas, all_union_members) ->
+    maps:values(Schemas);
+scunion(Field, Schemas, {value, Value}) ->
+    Selector = maps:get(emqx_utils_conv:bin(Field), Value, undefined),
+    case Selector == undefined orelse maps:find(emqx_utils_conv:bin(Selector), Schemas) of
+        {ok, Schema} ->
+            [Schema];
+        _Error ->
+            throw(#{field_name => Field, expected => maps:keys(Schemas)})
+    end.

+ 27 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_app.erl

@@ -0,0 +1,27 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+%% @doc OTP `application' callback module for the Snowflake bridge app.
+-module(emqx_bridge_snowflake_app).
+
+-behaviour(application).
+
+%% `application' API
+-export([start/2, stop/1]).
+
+%%------------------------------------------------------------------------------
+%% Type declarations
+%%------------------------------------------------------------------------------
+
+%%------------------------------------------------------------------------------
+%% `application' API
+%%------------------------------------------------------------------------------
+
+%% Starts the application's top-level supervisor.
+start(_StartType, _StartArgs) ->
+    emqx_bridge_snowflake_sup:start_link().
+
+%% Nothing to clean up; supervision tree teardown is handled by OTP.
+stop(_State) ->
+    ok.
+
+%%------------------------------------------------------------------------------
+%% Internal fns
+%%------------------------------------------------------------------------------

+ 979 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_connector.erl

@@ -0,0 +1,979 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_snowflake_connector).
+
+-feature(maybe_expr, enable).
+
+-behaviour(emqx_resource).
+-behaviour(emqx_connector_aggreg_delivery).
+
+-include_lib("public_key/include/public_key.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/trace.hrl").
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+-include_lib("emqx/include/emqx_trace.hrl").
+-include("emqx_bridge_snowflake.hrl").
+-include_lib("emqx_connector_aggregator/include/emqx_connector_aggregator.hrl").
+
+-elvis([{elvis_style, macro_module_names, disable}]).
+
+%% `emqx_resource' API
+-export([
+    resource_type/0,
+    callback_mode/0,
+
+    on_start/2,
+    on_stop/2,
+    on_get_status/2,
+
+    on_get_channels/1,
+    on_add_channel/4,
+    on_remove_channel/3,
+    on_get_channel_status/3,
+
+    on_query/3,
+    on_batch_query/3
+]).
+
+%% `ecpool_worker' API
+-export([
+    connect/1,
+    disconnect/1,
+    do_health_check_connector/1,
+    do_stage_file/6
+]).
+
+%% `emqx_connector_aggreg_delivery' API
+-export([
+    init_transfer_state/2,
+    process_append/2,
+    process_write/1,
+    process_complete/1
+]).
+
+%% API
+-export([
+    insert_report/2
+]).
+
+%% Internal exports only for mocking
+-export([do_insert_files_request/4]).
+
+%%------------------------------------------------------------------------------
+%% Type declarations
+%%------------------------------------------------------------------------------
+
+-define(HC_TIMEOUT, 15_000).
+
+%% Ad-hoc requests
+-record(insert_report, {action_res_id :: action_resource_id(), opts :: ad_hoc_query_opts()}).
+
+-type connector_config() :: #{
+    server := binary(),
+    account := account(),
+    username := binary(),
+    password := emqx_schema_secret:secret(),
+    dsn := binary(),
+    pool_size := pos_integer()
+}.
+-type connector_state() :: #{
+    account := account(),
+    server := #{host := binary(), port := emqx_schema:port_number()},
+    installed_actions := #{action_resource_id() => action_state()}
+}.
+
+-type action_config() :: aggregated_action_config().
+-type aggregated_action_config() :: #{
+    parameters := #{
+        mode := aggregated,
+        database := database(),
+        schema := schema(),
+        pipe := pipe(),
+        pipe_user := binary(),
+        private_key := emqx_schema_secret:secret(),
+        connect_timeout := emqx_schema:timeout_duration(),
+        pipelining := non_neg_integer(),
+        pool_size := pos_integer(),
+        max_retries := non_neg_integer()
+    }
+}.
+-type action_state() :: #{}.
+
+-type account() :: binary().
+-type database() :: binary().
+-type schema() :: binary().
+-type stage() :: binary().
+-type pipe() :: binary().
+
+-type odbc_pool() :: connector_resource_id().
+-type http_pool() :: action_resource_id().
+-type http_client_config() :: #{
+    jwt_config := emqx_connector_jwt:jwt_config(),
+    insert_files_path := binary(),
+    insert_report_path := binary(),
+    max_retries := non_neg_integer(),
+    request_ttl := timeout()
+}.
+
+-type query() :: action_query() | insert_report_query().
+-type action_query() :: {_Tag :: channel_id(), _Data :: map()}.
+-type insert_report_query() :: #insert_report{}.
+
+-type ad_hoc_query_opts() :: map().
+
+-type action_name() :: binary().
+
+-type transfer_opts() :: #{
+    container := #{type := emqx_connector_aggregator:container_type()},
+    upload_options := #{
+        action := action_name(),
+        database := database(),
+        schema := schema(),
+        stage := stage(),
+        odbc_pool := odbc_pool(),
+        http_pool := http_pool(),
+        http_client_config := http_client_config(),
+        min_block_size := pos_integer(),
+        max_block_size := pos_integer(),
+        work_dir := file:filename()
+    }
+}.
+
+-type transfer_state() :: #{
+    action_name := action_name(),
+
+    buffer_seq := non_neg_integer(),
+    buffer_datetime := string(),
+    seq_no := non_neg_integer(),
+    container_type := emqx_connector_aggregator:container_type(),
+
+    http_pool := http_pool(),
+    http_client_config := http_client_config(),
+
+    odbc_pool := odbc_pool(),
+    database := database(),
+    schema := schema(),
+    stage := stage(),
+    filename_template := emqx_template:t(),
+    filename := emqx_maybe:t(file:filename()),
+    fd := emqx_maybe:t(file:io_device()),
+    work_dir := file:filename(),
+    written := non_neg_integer(),
+    staged_files := [staged_file()],
+    next_file := queue:queue({file:filename(), non_neg_integer()}),
+
+    max_block_size := pos_integer(),
+    min_block_size := pos_integer()
+}.
+-type staged_file() :: #{
+    path := file:filename(),
+    size := non_neg_integer()
+}.
+
+%%------------------------------------------------------------------------------
+%% `emqx_resource' API
+%%------------------------------------------------------------------------------
+
+%% Resource type tag used by `emqx_resource' for bookkeeping/metrics.
+-spec resource_type() -> atom().
+resource_type() ->
+    snowflake.
+
+%% All queries are handled synchronously by this connector.
+-spec callback_mode() -> callback_mode().
+callback_mode() ->
+    always_sync.
+
+%% Starts the connector: an ecpool of ODBC connections to Snowflake, one
+%% pool per connector resource id. The HTTP (Snowpipe) side is started per
+%% action in on_add_channel/4.
+-spec on_start(connector_resource_id(), connector_config()) ->
+    {ok, connector_state()} | {error, _Reason}.
+on_start(ConnResId, ConnConfig) ->
+    #{
+        server := Server,
+        account := Account,
+        username := Username,
+        password := Password,
+        dsn := DSN,
+        pool_size := PoolSize
+    } = ConnConfig,
+    %% Host/port are kept in state for building the Snowpipe REST URLs later.
+    #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SERVER_OPTS),
+    PoolOpts = [
+        {pool_size, PoolSize},
+        {dsn, DSN},
+        {account, Account},
+        {server, Server},
+        %% Password stays wrapped (emqx_schema_secret); it is only unwrapped
+        %% inside conn_str/2 when a worker actually connects.
+        {username, Username},
+        {password, Password},
+        {on_disconnect, {?MODULE, disconnect, []}}
+    ],
+    case emqx_resource_pool:start(ConnResId, ?MODULE, PoolOpts) of
+        ok ->
+            State = #{
+                account => Account,
+                server => #{host => Host, port => Port},
+                installed_actions => #{}
+            },
+            {ok, State};
+        {error, Reason} ->
+            {error, Reason}
+    end.
+
+%% Stops the ODBC pool; the trace point is for test synchronization.
+-spec on_stop(connector_resource_id(), connector_state()) -> ok.
+on_stop(ConnResId, _ConnState) ->
+    Res = emqx_resource_pool:stop(ConnResId),
+    ?tp("snowflake_connector_stop", #{instance_id => ConnResId}),
+    Res.
+
+%% Connector health check: probes every ODBC pool worker (see
+%% do_health_check_connector/1).
+-spec on_get_status(connector_resource_id(), connector_state()) ->
+    ?status_connected | ?status_disconnected.
+on_get_status(ConnResId, _ConnState) ->
+    health_check_connector(ConnResId).
+
+%% Installs an action (channel): starts its HTTP pool and aggregator, and
+%% records the resulting action state under `installed_actions'.
+-spec on_add_channel(
+    connector_resource_id(),
+    connector_state(),
+    action_resource_id(),
+    action_config()
+) ->
+    {ok, connector_state()}.
+on_add_channel(ConnResId, ConnState0, ActionResId, ActionConfig) ->
+    maybe
+        {ok, ActionState} ?= create_action(ConnResId, ActionResId, ActionConfig, ConnState0),
+        ConnState = emqx_utils_maps:deep_put(
+            [installed_actions, ActionResId], ConnState0, ActionState
+        ),
+        {ok, ConnState}
+    end.
+
+%% Removes an action, tearing down its aggregator and HTTP pool.
+%% Idempotent: unknown action ids fall through to the second clause.
+-spec on_remove_channel(
+    connector_resource_id(),
+    connector_state(),
+    action_resource_id()
+) ->
+    {ok, connector_state()}.
+on_remove_channel(
+    _ConnResId, ConnState0 = #{installed_actions := InstalledActions0}, ActionResId
+) when
+    is_map_key(ActionResId, InstalledActions0)
+->
+    {ActionState, InstalledActions} = maps:take(ActionResId, InstalledActions0),
+    destroy_action(ActionResId, ActionState),
+    ConnState = ConnState0#{installed_actions := InstalledActions},
+    {ok, ConnState};
+on_remove_channel(_ConnResId, ConnState, _ActionResId) ->
+    {ok, ConnState}.
+
+%% Channel configs are owned by the bridge layer; just delegate.
+-spec on_get_channels(connector_resource_id()) ->
+    [{action_resource_id(), action_config()}].
+on_get_channels(ConnResId) ->
+    emqx_bridge_v2:get_channels_for_connector(ConnResId).
+
+%% Health of one installed action; unknown ids report disconnected.
+-spec on_get_channel_status(
+    connector_resource_id(),
+    action_resource_id(),
+    connector_state()
+) ->
+    ?status_connected | ?status_disconnected.
+on_get_channel_status(
+    _ConnResId,
+    ActionResId,
+    _ConnState = #{installed_actions := InstalledActions}
+) when is_map_key(ActionResId, InstalledActions) ->
+    ActionState = maps:get(ActionResId, InstalledActions),
+    action_status(ActionResId, ActionState);
+on_get_channel_status(_ConnResId, _ActionResId, _ConnState) ->
+    ?status_disconnected.
+
+%% Handles a single query: either an action message (pushed into the
+%% aggregator) or an ad-hoc #insert_report{} debug request.
+-spec on_query(connector_resource_id(), query(), connector_state()) ->
+    {ok, _Result} | {error, _Reason}.
+on_query(
+    _ConnResId, {ActionResId, Data}, #{installed_actions := InstalledActions} = _ConnState
+) when
+    is_map_key(ActionResId, InstalledActions)
+->
+    case InstalledActions of
+        %% Placeholder for a future non-aggregated (direct insert) mode:
+        %% #{ActionResId := #{mode := direct} = _ActionState} ->
+        %%     {ok, todo};
+        #{ActionResId := #{mode := aggregated} = ActionState} ->
+            run_aggregated_action([Data], ActionState)
+    end;
+on_query(
+    _ConnResId,
+    #insert_report{action_res_id = ActionResId, opts = Opts},
+    #{installed_actions := InstalledActions} = _ConnState
+) when
+    is_map_key(ActionResId, InstalledActions)
+->
+    #{mode := aggregated, http := HTTPClientConfig} = maps:get(ActionResId, InstalledActions),
+    insert_report_request(ActionResId, Opts, HTTPClientConfig);
+on_query(_ConnResId, Query, _ConnState) ->
+    {error, {unrecoverable_error, {invalid_query, Query}}}.
+
+%% Batched variant of on_query/3; all entries of a batch share the same
+%% action id, so only the head is inspected.
+-spec on_batch_query(connector_resource_id(), [query()], connector_state()) ->
+    {ok, _Result} | {error, _Reason}.
+on_batch_query(_ConnResId, [{ActionResId, _} | _] = Batch0, #{installed_actions := InstalledActions}) when
+    is_map_key(ActionResId, InstalledActions)
+->
+    case InstalledActions of
+        %% Placeholder for a future non-aggregated (direct insert) mode:
+        %% #{ActionResId := #{mode := direct} = _ActionState} ->
+        %%     {ok, todo};
+        #{ActionResId := #{mode := aggregated} = ActionState} ->
+            Batch = [Data || {_, Data} <- Batch0],
+            run_aggregated_action(Batch, ActionState)
+    end;
+on_batch_query(_ConnResId, Batch, _ConnState) ->
+    {error, {unrecoverable_error, {bad_batch, Batch}}}.
+
+%%------------------------------------------------------------------------------
+%% API
+%%------------------------------------------------------------------------------
+
+%% Used for debugging.
+%% Used for debugging: queries the Snowpipe insertReport endpoint for the
+%% given action via an ad-hoc query bypassing the buffering layer.
+-spec insert_report(action_resource_id(), _Opts :: map()) -> {ok, map()} | {error, term()}.
+insert_report(ActionResId, Opts) ->
+    emqx_resource:simple_sync_query(
+        ActionResId, #insert_report{action_res_id = ActionResId, opts = Opts}
+    ).
+
+%%------------------------------------------------------------------------------
+%% `ecpool_worker' API
+%%------------------------------------------------------------------------------
+
+%% `ecpool_worker' callback: opens one ODBC connection using the options
+%% given to emqx_resource_pool:start/3 (see on_start/2).
+connect(Opts) ->
+    ConnectStr = conn_str(Opts),
+    DriverOpts = proplists:get_value(driver_options, Opts, []),
+    odbc:connect(ConnectStr, DriverOpts).
+
+%% `ecpool_worker' on_disconnect callback.
+disconnect(ConnectionPid) ->
+    odbc:disconnect(ConnectionPid).
+
+%% Probes all pool workers; connected only if every worker answers within
+%% ?HC_TIMEOUT.
+health_check_connector(ConnResId) ->
+    Res = emqx_resource_pool:health_check_workers(
+        ConnResId,
+        fun ?MODULE:do_health_check_connector/1,
+        ?HC_TIMEOUT
+    ),
+    case Res of
+        true ->
+            ?status_connected;
+        false ->
+            ?status_disconnected
+    end.
+
+%% Cheap liveness probe executed on each ODBC connection.
+do_health_check_connector(ConnectionPid) ->
+    case odbc:sql_query(ConnectionPid, "show schemas") of
+        {selected, _, _} ->
+            true;
+        _ ->
+            false
+    end.
+
+%% Uploads a local buffer file to the Snowflake stage with a `PUT' SQL
+%% statement executed over ODBC, then interprets the result.
+-spec stage_file(odbc_pool(), file:filename(), database(), schema(), stage(), action_name()) ->
+    {ok, file:filename()} | {error, term()}.
+stage_file(ODBCPool, Filename, Database, Schema, Stage, ActionName) ->
+    Res = ecpool:pick_and_do(
+        ODBCPool,
+        fun(ConnPid) ->
+            ?MODULE:do_stage_file(ConnPid, Filename, Database, Schema, Stage, ActionName)
+        end,
+        %% Must be executed by the ecpool worker, which owns the ODBC connection.
+        handover
+    ),
+    Context = #{
+        filename => Filename,
+        database => Database,
+        schema => Schema,
+        stage => Stage,
+        pool => ODBCPool
+    },
+    handle_stage_file_result(Res, Context).
+
+%% Runs the PUT statement on one ODBC connection (fully-qualified for
+%% mocking in tests).
+-spec do_stage_file(
+    odbc:connection_reference(), file:filename(), database(), schema(), stage(), action_name()
+) ->
+    {ok, file:filename()} | {error, term()}.
+do_stage_file(ConnPid, Filename, Database, Schema, Stage, ActionName) ->
+    SQL = stage_file_sql(Filename, Database, Schema, Stage, ActionName),
+    %% Should we also check if it actually succeeded by inspecting reportFiles?
+    odbc:sql_query(ConnPid, SQL).
+
+%% Interprets the PUT result set. On UPLOADED/SKIPPED the local file is
+%% deleted and the staged target filename returned; anything else is an
+%% error. SKIPPED means the file was already present on the stage.
+-spec handle_stage_file_result({selected, [string()], [tuple()]} | {error, term()}, map()) ->
+    {ok, file:filename()} | {error, term()}.
+handle_stage_file_result({selected, Headers0, Rows}, Context) ->
+    #{filename := Filename} = Context,
+    Headers = lists:map(fun emqx_utils_conv:bin/1, Headers0),
+    ParsedRows = lists:map(fun(R) -> row_to_map(R, Headers) end, Rows),
+    case ParsedRows of
+        [#{<<"target">> := Target, <<"status">> := <<"UPLOADED">>}] ->
+            ?tp(debug, "snowflake_stage_file_succeeded", Context#{
+                result => ParsedRows
+            }),
+            ok = file:delete(Filename),
+            {ok, Target};
+        [#{<<"target">> := Target, <<"status">> := <<"SKIPPED">>}] ->
+            ?tp(info, "snowflake_stage_file_skipped", Context#{
+                result => ParsedRows
+            }),
+            ok = file:delete(Filename),
+            {ok, Target};
+        _ ->
+            ?tp(warning, "snowflake_stage_bad_response", Context#{
+                result => ParsedRows
+            }),
+            {error, {bad_response, ParsedRows}}
+    end;
+handle_stage_file_result({error, Reason} = Error, Context) ->
+    ?tp(warning, "snowflake_stage_file_failed", Context#{
+        reason => Reason
+    }),
+    Error.
+
+%%------------------------------------------------------------------------------
+%% `emqx_connector_aggreg_delivery' API
+%%------------------------------------------------------------------------------
+
+%% `emqx_connector_aggreg_delivery' callback: builds the initial transfer
+%% state for one aggregated buffer delivery. Local chunk files are named
+%% from the buffer timestamp/sequence plus a per-transfer sequence number.
+-spec init_transfer_state(buffer(), transfer_opts()) ->
+    transfer_state().
+init_transfer_state(Buffer, Opts) ->
+    #{
+        container := #{type := ContainerType},
+        upload_options := #{
+            action := ActionName,
+            database := Database,
+            schema := Schema,
+            stage := Stage,
+            odbc_pool := ODBCPool,
+            http_pool := HTTPPool,
+            http_client_config := HTTPClientConfig,
+            max_block_size := MaxBlockSize,
+            min_block_size := MinBlockSize,
+            work_dir := WorkDir
+        }
+    } = Opts,
+    BufferSeq = emqx_connector_aggreg_buffer_ctx:sequence(Buffer),
+    BufferDT = emqx_connector_aggreg_buffer_ctx:datetime(Buffer, <<"unix">>),
+    FilenameTemplate = emqx_template:parse(
+        <<"${buffer_datetime}_${buffer_seq}_${seq_no}.${container_type}">>
+    ),
+    #{
+        action_name => ActionName,
+
+        buffer_seq => BufferSeq,
+        buffer_datetime => BufferDT,
+        seq_no => 0,
+        container_type => ContainerType,
+
+        http_pool => HTTPPool,
+        http_client_config => HTTPClientConfig,
+
+        odbc_pool => ODBCPool,
+        database => Database,
+        schema => Schema,
+        stage => Stage,
+        filename_template => FilenameTemplate,
+        %% No file open yet; ensure_file/1 lazily opens the first chunk.
+        filename => undefined,
+        fd => undefined,
+        work_dir => WorkDir,
+        written => 0,
+        staged_files => [],
+        next_file => queue:new(),
+
+        max_block_size => MaxBlockSize,
+        min_block_size => MinBlockSize
+    }.
+
+%% `emqx_connector_aggreg_delivery' callback: appends encoded records to
+%% the current local chunk file; once `min_block_size' bytes have been
+%% written the chunk is closed and queued for staging.
+-spec process_append(iodata(), transfer_state()) ->
+    transfer_state().
+process_append(IOData, TransferState0) ->
+    #{min_block_size := MinBlockSize} = TransferState0,
+    Size = iolist_size(IOData),
+    %% Open and write to file until minimum is reached
+    TransferState1 = ensure_file(TransferState0),
+    #{written := Written} = TransferState2 = append_to_file(IOData, Size, TransferState1),
+    case Written >= MinBlockSize of
+        true ->
+            close_and_enqueue_file(TransferState2);
+        false ->
+            TransferState2
+    end.
+
+%% Lazily opens the next chunk file under <work_dir>/tmp, named from the
+%% filename template rendered with the current sequence numbers.
+ensure_file(#{fd := undefined} = TransferState) ->
+    #{
+        buffer_datetime := BufferDT,
+        buffer_seq := BufferSeq,
+        container_type := ContainerType,
+        filename_template := FilenameTemplate,
+        seq_no := SeqNo,
+        work_dir := WorkDir
+    } = TransferState,
+    Filename0 = emqx_template:render_strict(FilenameTemplate, #{
+        buffer_datetime => BufferDT,
+        buffer_seq => BufferSeq,
+        seq_no => SeqNo,
+        container_type => ContainerType
+    }),
+    Filename1 = filename:join([WorkDir, <<"tmp">>, Filename0]),
+    Filename2 = filename:absname(Filename1),
+    Filename = emqx_utils:safe_filename(Filename2),
+    ok = filelib:ensure_dir(Filename),
+    {ok, FD} = file:open(Filename, [write, binary]),
+    TransferState#{
+        filename := Filename,
+        fd := FD
+    };
+ensure_file(TransferState) ->
+    TransferState.
+
+%% Writes one encoded record batch to the open chunk and bumps the byte
+%% counter. `Size' is precomputed by the caller from the same iodata.
+append_to_file(IOData, Size, TransferState) ->
+    #{
+        fd := FD,
+        written := Written
+    } = TransferState,
+    %% Todo: handle errors?
+    ok = file:write(FD, IOData),
+    TransferState#{written := Written + Size}.
+
+%% Closes the current chunk and enqueues {Filename, Size} for staging by
+%% process_write/1; resets the write counter and bumps seq_no.
+close_and_enqueue_file(TransferState0) ->
+    #{
+        fd := FD,
+        filename := Filename,
+        next_file := NextFile,
+        seq_no := SeqNo,
+        written := Written
+    } = TransferState0,
+    ok = file:close(FD),
+    TransferState0#{
+        next_file := queue:in({Filename, Written}, NextFile),
+        filename := undefined,
+        fd := undefined,
+        seq_no := SeqNo + 1,
+        written := 0
+    }.
+
+%% `emqx_connector_aggreg_delivery' callback: drains the queue of closed
+%% chunk files, staging each one in Snowflake (recursively via
+%% do_process_write/3). Returns {ok, State} once the queue is empty.
+-spec process_write(transfer_state()) ->
+    {ok, transfer_state()} | {error, term()}.
+process_write(TransferState0) ->
+    #{next_file := NextFile0} = TransferState0,
+    case queue:out(NextFile0) of
+        {{value, {Filename, Size}}, NextFile} ->
+            ?tp(snowflake_will_stage_file, #{}),
+            do_process_write(Filename, Size, TransferState0#{next_file := NextFile});
+        {empty, _} ->
+            {ok, TransferState0}
+    end.
+
+%% Stages one chunk file and records it (path relative to the action's
+%% stage directory) for the later insertFiles call; loops back into
+%% process_write/1 for the remaining queue.
+-spec do_process_write(file:filename(), non_neg_integer(), transfer_state()) ->
+    {ok, transfer_state()} | {error, term()}.
+do_process_write(Filename, Size, TransferState0) ->
+    #{
+        action_name := ActionName,
+        odbc_pool := ODBCPool,
+        database := Database,
+        schema := Schema,
+        stage := Stage,
+        staged_files := StagedFiles0
+    } = TransferState0,
+    case stage_file(ODBCPool, Filename, Database, Schema, Stage, ActionName) of
+        {ok, Target0} ->
+            Target = filename:join(ActionName, Target0),
+            StagedFile = #{path => Target, size => Size},
+            StagedFiles = [StagedFile | StagedFiles0],
+            TransferState = TransferState0#{staged_files := StagedFiles},
+            process_write(TransferState);
+        {error, Reason} ->
+            %% TODO: retry?
+            {error, Reason}
+    end.
+
+%% `emqx_connector_aggreg_delivery' callback: flushes any partially
+%% written chunk, then asks Snowpipe to ingest all staged files via the
+%% insertFiles REST endpoint. A non-200 response exits the delivery
+%% process (presumably retried by the aggregator supervisor — confirm).
+-spec process_complete(transfer_state()) ->
+    {ok, term()}.
+process_complete(TransferState0) ->
+    #{written := Written0} = TransferState0,
+    maybe
+        %% Flush any left-over data
+        {ok, TransferState} ?=
+            case Written0 > 0 of
+                true ->
+                    ?tp("snowflake_flush_on_complete", #{}),
+                    TransferState1 = close_and_enqueue_file(TransferState0),
+                    process_write(TransferState1);
+                false ->
+                    {ok, TransferState0}
+            end,
+        #{
+            http_pool := HTTPPool,
+            http_client_config := HTTPClientConfig,
+            staged_files := StagedFiles
+        } = TransferState,
+        case insert_files_request(StagedFiles, HTTPPool, HTTPClientConfig) of
+            {ok, 200, _, Body} ->
+                {ok, emqx_utils_json:decode(Body, [return_maps])};
+            Res ->
+                %% TODO: retry?
+                exit({insert_failed, Res})
+        end
+    end.
+
+%%------------------------------------------------------------------------------
+%% Internal fns
+%%------------------------------------------------------------------------------
+
+%% Creates the per-action resources: first the ehttpc pool used for the
+%% Snowpipe REST API, then the aggregator supervision subtree.
+-spec create_action(
+    connector_resource_id(), action_resource_id(), action_config(), connector_state()
+) ->
+    {ok, action_state()} | {error, term()}.
+create_action(
+    ConnResId, ActionResId, #{parameters := #{mode := aggregated}} = ActionConfig, ConnState
+) ->
+    maybe
+        {ok, ActionState0} ?= start_http_pool(ActionResId, ActionConfig, ConnState),
+        start_aggregator(ConnResId, ActionResId, ActionConfig, ActionState0)
+    end.
+
+%% Starts an ehttpc pool (named after the action resource id) targeting
+%% the Snowpipe v1 REST endpoints for the configured pipe, and returns the
+%% HTTP client config kept in the action state.
+start_http_pool(ActionResId, ActionConfig, ConnState) ->
+    #{server := #{host := Host, port := Port}} = ConnState,
+    #{
+        parameters := #{
+            database := Database,
+            schema := Schema,
+            pipe := Pipe,
+            pipe_user := _,
+            private_key := _,
+            connect_timeout := ConnectTimeout,
+            pipelining := Pipelining,
+            pool_size := PoolSize,
+            max_retries := MaxRetries
+        },
+        resource_opts := #{request_ttl := RequestTTL}
+    } = ActionConfig,
+    %% Fully-qualified pipe path: <db>.<schema>.<pipe>, each part quoted
+    %% only when required (see maybe_quote/1).
+    PipeParts = lists:map(fun maybe_quote/1, [Database, Schema, Pipe]),
+    PipePath = iolist_to_binary(lists:join($., PipeParts)),
+    PipePrefix = iolist_to_binary([
+        <<"https://">>,
+        Host,
+        <<":">>,
+        integer_to_binary(Port),
+        <<"/v1/data/pipes/">>,
+        PipePath
+    ]),
+    %% NOTE(review): variable names below are missing a 't' ("Insert...");
+    %% harmless, but worth renaming for readability.
+    InserFilesPath = iolist_to_binary([
+        PipePrefix,
+        <<"/insertFiles">>
+    ]),
+    InserReportPath = iolist_to_binary([
+        PipePrefix,
+        <<"/insertReport">>
+    ]),
+    JWTConfig = jwt_config(ActionResId, ActionConfig, ConnState),
+    %% NOTE(review): TLS peer verification is disabled (verify_none) for
+    %% the Snowpipe endpoint — confirm this is intentional.
+    TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}),
+    PoolOpts = [
+        {host, Host},
+        {port, Port},
+        {connect_timeout, ConnectTimeout},
+        {keepalive, 30_000},
+        {pool_type, random},
+        {pool_size, PoolSize},
+        {transport, tls},
+        {transport_opts, TransportOpts},
+        {enable_pipelining, Pipelining}
+    ],
+    case ehttpc_sup:start_pool(ActionResId, PoolOpts) of
+        {ok, _} ->
+            {ok, #{
+                http => #{
+                    jwt_config => JWTConfig,
+                    insert_files_path => InserFilesPath,
+                    insert_report_path => InserReportPath,
+                    connect_timeout => ConnectTimeout,
+                    max_retries => MaxRetries,
+                    request_ttl => RequestTTL
+                }
+            }};
+        {error, Reason} ->
+            {error, Reason}
+    end.
+
+%% Starts the aggregator supervision subtree for this action under
+%% ?AGGREG_SUP; records aggreg_id and an on_stop MFA in the action state.
+%% On failure the already-started HTTP pool is torn down.
+start_aggregator(ConnResId, ActionResId, ActionConfig, ActionState0) ->
+    maybe
+        #{
+            bridge_name := Name,
+            parameters := #{
+                mode := aggregated = Mode,
+                database := Database,
+                schema := Schema,
+                stage := Stage,
+                aggregation := #{
+                    container := ContainerOpts,
+                    max_records := MaxRecords,
+                    time_interval := TimeInterval
+                },
+                max_block_size := MaxBlockSize,
+                min_block_size := MinBlockSize
+            }
+        } = ActionConfig,
+        #{http := HTTPClientConfig} = ActionState0,
+        Type = ?ACTION_TYPE_BIN,
+        AggregId = {Type, Name},
+        WorkDir = work_dir(Type, Name),
+        AggregOpts = #{
+            max_records => MaxRecords,
+            time_interval => TimeInterval,
+            work_dir => WorkDir
+        },
+        TransferOpts = #{
+            action => Name,
+            action_res_id => ActionResId,
+            odbc_pool => ConnResId,
+            database => Database,
+            schema => Schema,
+            stage => Stage,
+            http_pool => ActionResId,
+            http_client_config => HTTPClientConfig,
+            max_block_size => MaxBlockSize,
+            min_block_size => MinBlockSize,
+            work_dir => WorkDir
+        },
+        DeliveryOpts = #{
+            callback_module => ?MODULE,
+            container => ContainerOpts,
+            upload_options => TransferOpts
+        },
+        %% Clean up any stale child left over from a previous incarnation.
+        _ = ?AGGREG_SUP:delete_child(AggregId),
+        {ok, SupPid} ?=
+            ?AGGREG_SUP:start_child(#{
+                id => AggregId,
+                start =>
+                    {emqx_connector_aggreg_upload_sup, start_link, [
+                        AggregId, AggregOpts, DeliveryOpts
+                    ]},
+                type => supervisor,
+                restart => permanent
+            }),
+        {ok, ActionState0#{
+            mode => Mode,
+            aggreg_id => AggregId,
+            supervisor => SupPid,
+            on_stop => {?AGGREG_SUP, delete_child, [AggregId]}
+        }}
+    else
+        {error, Reason} ->
+            _ = ehttpc_sup:stop_pool(ActionResId),
+            {error, Reason}
+    end.
+
+%% Tears down an action: stops the aggregator (via the recorded on_stop
+%% MFA, if any) and then the action's HTTP pool.
+-spec destroy_action(action_resource_id(), action_state()) -> ok.
+destroy_action(ActionResId, ActionState) ->
+    case ActionState of
+        #{on_stop := {M, F, A}} ->
+            ok = apply(M, F, A);
+        _ ->
+            ok
+    end,
+    ok = ehttpc_sup:stop_pool(ActionResId),
+    ok.
+
+%% Pushes a batch of rendered records into the action's aggregator.
+run_aggregated_action(Batch, #{aggreg_id := AggregId}) ->
+    Timestamp = erlang:system_time(second),
+    case emqx_connector_aggregator:push_records(AggregId, Timestamp, Batch) of
+        ok ->
+            ok;
+        {error, Reason} ->
+            {error, {unrecoverable_error, Reason}}
+    end.
+
+%% Per-action scratch directory for buffer/chunk files.
+work_dir(Type, Name) ->
+    filename:join([emqx:data_dir(), bridge, Type, Name]).
+
+str(X) -> emqx_utils_conv:str(X).
+
+%% Builds the ODBC connection string ("k1=v1;k2=v2...") from the pool
+%% options proplist; unknown keys are ignored. Note that the accumulator
+%% is built by prepending, so the output order is the reverse of the
+%% input order — presumably irrelevant to the ODBC driver.
+conn_str(Opts) ->
+    lists:concat(conn_str(Opts, [])).
+
+conn_str([], Acc) ->
+    lists:join(";", Acc);
+conn_str([{dsn, DSN} | Opts], Acc) ->
+    conn_str(Opts, ["dsn=" ++ str(DSN) | Acc]);
+conn_str([{server, Server} | Opts], Acc) ->
+    conn_str(Opts, ["server=" ++ str(Server) | Acc]);
+conn_str([{account, Account} | Opts], Acc) ->
+    conn_str(Opts, ["account=" ++ str(Account) | Acc]);
+conn_str([{username, Username} | Opts], Acc) ->
+    conn_str(Opts, ["uid=" ++ str(Username) | Acc]);
+conn_str([{password, Password} | Opts], Acc) ->
+    %% The secret is unwrapped only here, at connect time.
+    conn_str(Opts, ["pwd=" ++ str(emqx_secret:unwrap(Password)) | Acc]);
+conn_str([{_, _} | Opts], Acc) ->
+    conn_str(Opts, Acc).
+
+%% Builds the emqx_connector_jwt config used to sign key-pair JWTs for
+%% the Snowpipe REST API.
+jwt_config(ActionResId, ActionConfig, ConnState) ->
+    #{account := Account} = ConnState,
+    #{
+        parameters := #{
+            private_key := PrivateKeyPEM,
+            pipe_user := PipeUser
+        }
+    } = ActionConfig,
+    PrivateJWK = jose_jwk:from_pem(emqx_secret:unwrap(PrivateKeyPEM)),
+    %% N.B.
+    %% The account_identifier and user values must use all uppercase characters
+    %% https://docs.snowflake.com/en/developer-guide/sql-api/authenticating#using-key-pair-authentication
+    AccountUp = string:uppercase(Account),
+    PipeUserUp = string:uppercase(PipeUser),
+    Fingerprint = fingerprint(PrivateJWK),
+    Sub = iolist_to_binary([AccountUp, <<".">>, PipeUserUp]),
+    Iss = iolist_to_binary([Sub, <<".">>, Fingerprint]),
+    #{
+        expiration => 360_000,
+        resource_id => ActionResId,
+        jwk => emqx_secret:wrap(PrivateJWK),
+        iss => Iss,
+        sub => Sub,
+        aud => <<"unused">>,
+        kid => <<"unused">>,
+        alg => <<"RS256">>
+    }.
+
+%% Computes the Snowflake public-key fingerprint ("SHA256:<base64>") of
+%% the key pair's public half.
+fingerprint(PrivateJWK) ->
+    {_, PublicRSAKey} = jose_jwk:to_public_key(PrivateJWK),
+    %% pem_entry_encode/2 returns a pem_entry tuple whose second element is
+    %% the DER-encoded SubjectPublicKeyInfo; the record pattern binds it.
+    #'SubjectPublicKeyInfo'{algorithm = DEREncoded} =
+        public_key:pem_entry_encode('SubjectPublicKeyInfo', PublicRSAKey),
+    Hash = crypto:hash(sha256, DEREncoded),
+    Hash64 = base64:encode(Hash),
+    <<"SHA256:", Hash64/binary>>.
+
+%% POSTs the list of staged files to the Snowpipe insertFiles endpoint,
+%% authenticated with a freshly ensured key-pair JWT.
+insert_files_request(StagedFiles, HTTPPool, HTTPClientConfig) ->
+    #{
+        jwt_config := JWTConfig,
+        insert_files_path := InserFilesPath,
+        request_ttl := RequestTTL,
+        max_retries := MaxRetries
+    } = HTTPClientConfig,
+    JWTToken = emqx_connector_jwt:ensure_jwt(JWTConfig),
+    AuthnHeader = [<<"BEARER ">>, JWTToken],
+    Headers = http_headers(AuthnHeader),
+    Body = emqx_utils_json:encode(#{files => StagedFiles}),
+    %% TODO: generate unique request id
+    Req = {InserFilesPath, Headers, Body},
+    ?tp(debug, "snowflake_stage_insert_files_request", #{
+        action_res_id => HTTPPool,
+        staged_files => StagedFiles
+    }),
+    ?MODULE:do_insert_files_request(HTTPPool, Req, RequestTTL, MaxRetries).
+
+%% Exposed for mocking
+do_insert_files_request(HTTPPool, Req, RequestTTL, MaxRetries) ->
+    ehttpc:request(HTTPPool, post, Req, RequestTTL, MaxRetries).
+
+%% GETs the Snowpipe insertReport endpoint (ingestion history); an
+%% optional binary `begin_mark' is passed through as the beginMark query
+%% parameter. Returns the decoded JSON body on HTTP 200.
+insert_report_request(HTTPPool, Opts, HTTPClientConfig) ->
+    #{
+        jwt_config := JWTConfig,
+        insert_report_path := InsertReportPath0,
+        request_ttl := RequestTTL,
+        max_retries := MaxRetries
+    } = HTTPClientConfig,
+    JWTToken = emqx_connector_jwt:ensure_jwt(JWTConfig),
+    AuthnHeader = [<<"BEARER ">>, JWTToken],
+    Headers = http_headers(AuthnHeader),
+    InsertReportPath =
+        case Opts of
+            #{begin_mark := BeginMark} when is_binary(BeginMark) ->
+                <<InsertReportPath0/binary, "?beginMark=", BeginMark/binary>>;
+            _ ->
+                InsertReportPath0
+        end,
+    Req = {InsertReportPath, Headers},
+    Response = ehttpc:request(
+        HTTPPool,
+        get,
+        Req,
+        RequestTTL,
+        MaxRetries
+    ),
+    case Response of
+        {ok, 200, _Headers, Body0} ->
+            Body = emqx_utils_json:decode(Body0, [return_maps]),
+            {ok, Body};
+        _ ->
+            {error, Response}
+    end.
+
+%% Common headers for Snowpipe REST requests; the token-type header tells
+%% Snowflake the bearer token is a key-pair JWT.
+http_headers(AuthnHeader) ->
+    [
+        {<<"X-Snowflake-Authorization-Token-Type">>, <<"KEYPAIR_JWT">>},
+        {<<"Content-Type">>, <<"application/json">>},
+        {<<"Authorization">>, AuthnHeader}
+    ].
+
+%% Turns one ODBC result row (a tuple of cells) into a map keyed by the
+%% already-binarised column headers; cell values are binarised too.
+row_to_map(Row, Headers) ->
+    Cells = [emqx_utils_conv:bin(Cell) || Cell <- tuple_to_list(Row)],
+    maps:from_list(lists:zip(Headers, Cells)).
+
+%% Health check for one installed (aggregated) action: ticks the
+%% aggregator and then probes every ehttpc worker of the action's pool.
+action_status(ActionResId, #{mode := aggregated} = ActionState) ->
+    #{
+        aggreg_id := AggregId,
+        http := #{connect_timeout := ConnectTimeout}
+    } = ActionState,
+    %% NOTE: This will effectively trigger uploads of buffers yet to be uploaded.
+    Timestamp = erlang:system_time(second),
+    ok = emqx_connector_aggregator:tick(AggregId, Timestamp),
+    case http_pool_workers_healthy(ActionResId, ConnectTimeout) of
+        true ->
+            ?status_connected;
+        false ->
+            ?status_disconnected
+    end.
+
+%% Builds the `PUT file://<local> @<db>.<schema>.<stage>/<action>' SQL
+%% statement. Identifiers are quoted only when needed (maybe_quote/1);
+%% Filename and ActionName are interpolated as-is — both are generated
+%% internally (see ensure_file/1), not taken from message payloads.
+stage_file_sql(Filename, Database, Schema, Stage, ActionName) ->
+    SQL0 = iolist_to_binary([
+        <<"PUT file://">>,
+        %% TODO: use action as directory name on stage?
+        Filename,
+        <<" @">>,
+        maybe_quote(Database),
+        <<".">>,
+        maybe_quote(Schema),
+        <<".">>,
+        maybe_quote(Stage),
+        <<"/">>,
+        ActionName
+    ]),
+    binary_to_list(SQL0).
+
+%% Probes all ehttpc workers of the pool in parallel; healthy only when
+%% the pool is non-empty and every worker passes within Timeout (a pmap
+%% timeout also counts as unhealthy).
+http_pool_workers_healthy(HTTPPool, Timeout) ->
+    Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(HTTPPool)],
+    DoPerWorker =
+        fun(Worker) ->
+            case ehttpc:health_check(Worker, Timeout) of
+                ok ->
+                    true;
+                {error, Reason} ->
+                    ?SLOG(error, #{
+                        msg => "snowflake_ehttpc_health_check_failed",
+                        action => HTTPPool,
+                        reason => Reason,
+                        worker => Worker,
+                        wait_time => Timeout
+                    }),
+                    false
+            end
+        end,
+    try emqx_utils:pmap(DoPerWorker, Workers, Timeout) of
+        [_ | _] = Status ->
+            lists:all(fun(St) -> St =:= true end, Status);
+        [] ->
+            false
+    catch
+        exit:timeout ->
+            false
+    end.
+
+%% Returns true when Identifier is not a valid *unquoted* Snowflake
+%% identifier (letter or underscore, then letters, digits, `_' or `$')
+%% and therefore must be double-quoted in SQL.
+%% https://docs.snowflake.com/en/sql-reference/identifiers-syntax
+needs_quoting(Identifier) ->
+    %% BUGFIX: the character class needs the `*' quantifier; without it only
+    %% identifiers of exactly two characters matched, so any longer safe
+    %% identifier (e.g. <<"teststage">>) was needlessly quoted — and quoted
+    %% identifiers are resolved case-sensitively by Snowflake.
+    nomatch =:= re:run(Identifier, <<"^[A-Za-z_][A-Za-z_0-9$]*$">>, [{capture, none}]).
+
+%% Double-quotes a Snowflake identifier only when it cannot be used
+%% unquoted (see needs_quoting/1); safe identifiers pass through
+%% untouched so their case-insensitive resolution is preserved.
+maybe_quote(Identifier) ->
+    case needs_quoting(Identifier) of
+        true ->
+            emqx_utils_sql:escape_snowflake(Identifier);
+        false ->
+            Identifier
+    end.

+ 73 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_connector_info.erl

@@ -0,0 +1,73 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_snowflake_connector_info).
+
+-behaviour(emqx_connector_info).
+
+-include("emqx_bridge_snowflake.hrl").
+
+%% `emqx_connector_info' API
+-export([
+    type_name/0,
+    bridge_types/0,
+    resource_callback_module/0,
+    config_schema/0,
+    schema_module/0,
+    api_schema/1
+]).
+
+%% API
+-export([]).
+
+%%------------------------------------------------------------------------------
+%% Type declarations
+%%------------------------------------------------------------------------------
+
+-define(SCHEMA_MOD, emqx_bridge_snowflake_connector_schema).
+
+%%------------------------------------------------------------------------------
+%% API
+%%------------------------------------------------------------------------------
+
+%%------------------------------------------------------------------------------
+%% `emqx_connector_info' API
+%%------------------------------------------------------------------------------
+
+%% Connector type atom as exposed in configuration and the management API.
+type_name() ->
+    ?CONNECTOR_TYPE.
+
+%% Action (bridge v2) types that can run on top of this connector.
+bridge_types() ->
+    [?ACTION_TYPE].
+
+%% Module implementing the `emqx_resource' callbacks for this connector.
+resource_callback_module() ->
+    emqx_bridge_snowflake_connector.
+
+%% Root config schema: a map of connector name to its config structure,
+%% referencing "config_connector" from the schema module.
+config_schema() ->
+    {?CONNECTOR_TYPE,
+        hoconsc:mk(
+            hoconsc:map(
+                name,
+                hoconsc:ref(
+                    ?SCHEMA_MOD,
+                    "config_connector"
+                )
+            ),
+            #{
+                desc => <<"Snowflake Connector Config">>,
+                required => false
+            }
+        )}.
+
+schema_module() ->
+    ?SCHEMA_MOD.
+
+%% HTTP API schema reference for `Method' ("get" | "put" | "post").
+api_schema(Method) ->
+    emqx_connector_schema:api_ref(
+        ?SCHEMA_MOD, ?CONNECTOR_TYPE_BIN, Method ++ "_connector"
+    ).
+
+%%------------------------------------------------------------------------------
+%% Internal fns
+%%------------------------------------------------------------------------------

+ 145 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_connector_schema.erl

@@ -0,0 +1,145 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_snowflake_connector_schema).
+
+-behaviour(hocon_schema).
+-behaviour(emqx_connector_examples).
+
+-include_lib("typerefl/include/types.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+-include("emqx_bridge_snowflake.hrl").
+
+%% `hocon_schema' API
+-export([
+    namespace/0,
+    roots/0,
+    fields/1,
+    desc/1
+]).
+
+%% `emqx_connector_examples' API
+-export([
+    connector_examples/1
+]).
+
+%% API
+-export([]).
+
+%%------------------------------------------------------------------------------
+%% Type declarations
+%%------------------------------------------------------------------------------
+
+%%-------------------------------------------------------------------------------------------------
+%% `hocon_schema' API
+%%-------------------------------------------------------------------------------------------------
+
+namespace() ->
+    "connector_snowflake".
+
+%% No standalone config root: this schema is referenced from the global
+%% connectors map via the connector-info module.
+roots() ->
+    [].
+
+%% API (get/put/post) document schemas are derived from the base fields.
+fields(Field) when
+    Field == "get_connector";
+    Field == "put_connector";
+    Field == "post_connector"
+->
+    emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, fields(connector_config));
+fields("config_connector") ->
+    emqx_connector_schema:common_fields() ++ fields(connector_config);
+fields(connector_config) ->
+    %% Start from the shared relational-DB field set, but:
+    %%   - drop `database' (a database is configured on the action instead);
+    %%   - force `username' and `password' to be required.
+    Fields0 = emqx_connector_schema_lib:relational_db_fields(),
+    Fields1 = proplists:delete(database, Fields0),
+    Fields = lists:map(
+        fun
+            ({Field, Sc}) when Field =:= username; Field =:= password ->
+                Override = #{type => hocon_schema:field_schema(Sc, type), required => true},
+                {Field, hocon_schema:override(Sc, Override)};
+            ({Field, Sc}) ->
+                {Field, Sc}
+        end,
+        Fields1
+    ),
+    [
+        {server,
+            emqx_schema:servers_sc(
+                #{required => true, desc => ?DESC("server")},
+                ?SERVER_OPTS
+            )},
+        {account, mk(binary(), #{required => true, desc => ?DESC("account")})},
+        {dsn, mk(binary(), #{required => true, desc => ?DESC("dsn")})}
+        | Fields
+    ] ++
+        emqx_connector_schema:resource_opts() ++
+        emqx_connector_schema_lib:ssl_fields().
+
+desc("config_connector") ->
+    ?DESC("config_connector");
+desc(resource_opts) ->
+    ?DESC(emqx_resource_schema, resource_opts);
+desc(_Name) ->
+    undefined.
+
+%%-------------------------------------------------------------------------------------------------
+%% `emqx_connector_examples' API
+%%-------------------------------------------------------------------------------------------------
+
+%% Example connector documents rendered in the HTTP API docs, one per method.
+connector_examples(Method) ->
+    Example = #{
+        summary => <<"Snowflake Connector">>,
+        value => connector_example(Method)
+    },
+    [#{<<"snowflake">> => Example}].
+
+%% `put' is the base document; `get' and `post' extend it with the extra
+%% fields returned/accepted by those methods.
+connector_example(get) ->
+    Base = connector_example(put),
+    NodeStatus = #{
+        node => <<"emqx@localhost">>,
+        status => <<"connected">>
+    },
+    Base#{
+        status => <<"connected">>,
+        node_status => [NodeStatus]
+    };
+connector_example(post) ->
+    Base = connector_example(put),
+    Base#{
+        type => atom_to_binary(?CONNECTOR_TYPE),
+        name => <<"my_connector">>
+    };
+connector_example(put) ->
+    #{
+        enable => true,
+        description => <<"My connector">>,
+        server => <<"myorg-myaccount.snowflakecomputing.com">>,
+        account => <<"myorg-myaccount">>,
+        username => <<"admin">>,
+        password => <<"******">>,
+        dsn => <<"snowflake">>,
+        pool_size => 8,
+        resource_opts => #{
+            health_check_interval => <<"45s">>,
+            start_after_created => true,
+            start_timeout => <<"5s">>
+        }
+    }.
+
+%%------------------------------------------------------------------------------
+%% API
+%%------------------------------------------------------------------------------
+
+%%------------------------------------------------------------------------------
+%% Internal fns
+%%------------------------------------------------------------------------------
+
+mk(Type, Meta) -> hoconsc:mk(Type, Meta).

+ 53 - 0
apps/emqx_bridge_snowflake/src/emqx_bridge_snowflake_sup.erl

@@ -0,0 +1,53 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_bridge_snowflake_sup).
+
+%% API
+-export([
+    start_link/0,
+    start_child/1,
+    delete_child/1
+]).
+
+%% `supervisor' API
+-export([init/1]).
+
+%%------------------------------------------------------------------------------
+%% Type declarations
+%%------------------------------------------------------------------------------
+
+%%------------------------------------------------------------------------------
+%% API
+%%------------------------------------------------------------------------------
+
+start_link() ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+start_child(ChildSpec) ->
+    supervisor:start_child(?MODULE, ChildSpec).
+
+delete_child(ChildId) ->
+    case supervisor:terminate_child(?MODULE, ChildId) of
+        ok ->
+            supervisor:delete_child(?MODULE, ChildId);
+        Error ->
+            Error
+    end.
+
+%%------------------------------------------------------------------------------
+%% `supervisor' API
+%%------------------------------------------------------------------------------
+
+init([]) ->
+    SupFlags = #{
+        strategy => one_for_one,
+        intensity => 1,
+        period => 1
+    },
+    ChildSpecs = [],
+    {ok, {SupFlags, ChildSpecs}}.
+
+%%------------------------------------------------------------------------------
+%% Internal fns
+%%------------------------------------------------------------------------------

Tiedoston diff-näkymää rajattu, sillä se on liian suuri
+ 1045 - 0
apps/emqx_bridge_snowflake/test/emqx_bridge_snowflake_SUITE.erl


+ 1 - 0
apps/emqx_connector/src/emqx_connector_info.erl

@@ -105,6 +105,7 @@ hard_coded_connector_info_modules_ee() ->
         emqx_bridge_redis_connector_info,
         emqx_bridge_rocketmq_connector_info,
         emqx_bridge_s3_connector_info,
+        emqx_bridge_snowflake_connector_info,
         emqx_bridge_sqlserver_connector_info,
         emqx_bridge_syskeeper_connector_info,
         emqx_bridge_syskeeper_proxy_connector_info,

+ 9 - 1
apps/emqx_connector_aggregator/mix.exs

@@ -22,10 +22,18 @@ defmodule EMQXConnectorAggregator.MixProject do
  end
 
  def deps() do
+    test_deps() ++
    [
      {:emqx, in_umbrella: true},
-      UMP.common_dep(:gproc),
-      {:erl_csv, "0.2.0"}
+      UMP.common_dep(:gproc)
    ]
  end
+
+  # Test-only dependencies: `erl_csv` is used exclusively by the test suite,
+  # so it is excluded from release builds.
+  defp test_deps() do
+    if UMP.test_env?() do
+      [{:erl_csv, "0.2.0"}]
+    else
+      []
+    end
+  end
end

+ 13 - 0
apps/emqx_connector_aggregator/src/emqx_connector_aggreg_buffer_ctx.erl

@@ -11,10 +11,23 @@
 %% `emqx_template' API
 -export([lookup/2]).
 
+%% API
+-export([sequence/1, datetime/2]).
+
 %%------------------------------------------------------------------------------
 %% Type declarations
 %%------------------------------------------------------------------------------
 
+%%------------------------------------------------------------------------------
+%% API
+%%------------------------------------------------------------------------------
+
+%% Returns the buffer's sequence number.  Consecutive uploads within the
+%% same time interval carry consecutive sequence numbers, making staged
+%% object names unique.
+sequence(#buffer{seq = Seq}) ->
+    Seq.
+
+%% Formats the buffer's start timestamp (`since') using `Format'; delegates
+%% to format_timestamp/2 defined elsewhere in this module.
+datetime(#buffer{since = Since}, Format) ->
+    format_timestamp(Since, Format).
+
 %%------------------------------------------------------------------------------
 %% `emqx_template' API
 %%------------------------------------------------------------------------------

+ 3 - 0
apps/emqx_connector_aggregator/src/emqx_connector_aggregator.erl

@@ -30,10 +30,13 @@
 ]).
 
 -export_type([
+    container_type/0,
     record/0,
     timestamp/0
 ]).
 
+%% Container formats the aggregator can serialize records into.
+%% Currently only CSV is supported.
+-type container_type() :: csv.
+
 %% Record.
 -type record() :: #{binary() => _}.
 

+ 1 - 0
apps/emqx_machine/priv/reboot_lists.eterm

@@ -122,6 +122,7 @@
             emqx_bridge_s3,
             emqx_bridge_azure_blob_storage,
             emqx_bridge_couchbase,
+            emqx_bridge_snowflake,
             emqx_schema_registry,
             emqx_eviction_agent,
             emqx_node_rebalance,

+ 1 - 2
apps/emqx_mix_utils/lib/mix/tasks/emqx.dialyzer.ex

@@ -75,8 +75,7 @@ defmodule Mix.Tasks.Emqx.Dialyzer do
 
   defp resolve_apps() do
     base_apps = MapSet.new([:erts, :crypto])
-    # excluded_apps = MapSet.new([:elixir])
-    excluded_apps = MapSet.new()
+    excluded_apps = MapSet.new([:emqx_mix_utils])
     acc = %{
       umbrella_apps: [],
       dep_apps: base_apps

+ 6 - 0
apps/emqx_utils/src/emqx_utils_sql.erl

@@ -25,6 +25,7 @@
 -export([escape_sql/1]).
 -export([escape_cql/1]).
 -export([escape_mysql/1]).
+-export([escape_snowflake/1]).
 
 -export_type([value/0]).
 
@@ -168,6 +169,11 @@ escape_mysql(S0) ->
     % https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
     [$', escape_mysql(S0, 0, 0, S0), $'].
 
+%% Quotes a Snowflake identifier: wraps it in double quotes and doubles any
+%% embedded `"'.  `{insert_replaced, 1}' splices the matched quote back into
+%% the replacement at position 1, effectively turning each `"' into `""'.
+%% See https://docs.snowflake.com/en/sql-reference/identifiers-syntax
+-spec escape_snowflake(binary()) -> iodata().
+escape_snowflake(S) ->
+    ES = binary:replace(S, <<"\"">>, <<"\"">>, [global, {insert_replaced, 1}]),
+    [$", ES, $"].
+
 %% NOTE
 %% This thing looks more complicated than needed because it's optimized for as few
 %% intermediate memory (re)allocations as possible.

+ 51 - 0
apps/emqx_utils/test/emqx_utils_sql_tests.erl

@@ -0,0 +1,51 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+-module(emqx_utils_sql_tests).
+
+-compile(nowarn_export_all).
+-compile(export_all).
+
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%%------------------------------------------------------------------------------
+%% Properties
+%%------------------------------------------------------------------------------
+
+snowflake_escape_test_() ->
+    Props = [prop_snowflake_escape()],
+    Opts = [{numtests, 1_000}, {to_file, user}, {max_size, 100}],
+    {timeout, 300, [?_assert(proper:quickcheck(Prop, Opts)) || Prop <- Props]}.
+
+%% The escaped identifier must be the input wrapped in `"' with every
+%% embedded `"' doubled.  Besides the negative regex, we check a full
+%% round-trip: un-doubling the quotes must recover the input exactly.
+%% The regex alone is too weak: `[^"]"[^"]' requires non-quote neighbors
+%% on both sides, so it cannot detect a lone unescaped quote at the very
+%% beginning or end of the quoted content.
+prop_snowflake_escape() ->
+    ?FORALL(
+        Input,
+        binary(),
+        begin
+            Escaped = iolist_to_binary(emqx_utils_sql:escape_snowflake(Input)),
+            Content = binary_part(Escaped, 1, byte_size(Escaped) - 2),
+            Unescaped = binary:replace(Content, <<"\"\"">>, <<"\"">>, [global]),
+            ?WHENFAIL(
+                io:format(
+                    user,
+                    "Input:\n  ~p\n\nEscaped:\n  ~p\n",
+                    [Input, Escaped]
+                ),
+                Unescaped =:= Input andalso
+                    nomatch =:= re:run(Content, <<"[^\"]\"[^\"]">>, [{capture, none}])
+            )
+        end
+    ).

+ 1 - 0
changes/ee/feat-13745.en.md

@@ -0,0 +1 @@
+Added Snowflake Connector and Action as a Data Integration.

+ 1 - 0
mix.exs

@@ -373,6 +373,7 @@ defmodule EMQXUmbrella.MixProject do
       :emqx_bridge_s3,
       :emqx_bridge_azure_blob_storage,
       :emqx_bridge_couchbase,
+      :emqx_bridge_snowflake,
       :emqx_schema_registry,
       :emqx_schema_validation,
       :emqx_message_transformation,

+ 1 - 0
rebar.config.erl

@@ -107,6 +107,7 @@ is_community_umbrella_app("apps/emqx_s3") -> false;
 is_community_umbrella_app("apps/emqx_bridge_s3") -> false;
 is_community_umbrella_app("apps/emqx_bridge_azure_blob_storage") -> false;
 is_community_umbrella_app("apps/emqx_bridge_couchbase") -> false;
+is_community_umbrella_app("apps/emqx_bridge_snowflake") -> false;
 is_community_umbrella_app("apps/emqx_schema_registry") -> false;
 is_community_umbrella_app("apps/emqx_enterprise") -> false;
 is_community_umbrella_app("apps/emqx_bridge_kinesis") -> false;

+ 91 - 0
rel/i18n/emqx_bridge_snowflake_action_schema.hocon

@@ -0,0 +1,91 @@
+emqx_bridge_snowflake_action_schema {
+  snowflake.label:
+  """Upload to Snowflake"""
+  snowflake.desc:
+  """Action that takes incoming events and uploads them to Snowflake."""
+
+  parameters.label:
+  """Snowflake action parameters"""
+  parameters.desc:
+  """Set of parameters for the action."""
+
+  aggreg_parameters.label:
+  """Snowflake Aggregated Mode action parameters"""
+  aggreg_parameters.desc:
+  """Set of parameters for the action in aggregated mode."""
+
+  aggregated_mode.label:
+  """Aggregated Snowflake Upload"""
+  aggregated_mode.desc:
+  """Enables time-based aggregation of incoming events and uploading them to Snowflake as a single object.  This aggregation is done independently by each node in the cluster."""
+
+  aggregation.label:
+  """Aggregation parameters"""
+  aggregation.desc:
+  """Set of parameters governing the aggregation process."""
+
+  aggregation_interval.label:
+  """Time interval"""
+  aggregation_interval.desc:
+  """Amount of time events will be aggregated in a single file on each node before uploading."""
+
+  aggregation_max_records.label:
+  """Maximum number of records"""
+  aggregation_max_records.desc:
+  """Number of records (events) allowed per aggregated file. Each aggregated upload will contain no more than that number of events, but may contain fewer.<br/>
+  If the event rate is high enough, there may be more than one aggregated upload during the same time interval. These uploads will have different, but consecutive, sequence numbers, which will be part of the Snowflake staged file name."""
+
+  private_key.label:
+  """Private Key"""
+  private_key.desc:
+  """~
+  The private key configured for the Pipe User.  This supports the input formats below:
+  - Plain key: Enter the private key contents in PEM format directly as a string value.
+  - File Path: Specify the path to a file that contains the private key. Ensure the path starts with <code>file://</code>.  The file path must be the same on all nodes in the cluster.~"""
+
+  database.label:
+  """Database"""
+  database.desc:
+  """Name of the Database that contains the Snowflake resources."""
+
+  schema.label:
+  """Schema"""
+  schema.desc:
+  """Name of the Schema that contains the Snowflake resources."""
+
+  stage.label:
+  """Stage"""
+  stage.desc:
+  """Name of the Stage that'll be used for loading data files into Snowflake."""
+
+  pipe.label:
+  """Pipe"""
+  pipe.desc:
+  """Name of the Pipe that'll be used to ingest data into the table."""
+
+  pipe_user.label:
+  """Pipe User"""
+  pipe_user.desc:
+  """A username which has a role with permissions over the Pipe to be used.  The minimum permissions are `operate` and `monitor`."""
+
+  pipelining.label:
+  """HTTP Pipelining"""
+  pipelining.desc:
+  """A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request."""
+
+  connect_timeout.label:
+  """Connect Timeout"""
+  connect_timeout.desc:
+  """The timeout when connecting to the HTTP server."""
+
+  pool_size.label:
+  """Pool Size"""
+  pool_size.desc:
+  """The pool size."""
+
+  max_retries.label:
+  """Max Retries"""
+  max_retries.desc:
+  """Max retry attempts if there's an error when sending an HTTP request."""
+
+}

+ 22 - 0
rel/i18n/emqx_bridge_snowflake_connector_schema.hocon

@@ -0,0 +1,22 @@
+emqx_bridge_snowflake_connector_schema {
+  config_connector.label:
+  """Snowflake Connector Configuration"""
+  config_connector.desc:
+  """Configuration for a connector to Snowflake service."""
+
+  server.label:
+  """Server Host"""
+  server.desc:
+  """The address of Snowflake computing server to connect to."""
+
+  account.label:
+  """Account"""
+  account.desc:
+  """Account ID for Snowflake."""
+
+  dsn.label:
+  """DSN"""
+  dsn.desc:
+  """Data Source Name (DSN) associated with the installed Snowflake ODBC driver."""
+
+}

+ 44 - 26
scripts/ct/run.sh

@@ -39,7 +39,8 @@ ONLY_UP='no'
 ATTACH='no'
 STOP='no'
 IS_CI='no'
-ODBC_REQUEST='no'
+SQLSERVER_ODBC_REQUEST='no'
+SNOWFLAKE_ODBC_REQUEST='no'
 UP='up'
 while [ "$#" -gt 0 ]; do
     case $1 in
@@ -207,7 +208,7 @@ for dep in ${CT_DEPS}; do
             FILES+=( '.ci/docker-compose-file/docker-compose-cassandra.yaml' )
             ;;
         sqlserver)
-            ODBC_REQUEST='yes'
+            SQLSERVER_ODBC_REQUEST='yes'
             FILES+=( '.ci/docker-compose-file/docker-compose-sqlserver.yaml' )
             ;;
         opents)
@@ -247,22 +248,30 @@ for dep in ${CT_DEPS}; do
         otel)
             FILES+=( '.ci/docker-compose-file/docker-compose-otel.yaml' )
             ;;
-	    elasticsearch)
-	        FILES+=( '.ci/docker-compose-file/docker-compose-elastic-search-tls.yaml' )
-	        ;;
-	    azurite)
-	        FILES+=( '.ci/docker-compose-file/docker-compose-azurite.yaml' )
-	        ;;
-	    couchbase)
-	        FILES+=( '.ci/docker-compose-file/docker-compose-couchbase.yaml' )
-	        ;;
-	    kdc)
-	        FILES+=( '.ci/docker-compose-file/docker-compose-kdc.yaml' )
-	        ;;
+        elasticsearch)
+            FILES+=( '.ci/docker-compose-file/docker-compose-elastic-search-tls.yaml' )
+            ;;
+        azurite)
+            FILES+=( '.ci/docker-compose-file/docker-compose-azurite.yaml' )
+            ;;
+        couchbase)
+            FILES+=( '.ci/docker-compose-file/docker-compose-couchbase.yaml' )
+            ;;
+        kdc)
+            FILES+=( '.ci/docker-compose-file/docker-compose-kdc.yaml' )
+            ;;
         datalayers)
             FILES+=( '.ci/docker-compose-file/docker-compose-datalayers-tcp.yaml'
                      '.ci/docker-compose-file/docker-compose-datalayers-tls.yaml' )
             ;;
+        snowflake)
+            if [[ -z "${SNOWFLAKE_ACCOUNT_ID:-}" ]]; then
+                echo "Snowflake environment requested, but SNOWFLAKE_ACCOUNT_ID is undefined"
+                echo "Will NOT install Snowflake's ODBC drivers"
+            else
+                SNOWFLAKE_ODBC_REQUEST='yes'
+            fi
+            ;;
         *)
             echo "unknown_ct_dependency $dep"
             exit 1
@@ -270,10 +279,16 @@ for dep in ${CT_DEPS}; do
     esac
 done
 
-if [ "$ODBC_REQUEST" = 'yes' ]; then
-    INSTALL_ODBC="./scripts/install-msodbc-driver.sh"
+if [ "$SQLSERVER_ODBC_REQUEST" = 'yes' ]; then
+    INSTALL_SQLSERVER_ODBC="./scripts/install-msodbc-driver.sh"
+else
+    INSTALL_SQLSERVER_ODBC="echo 'msodbc driver not requested'"
+fi
+
+if [ "$SNOWFLAKE_ODBC_REQUEST" = 'yes' ]; then
+    INSTALL_SNOWFLAKE_ODBC="./scripts/install-snowflake-driver.sh"
 else
-    INSTALL_ODBC="echo 'msodbc driver not requested'"
+    INSTALL_SNOWFLAKE_ODBC="echo 'snowflake driver not requested'"
 fi
 
 for file in "${FILES[@]}"; do
@@ -310,15 +325,18 @@ fi
 
 if [ "$DOCKER_USER" != "root" ]; then
     # the user must exist inside the container for `whoami` to work
-    docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c \
-          "useradd --uid $DOCKER_USER -M -d / emqx && \
-           mkdir -p /.cache /.hex /.mix && \
-           chown $DOCKER_USER /.cache /.hex /.mix && \
-           openssl rand -base64 -hex 16 > /.erlang.cookie && \
-           chown $DOCKER_USER /.erlang.cookie && \
-           chmod 0400 /.erlang.cookie && \
-           chown -R $DOCKER_USER /var/lib/secret && \
-           $INSTALL_ODBC" || true
+  docker exec -i $TTY -u root:root \
+         -e "SFACCOUNT=${SFACCOUNT:-myorg-myacc}" \
+         "$ERLANG_CONTAINER" bash -c \
+         "useradd --uid $DOCKER_USER -M -d / emqx || true && \
+          mkdir -p /.cache /.hex /.mix && \
+          chown $DOCKER_USER /.cache /.hex /.mix && \
+          openssl rand -base64 -hex 16 > /.erlang.cookie && \
+          chown $DOCKER_USER /.erlang.cookie && \
+          chmod 0400 /.erlang.cookie && \
+          chown -R $DOCKER_USER /var/lib/secret && \
+          $INSTALL_SQLSERVER_ODBC && \
+          $INSTALL_SNOWFLAKE_ODBC" || true
 fi
 
 if [ "$ONLY_UP" = 'yes' ]; then

+ 42 - 0
scripts/install-snowflake-driver.sh

@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -xeuo pipefail
+
+## Specify your organization - account name as the account identifier
+SFACCOUNT=${SFACCOUNT:-myorganization-myaccount}
+VERSION="3.3.2"
+FILE="snowflake-odbc-${VERSION}.x86_64.deb"
+URL="https://sfc-repo.snowflakecomputing.com/odbc/linux/${VERSION}/${FILE}"
+SHA256="fdcf83aadaf92ec135bed0699936fa4ef2cf2d88aef5a4657a96877ae2ba232d"
+
+if [[ -f "${FILE}" && $(sha256sum "${FILE}" | cut -f1 -d' ') == "${SHA256}" ]]; then
+  echo "snowflake package already downloaded"
+else
+  echo "downloading snowflake package"
+  wget -nc "$URL"
+fi
+
+function configure() {
+  ODBC_INST_LIB=/usr/lib/x86_64-linux-gnu/libodbcinst.so
+
+  sed -i -e "s#^ODBCInstLib=.*#ODBCInstLib=$ODBC_INST_LIB#" /usr/lib/snowflake/odbc/lib/simba.snowflake.ini
+
+  sed -i -e "s#SF_ACCOUNT#${SFACCOUNT}#" /etc/odbc.ini
+
+  cat >>/etc/odbc.ini  <<EOF
+[ODBC Data Sources]
+snowflake = SnowflakeDSIIDriver
+EOF
+}
+
+if ! dpkg -l snowflake-odbc 1>/dev/null 2>/dev/null ; then
+  apt update && apt install -yyq unixodbc-dev odbcinst
+  dpkg -i "${FILE}"
+  apt install -f
+
+  configure
+
+  echo "installed and configured snowflake"
+else
+  echo "snowflake odbc already installed; not attempting to configure it"
+fi