
Merge pull request #10268 from zmstone/0329-fix-bridge-docs

fix: hide nodelay Kafka client socket option
Zaiming (Stone) Shi 2 years ago
parent
commit
09f6d87a7e
2 changed files with 35 additions and 30 deletions
  1. lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf (+28 -27)
  2. lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl (+7 -3)

+ 28 - 27
lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf

@@ -216,6 +216,7 @@ emqx_ee_bridge_kafka {
             zh: "Socket 收包缓存大小"
         }
     }
+    # hidden
     socket_nodelay {
         desc {
             en: "When set to 'true', TCP buffer is sent as soon as possible. "
@@ -571,26 +572,27 @@ emqx_ee_bridge_kafka {
             zh: "指定从哪个 Kafka 主题消费消息。"
         }
         label {
-            en: "Kafka topic"
-            zh: "Kafka 主题 "
+            en: "Kafka Topic"
+            zh: "Kafka 主题"
         }
     }
     consumer_max_batch_bytes {
         desc {
-            en: "Maximum bytes to fetch in a batch of messages."
+            en: "Set how many bytes to pull from Kafka in each fetch request. "
                 "Please note that if the configured value is smaller than the message size in Kafka, it may negatively impact the fetch performance."
-            zh: "在一批消息中要取的最大字节数。"
-                "如果该配置小于 Kafka 中消息到大小,则可能会影响消费性能。"
+            zh: "设置每次从 Kafka 拉取数据的字节数。"
+                "如该配置小于 Kafka 消息的大小,可能会影响消费性能。"
         }
         label {
-            en: "Max Bytes"
-            zh: "最大字节数"
+            en: "Fetch Bytes"
+            zh: "拉取字节数"
         }
     }
+    # hidden
     consumer_max_rejoin_attempts {
         desc {
             en: "Maximum number of times allowed for a member to re-join the group. If the consumer group can not reach balance after this configured number of attempts, the consumer group member will restart after a delay."
-            zh: "允许一个成员重新加入小组的最大次数。如果超过改配置次数后仍不能成功加入消费组,则会在延迟一段时间后再重试。"
+            zh: "消费组成员允许重新加入小组的最大次数。如超过该配置次数后仍未能成功加入消费组,则会在等待一段时间后重试。"
         }
         label {
             en: "Max Rejoin Attempts"
@@ -599,10 +601,9 @@ emqx_ee_bridge_kafka {
     }
     consumer_offset_reset_policy {
         desc {
-            en: "Defines from which offset a consumer should start fetching when there"
-                " is no commit history or when the commit history becomes invalid."
-            zh: "当没有主题分区没有偏移量的历史记录,或则历史记录失效后,"
-                "消费者应该使用哪个偏移量重新开始消费"
+            en: "Defines from which offset a consumer should start fetching when there "
+                "is no commit history or when the commit history becomes invalid."
+            zh: "如不存在偏移量历史记录或历史记录失效,消费者应使用哪个偏移量开始消费。"
         }
         label {
             en: "Offset Reset Policy"
@@ -616,13 +617,13 @@ emqx_ee_bridge_kafka {
         }
         label {
             en: "Offset Commit Interval"
-            zh: "偏移承诺间隔"
+            zh: "偏移提交间隔"
         }
     }
     consumer_topic_mapping {
         desc {
-            en: "Defines the mapping between Kafka topics and MQTT topics.  Must contain at least one item."
-            zh: "指定 Kafka 主题和 MQTT 主题之间的映射。 必须至少包含一个项目。"
+            en: "Defines the mapping between Kafka topics and MQTT topics. Must contain at least one item."
+            zh: "指定 Kafka 主题和 MQTT 主题之间的映射关系。 应至少包含一项。"
         }
         label {
             en: "Topic Mapping"
@@ -632,14 +633,14 @@ emqx_ee_bridge_kafka {
     consumer_key_encoding_mode {
         desc {
             en: "Defines how the key from the Kafka message is"
-                " dealt with before being forwarded via MQTT.\n"
+                " encoded before being forwarded via MQTT.\n"
                 "<code>none</code> Uses the key from the Kafka message unchanged."
-                "  Note: in this case, then the key must be a valid UTF-8 string.\n"
+                "  Note: in this case, the key must be a valid UTF-8 string.\n"
                 "<code>base64</code> Uses base-64 encoding on the received key."
-            zh: "定义了在通过MQTT转发之前如何处理Kafka消息的 Key。"
-                "<code>none</code> 使用Kafka消息中的 Key 原始值,不进行编码。"
-                "  注意:在这种情况下,Key 必须是一个有效的UTF-8字符串。\n"
-                "<code>base64</code> 对收到的密钥或值使用base-64编码。"
+            zh: "通过 MQTT 转发之前如何处理 Kafka 消息的 Key。"
+                "<code>none</code> 使用 Kafka 消息中的 Key 原始值,不进行编码。"
+                "  注意:在这种情况下,Key 必须是一个有效的 UTF-8 字符串。\n"
+                "<code>base64</code> 对收到的密钥或值使用 base-64 编码。"
         }
         label {
             en: "Key Encoding Mode"
@@ -649,14 +650,14 @@ emqx_ee_bridge_kafka {
     consumer_value_encoding_mode {
         desc {
             en: "Defines how the value from the Kafka message is"
-                " dealt with before being forwarded via MQTT.\n"
+                " encoded before being forwarded via MQTT.\n"
                 "<code>none</code> Uses the value from the Kafka message unchanged."
-                "  Note: in this case, then the value must be a valid UTF-8 string.\n"
+                "  Note: in this case, the value must be a valid UTF-8 string.\n"
                 "<code>base64</code> Uses base-64 encoding on the received value."
-            zh: "定义了在通过MQTT转发之前如何处理Kafka消息的 Value。"
-                "<code>none</code> 使用Kafka消息中的 Value 原始值,不进行编码。"
-                "  注意:在这种情况下,Value 必须是一个有效的UTF-8字符串。\n"
-                "<code>base64</code> 对收到的 Value 使用base-64编码。"
+            zh: "通过 MQTT 转发之前如何处理 Kafka 消息的 Value。"
+                "<code>none</code> 使用 Kafka 消息中的 Value 原始值,不进行编码。"
+                "  注意:在这种情况下,Value 必须是一个有效的 UTF-8 字符串。\n"
+                "<code>base64</code> 对收到的 Value 使用 base-64 编码。"
         }
         label {
             en: "Value Encoding Mode"

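For context, the consumer options documented in this i18n file correspond to fields of the Kafka consumer bridge configuration. The following HOCON sketch is an illustration only: the key paths (bridges.kafka_consumer.<name>, the kafka sub-block) and the unprefixed field names are assumptions inferred from the i18n keys above, and all values are placeholders.

    # Hypothetical sketch, assuming the i18n keys map onto these config fields.
    bridges.kafka_consumer.my_consumer {
      bootstrap_hosts = "kafka-1.example.com:9092"  # placeholder host
      # consumer_topic_mapping: must contain at least one item
      topic_mapping = [
        {
          kafka_topic = "telemetry"        # Kafka Topic to consume from
          mqtt_topic  = "kafka/telemetry"  # MQTT topic to republish to
          qos = 1
        }
      ]
      kafka {
        max_batch_bytes = "896KB"           # consumer_max_batch_bytes (Fetch Bytes)
        offset_reset_policy = latest        # consumer_offset_reset_policy
        offset_commit_interval_seconds = 5  # Offset Commit Interval
      }
      key_encoding_mode   = none            # none | base64
      value_encoding_mode = none            # none | base64
    }
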
+ 7 - 3
lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl

@@ -221,17 +221,21 @@ fields(socket_opts) ->
         {sndbuf,
             mk(
                 emqx_schema:bytesize(),
-                #{default => <<"1024KB">>, desc => ?DESC(socket_send_buffer)}
+                #{default => <<"1MB">>, desc => ?DESC(socket_send_buffer)}
             )},
         {recbuf,
             mk(
                 emqx_schema:bytesize(),
-                #{default => <<"1024KB">>, desc => ?DESC(socket_receive_buffer)}
+                #{default => <<"1MB">>, desc => ?DESC(socket_receive_buffer)}
             )},
         {nodelay,
             mk(
                 boolean(),
-                #{default => true, desc => ?DESC(socket_nodelay)}
+                #{
+                    default => true,
+                    hidden => true,
+                    desc => ?DESC(socket_nodelay)
+                }
             )}
     ];
 fields(producer_opts) ->
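
Note that the schema change above only marks nodelay as hidden in the generated documentation; the field remains in the schema with a default of true. Below is a minimal HOCON sketch of the socket options, assuming they sit under a socket_opts block as suggested by fields(socket_opts); the enclosing bridge path is omitted and the values simply restate the new defaults.

    # Hypothetical sketch of the Kafka client socket options from fields(socket_opts).
    socket_opts {
      sndbuf  = "1MB"   # send buffer; new default shown in the diff
      recbuf  = "1MB"   # receive buffer; new default shown in the diff
      nodelay = true    # still accepted, just hidden from the generated docs
    }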