diff --git a/.gitignore b/.gitignore
index 06bddcc1e9..4120c0008a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,6 +57,7 @@ _deps
 core/build/
 core/protobuf/config_server/*/*.pb.*
 core/protobuf/*/*.pb.*
+core/log_pb/*.pb.*
 core/common/Version.cpp
 !/Makefile
 # Enterprise
diff --git a/config_server/protocol/v2/README.md b/config_server/protocol/v2/README.md
index 53e096f2bd..324f2f8523 100644
--- a/config_server/protocol/v2/README.md
+++ b/config_server/protocol/v2/README.md
@@ -13,19 +13,19 @@
 
     message HeartbeatRequest {
         bytes request_id = 1;
-        uint64 sequence_num = 2;                    // Increment every request, for server to check sync status
-        uint64 capabilities = 3;                    // Bitmask of flags defined by AgentCapabilities enum
-        bytes instance_id = 4;                      // Required, Agent's unique identification, consistent throughout the process lifecycle
-        string agent_type = 5;                      // Required, Agent's type(ilogtail, ..)
-        AgentAttributes attributes = 6;             // Agent's basic attributes
-        repeated AgentGroupTag tags = 7;            // Agent's tags
-        string running_status = 8;                  // Human readable running status
-        int64 startup_time = 9;                     // Required, Agent's startup time
-        repeated ConfigInfo pipeline_configs = 10;  // Information about the current PIPELINE_CONFIG held by the Agent
-        repeated ConfigInfo instance_configs = 11;  // Information about the current AGENT_CONFIG held by the Agent
-        repeated CommandInfo custom_commands = 12;  // Information about command history
-        uint64 flags = 13;                          // Predefined command flag
-        bytes opaque = 14;                          // Opaque data for extension
+        uint64 sequence_num = 2;                              // Increment every request, for server to check sync status
+        uint64 capabilities = 3;                              // Bitmask of flags defined by AgentCapabilities enum
+        bytes instance_id = 4;                                // Required, Agent's unique identification, consistent throughout the process lifecycle
+        string agent_type = 5;                                // Required, Agent's type(ilogtail, ..)
+        AgentAttributes attributes = 6;                       // Agent's basic attributes
+        repeated AgentGroupTag tags = 7;                      // Agent's tags
+        string running_status = 8;                            // Human readable running status
+        int64 startup_time = 9;                               // Required, Agent's startup time
+        repeated ConfigInfo continuous_pipeline_configs = 10; // Information about the current continuous pipeline configs held by the Agent
+        repeated ConfigInfo instance_configs = 11;            // Information about the current instance configs held by the Agent
+        repeated ConfigInfo onetime_pipeline_configs = 12;    // Information about the onetime pipeline config history
+        uint64 flags = 13;                                    // Predefined command flag
+        bytes opaque = 14;                                    // Opaque data for extension
         // before 100 (inclusive) are reserved for future official fields
     }
     
@@ -55,15 +55,6 @@
         map<string, bytes> extra = 5; // Optional extra info
     }
 
-    // Define the Command information carried in the request
-    message CommandInfo {
-        string type = 1;         // Command's type
-        string name = 2;         // Required, Command's unique identification
-        ConfigStatus status = 3; // Command's status
-        string message = 4;      // Optional error message
-        map<string, bytes> extra = 5;   // Optional extra info
-    }
-
     // Define Agent's basic attributes
     message AgentAttributes {
         bytes version = 1;                 // Agent's version
@@ -76,12 +67,12 @@
     enum AgentCapabilities {
         // The capabilities field is unspecified.
         UnspecifiedAgentCapability = 0;
-        // The Agent can accept pipeline configuration from the Server.
-        AcceptsPipelineConfig          = 0x00000001;
+        // The Agent can accept continuous pipeline configuration from the Server.
+        AcceptsContinuousPipelineConfig = 0x00000001;
         // The Agent can accept instance configuration from the Server.
-        AcceptsInstanceConfig           = 0x00000002;
-        // The Agent can accept custom command from the Server.
-        AcceptsCustomCommand           = 0x00000004;
+        AcceptsInstanceConfig = 0x00000002;
+        // The Agent can accept onetime pipeline configuration from the Server.
+        AcceptsOnetimePipelineConfig = 0x00000004;
 
         // bits before 2^16 (inclusive) are reserved for future official fields
     }
@@ -99,15 +90,15 @@
 ### HeartbeatResponse Message
 
     message HeartbeatResponse {
-        bytes request_id = 1;  
-        ServerErrorResponse error_response = 2;             // Set value indicates error
-        uint64 capabilities = 3;                            // Bitmask of flags defined by ServerCapabilities enum
-
-        repeated ConfigDetail pipeline_config_updates = 4;  // Agent's pipeline config update status
-        repeated ConfigDetail instance_config_updates = 5;  // Agent's instance config update status
-        repeated CommandDetail custom_command_updates = 6;  // Agent's commands updates
-        uint64 flags = 7;                                   // Predefined command flag
-        bytes opaque = 8;                                   // Opaque data for extension
+        bytes request_id = 1;
+        CommonResponse common_response = 2; // Common response carrying status code and error message
+        uint64 capabilities = 3;            // Bitmask of flags defined by ServerCapabilities enum
+
+        repeated ConfigDetail continuous_pipeline_config_updates = 4; // Agent's continuous pipeline config update status
+        repeated ConfigDetail instance_config_updates = 5;            // Agent's instance config update status
+        repeated CommandDetail onetime_pipeline_config_updates = 6;   // Agent's onetime pipeline config updates
+        uint64 flags = 7;                                             // Predefined command flag
+        bytes opaque = 8;                                             // Opaque data for extension
     }
     
     message ConfigDetail {
@@ -118,24 +109,23 @@
     }
 
     message CommandDetail {
-        string type = 1;                // Required, Command type
-        string name = 2;                // Required, Command name
-        bytes detail = 3;               // Required, Command's detail
-        int64 expire_time = 4;          // After which the command can be safely removed from history
-        map<string, bytes> extra = 5;   // Optional extra info
+        string name = 1;              // Required, Command name
+        bytes detail = 2;             // Required, Command's detail
+        int64 expire_time = 3;        // After which the command can be safely removed from history
+        map<string, bytes> extra = 4; // Optional extra info
     }
 
     enum ServerCapabilities {
         // The capabilities field is unspecified.
-        UnspecifiedServerCapability        = 0;
+        UnspecifiedServerCapability = 0;
         // The Server can remember agent attributes.
-        RembersAttribute                   = 0x00000001;
-        // The Server can remember pipeline config status.
-        RembersPipelineConfigStatus        = 0x00000002;
+        RembersAttribute = 0x00000001;
+        // The Server can remember continuous pipeline config status.
+        RembersContinuousPipelineConfigStatus = 0x00000002;
         // The Server can remember instance config status.
-        RembersInstanceConfigStatus        = 0x00000004;
-        // The Server can remember custom command status.
-        RembersCustomCommandStatus         = 0x00000008;
+        RembersInstanceConfigStatus = 0x00000004;
+        // The Server can remember onetime pipeline config status.
+        RembersOnetimePipelineConfigStatus = 0x00000008;
 
         // bits before 2^16 (inclusive) are reserved for future official fields
     }
@@ -154,11 +144,11 @@
         // some sub-message in the last AgentToServer message (which is an allowed
         // optimization) but the Server detects that it does not have it (e.g. was
         // restarted and lost state).
-        ReportFullState           = 0x00000001;
-        // FetchPipelineConfigDetail can be used by the Server to tell Agent to fetch config details by FetchConfig api,
+        ReportFullState = 0x00000001;
+        // FetchContinuousPipelineConfigDetail can be used by the Server to tell the Agent to fetch continuous pipeline config details via the FetchConfig API,
         // HB response ConfigDetail will not contain details.
-        FetchPipelineConfigDetail = 0x00000002;
-        // like FetchPipelineConfigDetail, but for instance config.
+        FetchContinuousPipelineConfigDetail = 0x00000002;
+        // like FetchContinuousPipelineConfigDetail, but for instance config.
         FetchInstanceConfigDetail = 0x00000004;
         // bits before 2^16 (inclusive) are reserved for future official fields
     }
@@ -168,21 +158,21 @@
 An additional config fetch API; config details are not returned via the heartbeat.
 
     message FetchConfigRequest {
-        bytes request_id = 1; 
-        bytes instance_id = 2;                     // Agent's unique identification
-        repeated ConfigInfo pipeline_configs = 3;  // Information about the current PIPELINE_CONFIG held by the Agent
-        repeated ConfigInfo instance_configs = 4;  // Information about the current AGENT_CONFIG held by the Agent
-        repeated CommandInfo custom_commands = 5;  // Information about command history
+        bytes request_id = 1;
+        bytes instance_id = 2;                               // Agent's unique identification
+        repeated ConfigInfo continuous_pipeline_configs = 3; // Information about the current continuous pipeline configs held by the Agent
+        repeated ConfigInfo instance_configs = 4;            // Information about the current instance configs held by the Agent
+        repeated ConfigInfo onetime_pipeline_configs = 5;    // Information about the onetime pipeline config history
     }
 
 ### [Optional] FetchConfigResponse Message
 
     message FetchConfigResponse {
-        bytes request_id = 1;                     
-        CommonResponse commonResponse = 2;
-        repeated ConfigDetail pipeline_config_updates = 3;  // Agent's pipeline config with details
-        repeated ConfigDetail instance_config_updates = 4;  // Agent's instance config with details
-        repeated CommandDetail custom_command_updates = 5;  // Agent's commands details
+        bytes request_id = 1;
+        CommonResponse common_response = 2;
+        repeated ConfigDetail continuous_pipeline_config_updates = 3; // Agent's continuous pipeline config with details
+        repeated ConfigDetail instance_config_updates = 4;            // Agent's instance config with details
+        repeated CommandDetail onetime_pipeline_config_updates = 5;   // Agent's onetime pipeline config details
     }
 
 ### [Optional] ReportStatusRequest Message
@@ -191,17 +181,17 @@
 
     message ReportStatusRequest {
         bytes request_id = 1;
-        bytes instance_id = 2;                     // Agent's unique identification
-        repeated ConfigInfo pipeline_configs = 3;  // status about the current PIPELINE_CONFIG held by the Agent
-        repeated ConfigInfo instance_configs = 4;  // status about the current AGENT_CONFIG held by the Agent
-        repeated CommandInfo custom_commands = 5;  // status about command history
+        bytes instance_id = 2;                               // Agent's unique identification
+        repeated ConfigInfo continuous_pipeline_configs = 3; // Status of the current continuous pipeline configs held by the Agent
+        repeated ConfigInfo instance_configs = 4;            // Status of the current instance configs held by the Agent
+        repeated ConfigInfo onetime_pipeline_configs = 5;    // Status of the onetime pipeline config history
     }
 
 ### [Optional] ReportStatusResponse Message
 
     message ReportStatusResponse {
         bytes request_id = 1;
-        CommonResponse commonResponse = 2;
+        CommonResponse common_response = 2;
     }
 
 ## Behavior Specification
@@ -218,7 +208,7 @@ Server:应当通过capbilitiies上报Server自身的能力,这样如果新
 
 Client: On its first report to the Server after startup, the Agent reports full information and should fill in as many request fields as possible. request\_id, sequence\_num, capabilities, instance\_id, agent\_type, and startup\_time are required fields.
 
-Server: The Server returns a response based on the reported information. pipeline\_config\_updates and instance\_config\_updates contain the configs the Agent needs to synchronize; each update always contains name and version, and whether it contains detail depends on the server-side implementation. If detail is not included, it must be fetched via FetchConfig. custom\_command\_updates contains the commands the Agent is required to execute; each command always contains type, name, and expire\_time.
+Server: The Server returns a response based on the reported information. continuous\_pipeline\_config\_updates and instance\_config\_updates contain the configs the Agent needs to synchronize; each update always contains name and version, and whether it contains detail depends on the server-side implementation. If detail is not included, it must be fetched via FetchConfig. onetime\_pipeline\_config\_updates contains the commands the Agent is required to execute; each always contains name and expire\_time.
 
 Whether the Server persists Client information depends on the Server implementation. If the server cannot find the record, or the stored sequence\_num + 1 ≠ the heartbeat's sequence\_num, it must return immediately and set the ReportFullStatus flag in flags.
 
@@ -243,10 +233,11 @@ Server:同注册
 ### Instance Config
 
 Two implementations are possible:
+
 1. Report and synchronize instance config status entirely within the heartbeat.
 
     The Server's registration/heartbeat response carries instance\_config\_updates.detail; the client obtains the detail directly from the response and, after applying it successfully, must report the full status in the next heartbeat.
-    
+
 2. Synchronize only basic instance config information in the heartbeat and fetch the instance config details through an additional API.
 
     The Server's response does not contain detail; it only contains the name and version of the instance configs to update. After comparing its local configs with the version and deciding an update is needed, the client builds a FetchConfigRequest from the instance_config_updates information and performs an additional fetch. The FetchConfigRequest must include at least name and version.
@@ -258,23 +249,24 @@ Client获取到多个进程配置时,自动合并,若产生冲突默认行
 ### Collection Config
 
 Two implementations are possible:
+
 1. Report and synchronize collection config status entirely within the heartbeat.
 
-    The Server's registration/heartbeat response carries pipeline\_config\_updates.detail; the Client obtains the detail directly from the response and, after applying it successfully, must report the full status in the next heartbeat.
+    The Server's registration/heartbeat response carries continuous\_pipeline\_config\_updates.detail; the Client obtains the detail directly from the response and, after applying it successfully, must report the full status in the next heartbeat.
 
 2. Synchronize only basic collection config information in the heartbeat and fetch the collection config details through an additional API.
 
-    The Server's response does not contain detail; it only contains the name and version of the collection configs to update. After comparing its local configs with the version and deciding an update is needed, the client builds a FetchConfigRequest from the pipeline_config_updates information and performs an additional fetch. The FetchConfigRequest must include at least name and version.
+    The Server's response does not contain detail; it only contains the name and version of the collection configs to update. After comparing its local configs with the version and deciding an update is needed, the client builds a FetchConfigRequest from the continuous_pipeline_config_updates information and performs an additional fetch. The FetchConfigRequest must include at least name and version.
 
-    The heartbeat response flags must set FetchPipelineConfigDetail.
+    The heartbeat response flags must set FetchContinuousPipelineConfigDetail.
 
 The client side has the following two implementations:
 
-Implementation 1: return the Detail directly in the heartbeat response (FetchPipelineConfigDetail flag is unset)
+Implementation 1: return the Detail directly in the heartbeat response (FetchContinuousPipelineConfigDetail flag is unset)
 
 ![image](https://github.com/alibaba/ilogtail/assets/1827594/be645615-dd99-42dd-9deb-681e9a4069bb)
 
-Implementation 2: return only the config name and version; the Detail is fetched with a separate request (FetchPipelineConfigDetail flag is set)
+Implementation 2: return only the config name and version; the Detail is fetched with a separate request (FetchContinuousPipelineConfigDetail flag is set)
 
 ![image](https://github.com/alibaba/ilogtail/assets/1827594/c409c35c-2a81-4927-bfd2-7fb321ef1ca8)
 
@@ -287,6 +279,7 @@ Client获取到多个进程配置时,自动合并,若产生冲突默认行
 For the Server: this information is part of the Agent's state and may optionally be persisted. Unlike observability data reported via Events, state information has no time attribute; users can query the instantaneous state through an API without having to pick a time window and merge events.
 
 As with instance configs and collection configs, there are two optional ways to report config status:
+
 1. Carry the latest config status in the heartbeat request.
 
     Report the latest version and status of the instance configs and collection configs together in the heartbeat. In addition, as defined by the heartbeat protocol, once a config status changes, the latest status must be reported in the heartbeat; if the status has not changed since the last heartbeat, reporting it is not required.
@@ -296,7 +289,9 @@ Client获取到多个进程配置时,自动合并,若产生冲突默认行
     Reporting through the additional ReportStatus API reduces the complexity of the heartbeat service to some extent and makes it easier to split the status service from the heartbeat service. The ReportStatus API does not have to wait for the next heartbeat; it can report as soon as a config status changes.
 
 ### Choosing Between Heartbeat-Based and Separate-API Fetching/Reporting
+
 The config status reporting method should match the config fetching method:
+
 1. If both instance configs and collection configs are delivered via the heartbeat, config status should also be reported only via the heartbeat.
 2. If both instance configs and collection configs are fetched via the FetchConfig API, status should also be reported via ReportStatus.
 
@@ -315,7 +310,7 @@ Server: 通过response的flag传递,定义了ReportFullStatus,表明要求C
 
 Client: To keep the server from delivering commands repeatedly and to let it learn the command execution result, the Client should always be able to report command execution status to the server before the command expires; whether it actually reports depends on the heartbeat compression mechanism. After expire\_time has passed, the client should no longer report the status of the expired command.
 
-Server: If the reported state plus the known Agent state is missing custom\_command\_updates that should be delivered (identified by name), the server should deliver the missing custom\_command\_updates in the response.
+Server: If the reported state plus the known Agent state is missing onetime\_pipeline\_config\_updates that should be delivered (identified by name), the server should deliver the missing onetime\_pipeline\_config\_updates in the response.
 
 ### Error Handling
 
@@ -324,6 +319,7 @@ Server: 服务端正常返回时HeartbeatResponse中的code应始终设置为0
 Client: When the code in the HeartbeatResponse is 0, the Agent should process the delivered configs normally. When the code is not 0, the Agent must ignore all fields other than code and message and retry later.
 
 ### Auxiliary Information
-command\_info, command\_detail, config\_info, and config\_detail all reserve an extra field, which can be used to pass additional user-defined auxiliary information.\
+
+command\_detail, config\_info, and config\_detail all reserve an extra field, which can be used to pass additional user-defined auxiliary information.\
 
 Note: the extra field is only for passing auxiliary information; it has no effect on control behavior.
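
For reference, here is a minimal sketch of how an Agent might populate the renamed HeartbeatRequest fields, assuming the standard C++ classes that protoc generates from agentV2.proto (the header name, namespace handling, and all literal values are illustrative assumptions, not part of the protocol):

    #include "agentV2.pb.h" // assumed protoc output name; adjust to the actual proto package

    HeartbeatRequest BuildHeartbeat(uint64_t seq) {
        HeartbeatRequest req;
        req.set_request_id("req-0001");
        req.set_sequence_num(seq); // increment on every request
        req.set_capabilities(AcceptsContinuousPipelineConfig | AcceptsInstanceConfig
                             | AcceptsOnetimePipelineConfig);
        req.set_instance_id("instance-uuid"); // stable for the whole process lifecycle
        req.set_agent_type("loongcollector");
        req.set_startup_time(1700000000);
        // Report one continuous pipeline config currently held by the Agent.
        ConfigInfo* info = req.add_continuous_pipeline_configs();
        info->set_name("example_pipeline");
        info->set_version(3);
        return req;
    }
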
diff --git a/config_server/protocol/v2/agentV2.proto b/config_server/protocol/v2/agentV2.proto
index 649cd433da..965e553772 100644
--- a/config_server/protocol/v2/agentV2.proto
+++ b/config_server/protocol/v2/agentV2.proto
@@ -21,41 +21,32 @@ enum ConfigStatus {
 
 // Define the Config information carried in the request
 message ConfigInfo {
-    string name = 1;         // Required, Config's unique identification
-    int64 version = 2;       // Required, Config's version number or hash code
-    ConfigStatus status = 3; // Config's status
-    string message = 4;      // Optional error message
-    map<string, bytes> extra = 5; // Optional extra info
-}
-
-// Define the Command information carried in the request
-message CommandInfo {
-    string type = 1;         // Command's type
-    string name = 2;         // Required, Command's unique identification
-    ConfigStatus status = 3; // Command's status
-    string message = 4;      // Optional error message
+    string name = 1;              // Required, Config's unique identification
+    int64 version = 2;            // Required, Config's version number or hash code
+    ConfigStatus status = 3;      // Config's status
+    string message = 4;           // Optional error message
     map<string, bytes> extra = 5; // Optional extra info
 }
 
 // Define Agent's basic attributes
 message AgentAttributes {
-    bytes version = 1;                 // Agent's version
-    bytes ip = 2;                      // Agent's ip
-    bytes hostname = 3;                // Agent's hostname
-    bytes hostid = 4;                  // Agent's hostid  https://opentelemetry.io/docs/specs/semconv/attributes-registry/host/
-    map<string, bytes> extras = 100;   // Agent's other attributes
+    bytes version = 1;               // Agent's version
+    bytes ip = 2;                    // Agent's ip
+    bytes hostname = 3;              // Agent's hostname
+    bytes hostid = 4;                // Agent's hostid  https://opentelemetry.io/docs/specs/semconv/attributes-registry/host/
+    map<string, bytes> extras = 100; // Agent's other attributes
     // before 100 (inclusive) are reserved for future official fields
 }
 
 enum AgentCapabilities {
     // The capabilities field is unspecified.
-    UnspecifiedAgentCapability     = 0;
-    // The Agent can accept pipeline configuration from the Server.
-    AcceptsPipelineConfig          = 0x00000001;
+    UnspecifiedAgentCapability = 0;
+    // The Agent can accept continuous pipeline configuration from the Server.
+    AcceptsContinuousPipelineConfig = 0x00000001;
     // The Agent can accept instance configuration from the Server.
-    AcceptsInstanceConfig          = 0x00000002;
-    // The Agent can accept custom command from the Server.
-    AcceptsCustomCommand           = 0x00000004;
+    AcceptsInstanceConfig = 0x00000002;
+    // The Agent can accept onetime pipeline configuration from the Server.
+    AcceptsOnetimePipelineConfig = 0x00000004;
 
     // Add new capabilities here, continuing with the least significant unused bit.
 }
@@ -66,7 +57,7 @@ enum RequestFlags {
     // Flags is a bit mask. Values below define individual bits.
 
     // Must be set if this request contains full state
-    FullState               = 0x00000001;
+    FullState = 0x00000001;
     // bits before 2^16 (inclusive) are reserved for future official fields
 }
 
@@ -75,49 +66,48 @@ enum RequestFlags {
 // Agent sends requests to the ConfigServer to get config updates and receive commands.
 message HeartbeatRequest {
     bytes request_id = 1;
-    uint64 sequence_num = 2;                    // Increment every request, for server to check sync status
-    uint64 capabilities = 3;                    // Bitmask of flags defined by AgentCapabilities enum
-    bytes instance_id = 4;                      // Required, Agent's unique identification, consistent throughout the process lifecycle
-    string agent_type = 5;                      // Required, Agent's type(ilogtail, ..)
-    AgentAttributes attributes = 6;             // Agent's basic attributes
-    repeated AgentGroupTag tags = 7;            // Agent's tags
-    string running_status = 8;                  // Human readable running status
-    int64 startup_time = 9;                     // Required, Agent's startup time
-    repeated ConfigInfo pipeline_configs = 10;  // Information about the current PIPELINE_CONFIG held by the Agent
-    repeated ConfigInfo instance_configs = 11;  // Information about the current AGENT_CONFIG held by the Agent
-    repeated CommandInfo custom_commands = 12;  // Information about command history
-    uint64 flags = 13;                          // Predefined command flag
-    bytes opaque = 14;                          // Opaque data for extension
+    uint64 sequence_num = 2;                              // Increment every request, for server to check sync status
+    uint64 capabilities = 3;                              // Bitmask of flags defined by AgentCapabilities enum
+    bytes instance_id = 4;                                // Required, Agent's unique identification, consistent throughout the process lifecycle
+    string agent_type = 5;                                // Required, Agent's type(ilogtail, ..)
+    AgentAttributes attributes = 6;                       // Agent's basic attributes
+    repeated AgentGroupTag tags = 7;                      // Agent's tags
+    string running_status = 8;                            // Human readable running status
+    int64 startup_time = 9;                               // Required, Agent's startup time
+    repeated ConfigInfo continuous_pipeline_configs = 10; // Information about the current continuous pipeline configs held by the Agent
+    repeated ConfigInfo instance_configs = 11;            // Information about the current instance configs held by the Agent
+    repeated ConfigInfo onetime_pipeline_configs = 12;    // Information about the onetime pipeline config history
+    uint64 flags = 13;                                    // Predefined command flag
+    bytes opaque = 14;                                    // Opaque data for extension
     // before 100 (inclusive) are reserved for future official fields
 }
 
 // Define Config's detail
 message ConfigDetail {
-    string name = 1;        // Required, Config's unique identification
-    int64 version = 2;      // Required, Config's version number or hash code
-    bytes detail = 3;       // Required, Config's detail
+    string name = 1;              // Required, Config's unique identification
+    int64 version = 2;            // Required, Config's version number or hash code
+    bytes detail = 3;             // Required, Config's detail
     map<string, bytes> extra = 4; // Optional extra info
 }
 
 message CommandDetail {
-    string type = 1;                // Required, Command type
-    string name = 2;                // Required, Command name
-    bytes detail = 3;               // Required, Command's detail
-    int64 expire_time = 4;          // After which the command can be safely removed from history
-    map<string, bytes> extra = 5;   // Optional extra info
+    string name = 1;              // Required, Command name
+    bytes detail = 2;             // Required, Command's detail
+    int64 expire_time = 3;        // After which the command can be safely removed from history
+    map<string, bytes> extra = 4; // Optional extra info
 }
 
 enum ServerCapabilities {
     // The capabilities field is unspecified.
-    UnspecifiedServerCapability        = 0;
+    UnspecifiedServerCapability = 0;
     // The Server can remember agent attributes.
-    RembersAttribute                   = 0x00000001;
-    // The Server can remember pipeline config status.
-    RembersPipelineConfigStatus        = 0x00000002;
+    RembersAttribute = 0x00000001;
+    // The Server can remember continuous pipeline config status.
+    RembersContinuousPipelineConfigStatus = 0x00000002;
     // The Server can remember instance config status.
-    RembersInstanceConfigStatus        = 0x00000004;
-    // The Server can remember custom command status.
-    RembersCustomCommandStatus         = 0x00000008;
+    RembersInstanceConfigStatus = 0x00000004;
+    // The Server can remember onetime pipeline config status.
+    RembersOnetimePipelineConfigStatus = 0x00000008;
 
     // bits before 2^16 (inclusive) are reserved for future official fields
 }
@@ -131,45 +121,45 @@ enum ResponseFlags {
     // some sub-message in the last AgentToServer message (which is an allowed
     // optimization) but the Server detects that it does not have it (e.g. was
     // restarted and lost state).
-    ReportFullState           = 0x00000001;
-    // FetchPipelineConfigDetail can be used by the Server to tell Agent to fetch config details by FetchConfig api,
+    ReportFullState = 0x00000001;
+    // FetchContinuousPipelineConfigDetail can be used by the Server to tell the Agent to fetch continuous pipeline config details via the FetchConfig API,
     // HB response ConfigDetail will not contain details.
-    FetchPipelineConfigDetail = 0x00000002;
-    // like FetchPipelineConfigDetail, but for instance config.
+    FetchContinuousPipelineConfigDetail = 0x00000002;
+    // like FetchContinuousPipelineConfigDetail, but for instance config.
     FetchInstanceConfigDetail = 0x00000004;
     // bits before 2^16 (inclusive) are reserved for future official fields
 }
 
 // ConfigServer's response to Agent's request
 message HeartbeatResponse {
-    bytes request_id = 1;  
-    CommonResponse commonResponse = 2;                  // Set common response
-    uint64 capabilities = 3;                            // Bitmask of flags defined by ServerCapabilities enum
+    bytes request_id = 1;
+    CommonResponse common_response = 2; // Common response carrying status code and error message
+    uint64 capabilities = 3;            // Bitmask of flags defined by ServerCapabilities enum
 
-    repeated ConfigDetail pipeline_config_updates = 4;  // Agent's pipeline config update status
-    repeated ConfigDetail instance_config_updates = 5;  // Agent's instance config update status
-    repeated CommandDetail custom_command_updates = 6;  // Agent's commands updates
-    uint64 flags = 7;                                   // Predefined command flag
-    bytes opaque = 8;                                   // Opaque data for extension
+    repeated ConfigDetail continuous_pipeline_config_updates = 4; // Agent's continuous pipeline config update status
+    repeated ConfigDetail instance_config_updates = 5;            // Agent's instance config update status
+    repeated CommandDetail onetime_pipeline_config_updates = 6;   // Agent's onetime pipeline config updates
+    uint64 flags = 7;                                             // Predefined command flag
+    bytes opaque = 8;                                             // Opaque data for extension
 }
 
 // API: /Agent/FetchConfig
 // Optional API for fetching config details when the heartbeat response does not carry them; see README.
 message FetchConfigRequest {
-    bytes request_id = 1; 
-    bytes instance_id = 2;                     // Agent's unique identification
-    repeated ConfigInfo pipeline_configs = 3;  // Information about the current PIPELINE_CONFIG held by the Agent
-    repeated ConfigInfo instance_configs = 4;  // Information about the current AGENT_CONFIG held by the Agent
-    repeated CommandInfo custom_commands = 5;  // Information about command history
+    bytes request_id = 1;
+    bytes instance_id = 2;                               // Agent's unique identification
+    repeated ConfigInfo continuous_pipeline_configs = 3; // Information about the current continuous pipeline configs held by the Agent
+    repeated ConfigInfo instance_configs = 4;            // Information about the current instance configs held by the Agent
+    repeated ConfigInfo onetime_pipeline_configs = 5;    // Information about the onetime pipeline config history
 }
 
 // ConfigServer response to Agent's config fetching request
 message FetchConfigResponse {
-    bytes request_id = 1;                     
-    CommonResponse commonResponse = 2;
-    repeated ConfigDetail pipeline_config_updates = 3;  // Agent's pipeline config with details
-    repeated ConfigDetail instance_config_updates = 4;  // Agent's instance config with details
-    repeated CommandDetail custom_command_updates = 5;  // Agent's commands details
+    bytes request_id = 1;
+    CommonResponse common_response = 2;
+    repeated ConfigDetail continuous_pipeline_config_updates = 3; // Agent's continuous pipeline config with details
+    repeated ConfigDetail instance_config_updates = 4;            // Agent's instance config with details
+    repeated CommandDetail onetime_pipeline_config_updates = 5;   // Agent's onetime pipeline config details
 }
 
 // API: /Agent/ReportStatus
@@ -177,20 +167,19 @@ message FetchConfigResponse {
 // If the HB server and the Status server are different services, this API may be helpful.
 message ReportStatusRequest {
     bytes request_id = 1;
-    bytes instance_id = 2;                     // Agent's unique identification
-    repeated ConfigInfo pipeline_configs = 3;  // status about the current PIPELINE_CONFIG held by the Agent
-    repeated ConfigInfo instance_configs = 4;  // status about the current AGENT_CONFIG held by the Agent
-    repeated CommandInfo custom_commands = 5;  // status about command history
+    bytes instance_id = 2;                               // Agent's unique identification
+    repeated ConfigInfo continuous_pipeline_configs = 3; // Status of the current continuous pipeline configs held by the Agent
+    repeated ConfigInfo instance_configs = 4;            // Status of the current instance configs held by the Agent
+    repeated ConfigInfo onetime_pipeline_configs = 5;    // Status of the onetime pipeline config history
 }
 
 // ConfigServer response to Agent's report status request
 message ReportStatusResponse {
     bytes request_id = 1;
-    CommonResponse commonResponse = 2;
+    CommonResponse common_response = 2;
 }
 
-message CommonResponse
-{
+message CommonResponse {
     int32 status = 1;
     bytes errorMessage = 2;
 }
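
Correspondingly, a server-side sketch of the renamed HeartbeatResponse fields, again assuming the protoc-generated C++ classes (names and values below are illustrative):

    #include "agentV2.pb.h" // assumed protoc output name

    HeartbeatResponse BuildResponse(const HeartbeatRequest& req) {
        HeartbeatResponse resp;
        resp.set_request_id(req.request_id());
        resp.mutable_common_response()->set_status(0); // 0 indicates success, see the error handling section
        resp.set_capabilities(RembersAttribute | RembersContinuousPipelineConfigStatus);
        // Deliver a onetime pipeline config; name and expire_time are mandatory.
        CommandDetail* cmd = resp.add_onetime_pipeline_config_updates();
        cmd->set_name("example_onetime_job");
        cmd->set_expire_time(1700003600);
        return resp;
    }
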
diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt
index fe5810d906..cf962d4e29 100644
--- a/core/CMakeLists.txt
+++ b/core/CMakeLists.txt
@@ -33,6 +33,7 @@ cmake_dependent_option(ENABLE_STATIC_LINK_CRT "Build Logtail by linking CRT stat
 option(WITHOUTGDB "Build Logtail without gdb")
 option(WITHSPL "Build Logtail and UT with SPL" ON)
 option(BUILD_LOGTAIL_UT "Build unit test for Logtail")
+cmake_dependent_option(ENABLE_ADDRESS_SANITIZER "Enable address sanitizer" ON "CMAKE_BUILD_TYPE STREQUAL Debug;NOT ANDROID" OFF)
 set(PROVIDER_PATH "provider" CACHE PATH "Path to the provider module") # external provider path can be set with -DPROVIDER_PATH
 set(UNITTEST_PATH "unittest" CACHE PATH "Path to the unittest module") # external unittest path can be set with -DUNITTEST_PATH
 
@@ -61,8 +62,8 @@ if (UNIX)
         set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -ggdb")
         set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -ggdb")
     endif ()
-    set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O1 -fno-omit-frame-pointer")
-    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O1 -fno-omit-frame-pointer")
+    set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -fno-omit-frame-pointer")
+    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -fno-omit-frame-pointer")
     set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O2")
     set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O2")
     string(REPLACE "-O3" "" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}")
@@ -116,6 +117,7 @@ set(SUB_DIRECTORIES_LIST
         application app_config checkpoint container_manager metadata logger go_pipeline monitor monitor/metric_constants monitor/profile_sender models
         config config/watcher constants
         pipeline pipeline/batch pipeline/limiter pipeline/plugin pipeline/plugin/creator pipeline/plugin/instance pipeline/plugin/interface pipeline/queue pipeline/route pipeline/serializer
+        task_pipeline
         runner runner/sink/http
         protobuf/sls protobuf/models
         file_server file_server/event file_server/event_handler file_server/event_listener file_server/reader file_server/polling
diff --git a/core/app_config/AppConfig.cpp b/core/app_config/AppConfig.cpp
index 777509210e..d492af6b7b 100644
--- a/core/app_config/AppConfig.cpp
+++ b/core/app_config/AppConfig.cpp
@@ -25,12 +25,13 @@
 #include "common/FileSystemUtil.h"
 #include "common/JsonUtil.h"
 #include "common/LogtailCommonFlags.h"
+#include "common/version.h"
+#include "config/InstanceConfigManager.h"
 #include "config/watcher/InstanceConfigWatcher.h"
 #include "file_server/ConfigManager.h"
 #include "file_server/reader/LogFileReader.h"
 #include "json/value.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/AlarmManager.h"
 #include "monitor/Monitor.h"
 #ifdef __ENTERPRISE__
@@ -39,6 +40,10 @@
 
 using namespace std;
 
+#define ILOGTAIL_PREFIX "ilogtail_"
+#define ILOGTAIL_PIDFILE_SUFFIX ".pid"
+#define LOONGCOLLECTOR_PREFIX "loongcollector_"
+
 DEFINE_FLAG_BOOL(logtail_mode, "logtail mode", false);
 DEFINE_FLAG_INT32(max_buffer_num, "max size", 40);
 DEFINE_FLAG_INT32(pub_max_buffer_num, "max size", 8);
@@ -433,14 +438,6 @@ string GetAgentSnapshotDir() {
     }
 }
 
-string GetAgentProfileLogName() {
-    if (BOOL_FLAG(logtail_mode)) {
-        return "ilogtail_profile.LOG";
-    } else {
-        return "loongcollector_profile.LOG";
-    }
-}
-
 string GetAgentStatusLogName() {
     if (BOOL_FLAG(logtail_mode)) {
         return "ilogtail_status.LOG";
@@ -449,15 +446,6 @@ string GetAgentStatusLogName() {
     }
 }
 
-string GetProfileSnapshotDumpFileName() {
-    if (BOOL_FLAG(logtail_mode)) {
-        return GetProcessExecutionDir() + STRING_FLAG(logtail_profile_snapshot);
-    } else {
-        return GetAgentLogDir() + "loongcollector_profile_snapshot";
-    }
-}
-
-
 string GetObserverEbpfHostPath() {
     if (BOOL_FLAG(logtail_mode)) {
         return STRING_FLAG(sls_observer_ebpf_host_path);
@@ -500,11 +488,75 @@ string GetFileTagsDir() {
     }
 }
 
-string GetPipelineConfigDir() {
+string GetContinuousPipelineConfigDir() {
     if (BOOL_FLAG(logtail_mode)) {
         return "config";
     } else {
-        return "pipeline_config";
+        return "continuous_pipeline_config";
+    }
+}
+
+string GetPluginLogName() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return "logtail_plugin.LOG";
+    } else {
+        return "go_plugin.LOG";
+    }
+}
+
+std::string GetVersionTag() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return "logtail_version";
+    } else {
+        return "loongcollector_version";
+    }
+}
+
+std::string GetGoPluginCheckpoint() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return "checkpoint";
+    } else {
+        return "go_plugin_checkpoint";
+    }
+}
+
+std::string GetAgentName() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return "ilogtail";
+    } else {
+        return "loongcollector";
+    }
+}
+
+std::string GetMonitorInfoFileName() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return "logtail_monitor_info";
+    } else {
+        return "loongcollector_monitor_info";
+    }
+}
+
+std::string GetSymLinkName() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return GetProcessExecutionDir() + "ilogtail";
+    } else {
+        return GetProcessExecutionDir() + "loongcollector";
+    }
+}
+
+std::string GetPidFileName() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return GetProcessExecutionDir() + ILOGTAIL_PREFIX + ILOGTAIL_VERSION + ILOGTAIL_PIDFILE_SUFFIX;
+    } else {
+        return GetAgentRunDir() + "loongcollector.pid";
+    }
+}
+
+std::string GetAgentPrefix() {
+    if (BOOL_FLAG(logtail_mode)) {
+        return ILOGTAIL_PREFIX;
+    } else {
+        return LOONGCOLLECTOR_PREFIX;
     }
 }
 
@@ -884,14 +936,10 @@ void AppConfig::LoadResourceConf(const Json::Value& confJson) {
                        "reader_close_unused_file_time",
                        "ALIYUN_LOGTAIL_READER_CLOSE_UNUSED_FILE_TIME");
 
-    if (confJson.isMember("log_profile_save_interval") && confJson["log_profile_save_interval"].isInt())
-        LogFileProfiler::GetInstance()->SetProfileInterval(confJson["log_profile_save_interval"].asInt());
-
     LOG_DEBUG(sLogger,
               ("logreader delete interval", INT32_FLAG(logreader_filedeleted_remove_interval))(
                   "check handler interval", INT32_FLAG(check_handler_timeout_interval))(
-                  "reader close interval", INT32_FLAG(reader_close_unused_file_time))(
-                  "profile interval", LogFileProfiler::GetInstance()->GetProfileInterval()));
+                  "reader close interval", INT32_FLAG(reader_close_unused_file_time)));
 
 
     if (confJson.isMember("cpu_usage_limit")) {
@@ -1490,8 +1538,8 @@ void AppConfig::ReadFlagsFromMap(const std::unordered_map<std::string, std::stri
  *    - Record values that cannot be converted
  */
 void AppConfig::RecurseParseJsonToFlags(const Json::Value& confJson, std::string prefix) {
-    const static unordered_set<string> sIgnoreKeySet = {"data_server_list", "legacy_data_server_list"};
-    const static unordered_set<string> sForceKeySet = {"config_server_address_list", "config_server_list"};
+    const static unordered_set<string> sIgnoreKeySet = {"data_server_list", "data_servers"};
+    const static unordered_set<string> sForceKeySet = {"config_server_address_list", "config_servers"};
     for (auto name : confJson.getMemberNames()) {
         auto jsonvalue = confJson[name];
         string fullName;
@@ -1667,7 +1715,7 @@ void AppConfig::SetLoongcollectorConfDir(const std::string& dirPath) {
     //     = AbsolutePath(STRING_FLAG(ilogtail_local_yaml_config_dir), mLogtailSysConfDir) + PATH_SEPARATOR;
     // mUserRemoteYamlConfigDirPath
     //     = AbsolutePath(STRING_FLAG(ilogtail_remote_yaml_config_dir), mLogtailSysConfDir) + PATH_SEPARATOR;
-    LOG_INFO(sLogger, ("set loongcollector conf dir", mLoongcollectorConfDir));
+    LOG_INFO(sLogger, ("set " + GetAgentName() + " conf dir", mLoongcollectorConfDir));
 }
 
 bool AppConfig::IsHostPathMatchBlacklist(const string& dirPath) const {
diff --git a/core/app_config/AppConfig.h b/core/app_config/AppConfig.h
index 22a5c6dd73..25cdb0fdcb 100644
--- a/core/app_config/AppConfig.h
+++ b/core/app_config/AppConfig.h
@@ -18,12 +18,12 @@
 
 #include <json/json.h>
 
+#include <functional>
 #include <map>
 #include <string>
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
-#include <functional>
 
 #include "InstanceConfig.h"
 #include "protobuf/sls/sls_logs.pb.h"
@@ -49,14 +49,21 @@ std::string GetInotifyWatcherDirsDumpFileName();
 std::string GetAgentLoggersPrefix();
 std::string GetAgentLogName();
 std::string GetAgentSnapshotDir();
-std::string GetAgentProfileLogName();
 std::string GetAgentStatusLogName();
-std::string GetProfileSnapshotDumpFileName();
 std::string GetObserverEbpfHostPath();
 std::string GetSendBufferFileNamePrefix();
 std::string GetLegacyUserLocalConfigFilePath();
 std::string GetExactlyOnceCheckpoint();
+std::string GetContinuousPipelineConfigDir();
 std::string GetPipelineConfigDir();
+std::string GetPluginLogName();
+std::string GetVersionTag();
+std::string GetGoPluginCheckpoint();
+std::string GetAgentName();
+std::string GetMonitorInfoFileName();
+std::string GetSymLinkName();
+std::string GetPidFileName();
+std::string GetAgentPrefix();
 
 template <class T>
 class DoubleBuffer {
diff --git a/core/application/Application.cpp b/core/application/Application.cpp
index 3fcd0fabeb..42c0fbc3f3 100644
--- a/core/application/Application.cpp
+++ b/core/application/Application.cpp
@@ -32,15 +32,14 @@
 #include "common/version.h"
 #include "config/ConfigDiff.h"
 #include "config/InstanceConfigManager.h"
-#include "config/watcher/ConfigWatcher.h"
 #include "config/watcher/InstanceConfigWatcher.h"
+#include "config/watcher/PipelineConfigWatcher.h"
 #include "file_server/ConfigManager.h"
 #include "file_server/EventDispatcher.h"
 #include "file_server/FileServer.h"
 #include "file_server/event_handler/LogInput.h"
 #include "go_pipeline/LogtailPlugin.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/MetricExportor.h"
 #include "monitor/Monitor.h"
 #include "pipeline/PipelineManager.h"
@@ -49,9 +48,11 @@
 #include "pipeline/queue/SenderQueueManager.h"
 #include "plugin/flusher/sls/DiskBufferWriter.h"
 #include "plugin/input/InputFeedbackInterfaceRegistry.h"
+#include "prometheus/PrometheusInputRunner.h"
 #include "runner/FlusherRunner.h"
 #include "runner/ProcessorRunner.h"
 #include "runner/sink/http/HttpSink.h"
+#include "task_pipeline/TaskPipelineManager.h"
 #ifdef __ENTERPRISE__
 #include "config/provider/EnterpriseConfigProvider.h"
 #include "config/provider/LegacyConfigProvider.h"
@@ -112,7 +113,7 @@ void Application::Init() {
     AppConfig::GetInstance()->LoadAppConfig(GetAgentConfigFile());
 
     // Initialize basic information: IP, hostname, etc.
-    LogFileProfiler::GetInstance();
+    LoongCollectorMonitor::GetInstance();
 #ifdef __ENTERPRISE__
     EnterpriseConfigProvider::GetInstance()->Init("enterprise");
     EnterpriseConfigProvider::GetInstance()->LoadRegionConfig();
@@ -133,25 +134,25 @@ void Application::Init() {
     const string& interface = AppConfig::GetInstance()->GetBindInterface();
     const string& configIP = AppConfig::GetInstance()->GetConfigIP();
     if (!configIP.empty()) {
-        LogFileProfiler::mIpAddr = configIP;
+        LoongCollectorMonitor::mIpAddr = configIP;
         LogtailMonitor::GetInstance()->UpdateConstMetric("logtail_ip", GetHostIp());
     } else if (!interface.empty()) {
-        LogFileProfiler::mIpAddr = GetHostIp(interface);
-        if (LogFileProfiler::mIpAddr.empty()) {
+        LoongCollectorMonitor::mIpAddr = GetHostIp(interface);
+        if (LoongCollectorMonitor::mIpAddr.empty()) {
             LOG_WARNING(sLogger,
                         ("failed to get ip from interface", "try to get any available ip")("interface", interface));
         }
-    } else if (LogFileProfiler::mIpAddr.empty()) {
+    } else if (LoongCollectorMonitor::mIpAddr.empty()) {
         LOG_WARNING(sLogger, ("failed to get ip from hostname or eth0 or bond0", "try to get any available ip"));
     }
-    if (LogFileProfiler::mIpAddr.empty()) {
-        LogFileProfiler::mIpAddr = GetAnyAvailableIP();
-        LOG_INFO(sLogger, ("get available ip succeeded", LogFileProfiler::mIpAddr));
+    if (LoongCollectorMonitor::mIpAddr.empty()) {
+        LoongCollectorMonitor::mIpAddr = GetAnyAvailableIP();
+        LOG_INFO(sLogger, ("get available ip succeeded", LoongCollectorMonitor::mIpAddr));
     }
 
     const string& configHostName = AppConfig::GetInstance()->GetConfigHostName();
     if (!configHostName.empty()) {
-        LogFileProfiler::mHostname = configHostName;
+        LoongCollectorMonitor::mHostname = configHostName;
         LogtailMonitor::GetInstance()->UpdateConstMetric("logtail_hostname", GetHostName());
     }
 
@@ -165,18 +166,19 @@ void Application::Init() {
 #endif
 
     int32_t systemBootTime = AppConfig::GetInstance()->GetSystemBootTime();
-    LogFileProfiler::mSystemBootTime = systemBootTime > 0 ? systemBootTime : GetSystemBootTime();
+    LoongCollectorMonitor::mSystemBootTime = systemBootTime > 0 ? systemBootTime : GetSystemBootTime();
 
     // generate app_info.json
     Json::Value appInfoJson;
-    appInfoJson["ip"] = Json::Value(LogFileProfiler::mIpAddr);
-    appInfoJson["hostname"] = Json::Value(LogFileProfiler::mHostname);
+    appInfoJson["ip"] = Json::Value(LoongCollectorMonitor::mIpAddr);
+    appInfoJson["hostname"] = Json::Value(LoongCollectorMonitor::mHostname);
     appInfoJson["UUID"] = Json::Value(Application::GetInstance()->GetUUID());
     appInfoJson["instance_id"] = Json::Value(Application::GetInstance()->GetInstanceId());
 #ifdef __ENTERPRISE__
-    appInfoJson["loongcollector_version"] = Json::Value(ILOGTAIL_VERSION);
+    appInfoJson["host_id"] = Json::Value(FetchHostId());
+    appInfoJson[GetVersionTag()] = Json::Value(ILOGTAIL_VERSION);
 #else
-    appInfoJson["loongcollector_version"] = Json::Value(string(ILOGTAIL_VERSION) + " Community Edition");
+    appInfoJson[GetVersionTag()] = Json::Value(string(ILOGTAIL_VERSION) + " Community Edition");
     appInfoJson["git_hash"] = Json::Value(ILOGTAIL_GIT_HASH);
     appInfoJson["build_date"] = Json::Value(ILOGTAIL_BUILD_DATE);
 #endif
@@ -189,7 +191,7 @@ void Application::Init() {
 #define ILOGTAIL_COMPILER VERSION_STR(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
 #endif
     appInfoJson["compiler"] = Json::Value(ILOGTAIL_COMPILER);
-    appInfoJson["os"] = Json::Value(LogFileProfiler::mOsDetail);
+    appInfoJson["os"] = Json::Value(LoongCollectorMonitor::mOsDetail);
     appInfoJson["update_time"] = GetTimeStamp(time(NULL), "%Y-%m-%d %H:%M:%S");
     string appInfo = appInfoJson.toStyledString();
     OverwriteFile(GetAgentAppInfoFile(), appInfo);
@@ -197,8 +199,8 @@ void Application::Init() {
 }
 
 void Application::Start() { // GCOVR_EXCL_START
-    LogFileProfiler::mStartTime = GetTimeStamp(time(NULL), "%Y-%m-%d %H:%M:%S");
-    LogtailMonitor::GetInstance()->UpdateConstMetric("start_time", LogFileProfiler::mStartTime);
+    LoongCollectorMonitor::mStartTime = GetTimeStamp(time(NULL), "%Y-%m-%d %H:%M:%S");
+    LogtailMonitor::GetInstance()->UpdateConstMetric("start_time", LoongCollectorMonitor::mStartTime);
 
 #if defined(__ENTERPRISE__) && defined(_MSC_VER)
     InitWindowsSignalObject();
@@ -210,16 +212,16 @@ void Application::Start() { // GCOVR_EXCL_START
 
     {
         // add local config dir
-        filesystem::path localConfigPath
-            = filesystem::path(AppConfig::GetInstance()->GetLoongcollectorConfDir()) / GetPipelineConfigDir() / "local";
+        filesystem::path localConfigPath = filesystem::path(AppConfig::GetInstance()->GetLoongcollectorConfDir())
+            / GetContinuousPipelineConfigDir() / "local";
         error_code ec;
         filesystem::create_directories(localConfigPath, ec);
         if (ec) {
             LOG_WARNING(sLogger,
-                        ("failed to create dir for local pipeline_config",
+                        ("failed to create dir for local continuous_pipeline_config",
                          "manual creation may be required")("error code", ec.value())("error msg", ec.message()));
         }
-        ConfigWatcher::GetInstance()->AddSource(localConfigPath.string());
+        PipelineConfigWatcher::GetInstance()->AddSource(localConfigPath.string());
     }
 
 #ifdef __ENTERPRISE__
@@ -276,9 +278,12 @@ void Application::Start() { // GCOVR_EXCL_START
             lastCheckTagsTime = curTime;
         }
         if (curTime - lastConfigCheckTime >= INT32_FLAG(config_scan_interval)) {
-            PipelineConfigDiff pipelineConfigDiff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-            if (!pipelineConfigDiff.IsEmpty()) {
-                PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff);
+            auto configDiff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+            if (!configDiff.first.IsEmpty()) {
+                PipelineManager::GetInstance()->UpdatePipelines(configDiff.first);
+            }
+            if (!configDiff.second.IsEmpty()) {
+                TaskPipelineManager::GetInstance()->UpdatePipelines(configDiff.second);
             }
             InstanceConfigDiff instanceConfigDiff = InstanceConfigWatcher::GetInstance()->CheckConfigDiff();
             if (!instanceConfigDiff.IsEmpty()) {
@@ -287,7 +292,6 @@ void Application::Start() { // GCOVR_EXCL_START
             lastConfigCheckTime = curTime;
         }
         if (curTime - lastProfilingCheckTime >= INT32_FLAG(profiling_check_interval)) {
-            LogFileProfiler::GetInstance()->SendProfileData();
             MetricExportor::GetInstance()->PushMetrics(false);
             lastProfilingCheckTime = curTime;
         }
@@ -324,13 +328,14 @@ void Application::Start() { // GCOVR_EXCL_START
 
         // destruct event handlers here so that it will not block file reading task
         ConfigManager::GetInstance()->DeleteHandlers();
+        PrometheusInputRunner::GetInstance()->CheckGC();
 
         this_thread::sleep_for(chrono::seconds(1));
     }
 } // GCOVR_EXCL_STOP
 
 void Application::GenerateInstanceId() {
-    mInstanceId = CalculateRandomUUID() + "_" + LogFileProfiler::mIpAddr + "_" + ToString(mStartTime);
+    mInstanceId = CalculateRandomUUID() + "_" + LoongCollectorMonitor::mIpAddr + "_" + ToString(mStartTime);
 }
 
 bool Application::TryGetUUID() {
diff --git a/core/checkpoint/CheckPointManager.cpp b/core/checkpoint/CheckPointManager.cpp
index 5376e9326d..8f25b23b75 100644
--- a/core/checkpoint/CheckPointManager.cpp
+++ b/core/checkpoint/CheckPointManager.cpp
@@ -71,10 +71,18 @@ bool CheckPointManager::GetCheckPoint(DevInode devInode, const std::string& conf
     return false;
 }
 
-void CheckPointManager::DeleteDirCheckPoint(const std::string& filename) {
-    std::unordered_map<std::string, DirCheckPointPtr>::iterator it = mDirNameMap.find(filename);
-    if (it != mDirNameMap.end())
+void CheckPointManager::DeleteDirCheckPoint(const std::string& dirname) {
+    std::unordered_map<std::string, DirCheckPointPtr>::iterator it = mDirNameMap.find(dirname);
+    if (it != mDirNameMap.end()) {
         mDirNameMap.erase(it);
+    }
+    auto parentpos = dirname.find_last_of(PATH_SEPARATOR);
+    if (parentpos != std::string::npos) {
+        auto parentDirCheckpoint = mDirNameMap.find(dirname.substr(0, parentpos));
+        if (parentDirCheckpoint != mDirNameMap.end()) {
+            parentDirCheckpoint->second->mSubDir.erase(dirname);
+        }
+    }
 }
 
 bool CheckPointManager::GetDirCheckPoint(const std::string& dirname, DirCheckPointPtr& dirCheckPointPtr) {
@@ -123,8 +131,7 @@ void CheckPointManager::LoadCheckPoint() {
     Json::Value root;
     ParseConfResult cptRes = ParseConfig(AppConfig::GetInstance()->GetCheckPointFilePath(), root);
     // if new checkpoint file not exist, check old checkpoint file.
-    if (cptRes == CONFIG_NOT_EXIST
-        && AppConfig::GetInstance()->GetCheckPointFilePath() != GetCheckPointFileName()) {
+    if (cptRes == CONFIG_NOT_EXIST && AppConfig::GetInstance()->GetCheckPointFilePath() != GetCheckPointFileName()) {
         cptRes = ParseConfig(GetCheckPointFileName(), root);
     }
     if (cptRes != CONFIG_OK) {
@@ -408,7 +415,7 @@ bool CheckPointManager::DumpCheckPointToLocal() {
     result["dir_check_point"] = dirJson;
     result["version"] = Json::Value(Json::UInt(INT32_FLAG(check_point_version)));
     fout << result.toStyledString();
-    if (!fout.good()) {
+    if (!fout) {
         LOG_ERROR(sLogger, ("dump check point to file failed", checkPointFile));
         AlarmManager::GetInstance()->SendAlarm(CHECKPOINT_ALARM, "dump check point to file failed");
         fout.close();
diff --git a/core/checkpoint/CheckPointManager.h b/core/checkpoint/CheckPointManager.h
index 0d652b90d9..3e5a63c3cb 100644
--- a/core/checkpoint/CheckPointManager.h
+++ b/core/checkpoint/CheckPointManager.h
@@ -126,7 +126,7 @@ class CheckPointManager {
     void AddCheckPoint(CheckPoint* checkPointPtr);
     void AddDirCheckPoint(const std::string& dirname);
     void DeleteCheckPoint(DevInode devInode, const std::string& configName);
-    void DeleteDirCheckPoint(const std::string& filename);
+    void DeleteDirCheckPoint(const std::string& dirname);
     void LoadCheckPoint();
     void LoadDirCheckPoint(const Json::Value& root);
     void LoadFileCheckPoint(const Json::Value& root);
diff --git a/core/common/LogtailCommonFlags.cpp b/core/common/LogtailCommonFlags.cpp
index b2cea76fda..1d70138f27 100644
--- a/core/common/LogtailCommonFlags.cpp
+++ b/core/common/LogtailCommonFlags.cpp
@@ -81,7 +81,7 @@
 // app config
 DEFINE_FLAG_STRING(ilogtail_config,
                    "set dataserver & configserver address; (optional)set cpu,mem,bufflerfile,buffermap and etc.",
-                   "loongcollector_config.json");
+                   "ilogtail_config.json");
 DEFINE_FLAG_BOOL(enable_full_drain_mode, "", false);
 DEFINE_FLAG_INT32(cpu_limit_num, "cpu violate limit num before shutdown", 10);
 DEFINE_FLAG_INT32(mem_limit_num, "memory violate limit num before shutdown", 10);
diff --git a/core/common/ParamExtractor.h b/core/common/ParamExtractor.h
index 94432281ed..d63e8a1895 100644
--- a/core/common/ParamExtractor.h
+++ b/core/common/ParamExtractor.h
@@ -81,6 +81,45 @@
                         region); \
     }
 
+#define TASK_PARAM_ERROR_RETURN(logger, alarm, msg, module, config) \
+    if (module.empty()) { \
+        LOG_ERROR(logger, ("failed to parse config", msg)("config", config)); \
+        alarm.SendAlarm(CATEGORY_CONFIG_ALARM, std::string(msg) + ": abort, config: " + config); \
+    } else { \
+        LOG_ERROR(logger, ("failed to parse config", msg)("module", module)("config", config)); \
+        alarm.SendAlarm(CATEGORY_CONFIG_ALARM, \
+                        std::string(msg) + ": abort, module: " + module + ", config: " + config); \
+    } \
+    return false
+#define TASK_PARAM_WARNING_IGNORE(logger, alarm, msg, module, config) \
+    if (module.empty()) { \
+        LOG_WARNING(logger, \
+                    ("problem encountered in config parsing", msg)("action", "ignore param")("config", config)); \
+        alarm.SendAlarm(CATEGORY_CONFIG_ALARM, std::string(msg) + ": ignore param, config: " + config); \
+    } else { \
+        LOG_WARNING(logger, \
+                    ("problem encountered in config parsing", \
+                     msg)("action", "ignore param")("module", module)("config", config)); \
+        alarm.SendAlarm(CATEGORY_CONFIG_ALARM, \
+                        std::string(msg) + ": ignore param, module: " + module + ", config: " + config); \
+    }
+#define TASK_PARAM_WARNING_DEFAULT(logger, alarm, msg, val, module, config) \
+    if (module.empty()) { \
+        LOG_WARNING(logger, \
+                    ("problem encountered in config parsing", \
+                     msg)("action", "use default value instead")("default value", ToString(val))("config", config)); \
+        alarm.SendAlarm(CATEGORY_CONFIG_ALARM, \
+                        std::string(msg) + ": use default value instead, default value: " + ToString(val) \
+                            + ", config: " + config); \
+    } else { \
+        LOG_WARNING(logger, \
+                    ("problem encountered in config parsing", msg)("action", "use default value instead")( \
+                        "default value", ToString(val))("module", module)("config", config)); \
+        alarm.SendAlarm(CATEGORY_CONFIG_ALARM, \
+                        std::string(msg) + ": use default value instead, default value: " + ToString(val) \
+                            + ", module: " + module + ", config: " + config); \
+    }
+
 namespace logtail {
 
 const std::string noModule = "";
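
The TASK_PARAM_* macros mirror the existing PARAM_* helpers but take only a logger, an alarm object, a message, a module name, and a config name. A hypothetical call site, just to illustrate the intended usage (ParseExampleTaskConfig, the parameter names, and the defaults are made up for this sketch):

    #include <json/json.h>

    #include "common/ParamExtractor.h"
    #include "logger/Logger.h"
    #include "monitor/AlarmManager.h"

    // Hypothetical task-config parser; only the macros come from this patch.
    bool ParseExampleTaskConfig(const Json::Value& config,
                                const std::string& configName,
                                uint32_t& intervalSec,
                                std::string& scriptContent) {
        using namespace logtail;
        if (!config.isMember("Interval") || !config["Interval"].isUInt()) {
            // Recoverable problem: warn, send an alarm, fall back to a default and keep parsing.
            TASK_PARAM_WARNING_DEFAULT(
                sLogger, *AlarmManager::GetInstance(), "param Interval is not of type uint", 60, noModule, configName);
            intervalSec = 60;
        } else {
            intervalSec = config["Interval"].asUInt();
        }
        if (!config.isMember("ScriptContent") || !config["ScriptContent"].isString()) {
            // Fatal problem: log, send an alarm and bail out (the macro contains the return false).
            TASK_PARAM_ERROR_RETURN(
                sLogger, *AlarmManager::GetInstance(), "mandatory param ScriptContent is missing", noModule, configName);
        }
        scriptContent = config["ScriptContent"].asString();
        return true;
    }
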
diff --git a/core/common/links.cmake b/core/common/links.cmake
index d8ca1feb6b..2790181f58 100644
--- a/core/common/links.cmake
+++ b/core/common/links.cmake
@@ -23,7 +23,7 @@ macro(common_link target_name)
     link_zlib(${target_name})
     link_zstd(${target_name})
     link_unwind(${target_name})
-    if (NOT ANDROID)
+    if (ENABLE_ADDRESS_SANITIZER)
         link_asan(${target_name})
     endif()
     if (UNIX)
diff --git a/core/common/version.h b/core/common/version.h
index 6256797df1..bd72e863c7 100644
--- a/core/common/version.h
+++ b/core/common/version.h
@@ -22,7 +22,4 @@ extern const char* const ILOGTAIL_UPDATE_SUFFIX;
 extern const char* const ILOGTAIL_GIT_HASH;
 extern const char* const ILOGTAIL_BUILD_DATE;
 
-#define ILOGTAIL_PREFIX "ilogtail_"
-#define ILOGTAIL_PIDFILE_SUFFIX ".pid"
-
 #endif
\ No newline at end of file
diff --git a/core/config/ConfigDiff.h b/core/config/ConfigDiff.h
index 8dc3ed2d26..38fa7ba00a 100644
--- a/core/config/ConfigDiff.h
+++ b/core/config/ConfigDiff.h
@@ -21,23 +21,21 @@
 
 #include "config/InstanceConfig.h"
 #include "config/PipelineConfig.h"
+#include "config/TaskConfig.h"
 
 namespace logtail {
 
-class PipelineConfigDiff {
-public:
-    std::vector<PipelineConfig> mAdded;
-    std::vector<PipelineConfig> mModified;
+template <class T>
+struct ConfigDiff {
+    std::vector<T> mAdded;
+    std::vector<T> mModified;
     std::vector<std::string> mRemoved;
-    bool IsEmpty() { return mRemoved.empty() && mAdded.empty() && mModified.empty(); }
-};
 
-class InstanceConfigDiff {
-public:
-    std::vector<InstanceConfig> mAdded;
-    std::vector<InstanceConfig> mModified;
-    std::vector<std::string> mRemoved;
     bool IsEmpty() { return mRemoved.empty() && mAdded.empty() && mModified.empty(); }
 };
 
+using PipelineConfigDiff = ConfigDiff<PipelineConfig>;
+using TaskConfigDiff = ConfigDiff<TaskConfig>;
+using InstanceConfigDiff = ConfigDiff<InstanceConfig>;
+
 } // namespace logtail
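
With the per-type diff classes collapsed into one ConfigDiff<T> template, the three aliases share the same member layout and IsEmpty(). A minimal sketch of consuming one of them; ApplyTaskDiff and the actions in the comments are placeholders, not part of this change.

    void ApplyTaskDiff(TaskConfigDiff& diff) {
        if (diff.IsEmpty()) {
            return; // nothing added, modified or removed
        }
        for (auto& config : diff.mAdded) {
            // build a new task from config (a TaskConfig)
        }
        for (auto& config : diff.mModified) {
            // rebuild the task whose detail changed
        }
        for (const auto& name : diff.mRemoved) {
            // stop and drop the task identified by name
        }
    }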
diff --git a/core/config/ConfigUtil.cpp b/core/config/ConfigUtil.cpp
new file mode 100644
index 0000000000..2c3975f4c7
--- /dev/null
+++ b/core/config/ConfigUtil.cpp
@@ -0,0 +1,87 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "config/ConfigUtil.h"
+
+#include "common/FileSystemUtil.h"
+#include "common/JsonUtil.h"
+#include "common/YamlUtil.h"
+#include "logger/Logger.h"
+
+using namespace std;
+
+namespace logtail {
+
+bool LoadConfigDetailFromFile(const filesystem::path& filepath, Json::Value& detail) {
+    const string& ext = filepath.extension().string();
+    const string& configName = filepath.stem().string();
+    if (configName == "region_config") {
+        return false;
+    }
+    if (ext != ".yaml" && ext != ".yml" && ext != ".json") {
+        LOG_WARNING(sLogger, ("unsupported config file format", "skip current object")("filepath", filepath));
+        return false;
+    }
+    string content;
+    if (!ReadFile(filepath.string(), content)) {
+        LOG_WARNING(sLogger, ("failed to open config file", "skip current object")("filepath", filepath));
+        return false;
+    }
+    if (content.empty()) {
+        LOG_WARNING(sLogger, ("empty config file", "skip current object")("filepath", filepath));
+        return false;
+    }
+    string errorMsg;
+    if (!ParseConfigDetail(content, ext, detail, errorMsg)) {
+        LOG_WARNING(sLogger,
+                    ("config file format error", "skip current object")("error msg", errorMsg)("filepath", filepath));
+        return false;
+    }
+    return true;
+}
+
+bool ParseConfigDetail(const string& content, const string& extension, Json::Value& detail, string& errorMsg) {
+    if (extension == ".json") {
+        return ParseJsonTable(content, detail, errorMsg);
+    } else if (extension == ".yaml" || extension == ".yml") {
+        YAML::Node yamlRoot;
+        if (!ParseYamlTable(content, yamlRoot, errorMsg)) {
+            return false;
+        }
+        detail = ConvertYamlToJson(yamlRoot);
+        return true;
+    }
+    return false;
+}
+
+bool IsConfigEnabled(const string& name, const Json::Value& detail) {
+    const char* key = "enable";
+    const Json::Value* itr = detail.find(key, key + strlen(key));
+    if (itr != nullptr) {
+        if (!itr->isBool()) {
+            LOG_WARNING(sLogger,
+                        ("problem encountered in config parsing",
+                         "param enable is not of type bool")("action", "ignore the config")("config", name));
+            return false;
+        }
+        return itr->asBool();
+    }
+    return true;
+}
+
+ConfigType GetConfigType(const Json::Value& detail) {
+    return detail.isMember("task") ? ConfigType::Task : ConfigType::Pipeline;
+}
+
+} // namespace logtail
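
ConfigUtil.cpp now owns the file-loading helpers formerly embedded in PipelineConfig.cpp, plus GetConfigType(), which treats any detail with a top-level "task" member as a task config. A hedged sketch of the intended call sequence; LoadOneConfig and the dispatch targets are placeholders.

    #include <filesystem>
    #include <json/json.h>
    #include "config/ConfigUtil.h"

    void LoadOneConfig(const std::filesystem::path& path) {
        Json::Value detail;
        if (!logtail::LoadConfigDetailFromFile(path, detail)) {
            return; // unsupported extension, unreadable, empty or malformed file
        }
        if (!logtail::IsConfigEnabled(path.stem().string(), detail)) {
            return; // "enable" present and false (or not a bool)
        }
        if (logtail::GetConfigType(detail) == logtail::ConfigType::Task) {
            // hand off to the task config path (placeholder)
        } else {
            // hand off to the continuous pipeline config path (placeholder)
        }
    }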
diff --git a/core/config/ConfigUtil.h b/core/config/ConfigUtil.h
new file mode 100644
index 0000000000..c5fd7d0ae2
--- /dev/null
+++ b/core/config/ConfigUtil.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <json/json.h>
+
+#include <filesystem>
+#include <string>
+
+namespace logtail {
+
+enum class ConfigType { Pipeline, Task };
+
+bool LoadConfigDetailFromFile(const std::filesystem::path& filepath, Json::Value& detail);
+bool ParseConfigDetail(const std::string& content,
+                       const std::string& extension,
+                       Json::Value& detail,
+                       std::string& errorMsg);
+bool IsConfigEnabled(const std::string& name, const Json::Value& detail);
+ConfigType GetConfigType(const Json::Value& detail);
+
+} // namespace logtail
diff --git a/core/config/InstanceConfig.cpp b/core/config/InstanceConfig.cpp
deleted file mode 100644
index 94c89ca684..0000000000
--- a/core/config/InstanceConfig.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 iLogtail Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "config/InstanceConfig.h"
-
-#include <string>
-
-#include "app_config/AppConfig.h"
-#include "common/FileSystemUtil.h"
-#include "common/Flags.h"
-#include "common/JsonUtil.h"
-#include "common/ParamExtractor.h"
-#include "common/YamlUtil.h"
-#include "pipeline/plugin/PluginRegistry.h"
-
-
-using namespace std;
-
-namespace logtail {} // namespace logtail
diff --git a/core/config/PipelineConfig.cpp b/core/config/PipelineConfig.cpp
index e80d3fae8b..33cf8ce6df 100644
--- a/core/config/PipelineConfig.cpp
+++ b/core/config/PipelineConfig.cpp
@@ -17,11 +17,8 @@
 #include <string>
 
 #include "app_config/AppConfig.h"
-#include "common/FileSystemUtil.h"
 #include "common/Flags.h"
-#include "common/JsonUtil.h"
 #include "common/ParamExtractor.h"
-#include "common/YamlUtil.h"
 #include "pipeline/plugin/PluginRegistry.h"
 
 DEFINE_FLAG_BOOL(enable_env_ref_in_config, "enable environment variable reference replacement in configuration", false);
@@ -667,61 +664,4 @@ bool PipelineConfig::ReplaceEnvVar() {
     return res;
 }
 
-bool LoadConfigDetailFromFile(const filesystem::path& filepath, Json::Value& detail) {
-    const string& ext = filepath.extension().string();
-    const string& configName = filepath.stem().string();
-    if (configName == "region_config") {
-        return false;
-    }
-    if (ext != ".yaml" && ext != ".yml" && ext != ".json") {
-        LOG_WARNING(sLogger, ("unsupported config file format", "skip current object")("filepath", filepath));
-        return false;
-    }
-    string content;
-    if (!ReadFile(filepath.string(), content)) {
-        LOG_WARNING(sLogger, ("failed to open config file", "skip current object")("filepath", filepath));
-        return false;
-    }
-    if (content.empty()) {
-        LOG_WARNING(sLogger, ("empty config file", "skip current object")("filepath", filepath));
-        return false;
-    }
-    string errorMsg;
-    if (!ParseConfigDetail(content, ext, detail, errorMsg)) {
-        LOG_WARNING(sLogger,
-                    ("config file format error", "skip current object")("error msg", errorMsg)("filepath", filepath));
-        return false;
-    }
-    return true;
-}
-
-bool ParseConfigDetail(const string& content, const string& extension, Json::Value& detail, string& errorMsg) {
-    if (extension == ".json") {
-        return ParseJsonTable(content, detail, errorMsg);
-    } else if (extension == ".yaml" || extension == ".yml") {
-        YAML::Node yamlRoot;
-        if (!ParseYamlTable(content, yamlRoot, errorMsg)) {
-            return false;
-        }
-        detail = ConvertYamlToJson(yamlRoot);
-        return true;
-    }
-    return false;
-}
-
-bool IsConfigEnabled(const string& name, const Json::Value& detail) {
-    const char* key = "enable";
-    const Json::Value* itr = detail.find(key, key + strlen(key));
-    if (itr != nullptr) {
-        if (!itr->isBool()) {
-            LOG_WARNING(sLogger,
-                        ("problem encountered in config parsing",
-                         "param enable is not of type bool")("action", "ignore the config")("config", name));
-            return false;
-        }
-        return itr->asBool();
-    }
-    return true;
-}
-
 } // namespace logtail
diff --git a/core/config/PipelineConfig.h b/core/config/PipelineConfig.h
index 20ff581a33..7d845126e9 100644
--- a/core/config/PipelineConfig.h
+++ b/core/config/PipelineConfig.h
@@ -17,7 +17,6 @@
 #pragma once
 
 #include <json/json.h>
-#include <re2/re2.h>
 
 #include <cstdint>
 #include <filesystem>
@@ -84,11 +83,4 @@ inline bool operator!=(const PipelineConfig& lhs, const PipelineConfig& rhs) {
     return !(lhs == rhs);
 }
 
-bool LoadConfigDetailFromFile(const std::filesystem::path& filepath, Json::Value& detail);
-bool ParseConfigDetail(const std::string& content,
-                       const std::string& extenstion,
-                       Json::Value& detail,
-                       std::string& errorMsg);
-bool IsConfigEnabled(const std::string& name, const Json::Value& detail);
-
 } // namespace logtail
diff --git a/core/config/TaskConfig.cpp b/core/config/TaskConfig.cpp
new file mode 100644
index 0000000000..6fec9ebd19
--- /dev/null
+++ b/core/config/TaskConfig.cpp
@@ -0,0 +1,52 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "config/TaskConfig.h"
+
+#include "common/ParamExtractor.h"
+#include "task_pipeline/TaskRegistry.h"
+
+using namespace std;
+
+namespace logtail {
+
+bool TaskConfig::Parse() {
+    string errorMsg;
+    AlarmManager& alarm = *AlarmManager::GetInstance();
+
+    if (!GetOptionalUIntParam(*mDetail, "createTime", mCreateTime, errorMsg)) {
+        TASK_PARAM_WARNING_DEFAULT(sLogger, alarm, errorMsg, mCreateTime, noModule, mName);
+    }
+
+    auto& detail = (*mDetail)["task"];
+    if (!detail.isObject()) {
+        TASK_PARAM_ERROR_RETURN(sLogger, alarm, "task module is not of type object", noModule, mName);
+    }
+
+    string key = "Type";
+    auto itr = detail.find(key.c_str(), key.c_str() + key.size());
+    if (itr == nullptr) {
+        TASK_PARAM_ERROR_RETURN(sLogger, alarm, "param task.Type is missing", noModule, mName);
+    }
+    if (!itr->isString()) {
+        TASK_PARAM_ERROR_RETURN(sLogger, alarm, "param task.Type is not of type string", noModule, mName);
+    }
+    string pluginType = itr->asString();
+    if (!TaskRegistry::GetInstance()->IsValidPlugin(pluginType)) {
+        TASK_PARAM_ERROR_RETURN(sLogger, alarm, "unsupported task plugin", pluginType, mName);
+    }
+    return true;
+}
+
+} // namespace logtail
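
TaskConfig::Parse() only requires an object-valued "task" block whose Type names a plugin registered in TaskRegistry; createTime is optional and merely downgrades to a warning when malformed. A sketch of the smallest detail that would pass, assuming a task plugin named "example_task" is registered (the plugin and config names are illustrative).

    #include <memory>
    #include <string>
    #include "config/ConfigUtil.h"
    #include "config/TaskConfig.h"

    bool ParseExampleTask() {
        const std::string content = R"({"createTime": 1700000000, "task": {"Type": "example_task"}})";
        std::string errorMsg;
        auto detail = std::make_unique<Json::Value>();
        if (!logtail::ParseConfigDetail(content, ".json", *detail, errorMsg)) {
            return false;
        }
        logtail::TaskConfig config("example-task-config", std::move(detail));
        return config.Parse(); // true only when "example_task" is a registered task plugin
    }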
diff --git a/core/config/TaskConfig.h b/core/config/TaskConfig.h
new file mode 100644
index 0000000000..5fafc176ef
--- /dev/null
+++ b/core/config/TaskConfig.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <json/json.h>
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+namespace logtail {
+
+struct TaskConfig {
+    std::string mName;
+    std::unique_ptr<Json::Value> mDetail;
+    uint32_t mCreateTime = 0;
+
+    TaskConfig(const std::string& name, std::unique_ptr<Json::Value>&& detail)
+        : mName(name), mDetail(std::move(detail)) {}
+
+    bool Parse();
+};
+
+inline bool operator==(const TaskConfig& lhs, const TaskConfig& rhs) {
+    return (lhs.mName == rhs.mName) && (*lhs.mDetail == *rhs.mDetail);
+}
+
+inline bool operator!=(const TaskConfig& lhs, const TaskConfig& rhs) {
+    return !(lhs == rhs);
+}
+
+} // namespace logtail
diff --git a/core/config/common_provider/CommonConfigProvider.cpp b/core/config/common_provider/CommonConfigProvider.cpp
index 1a5edcfd31..e86808d7a2 100644
--- a/core/config/common_provider/CommonConfigProvider.cpp
+++ b/core/config/common_provider/CommonConfigProvider.cpp
@@ -22,16 +22,17 @@
 
 #include "app_config/AppConfig.h"
 #include "application/Application.h"
-#include "constants/Constants.h"
 #include "common/LogtailCommonFlags.h"
 #include "common/StringTools.h"
 #include "common/UUIDUtil.h"
 #include "common/YamlUtil.h"
 #include "common/version.h"
+#include "config/ConfigUtil.h"
 #include "config/PipelineConfig.h"
 #include "config/feedbacker/ConfigFeedbackReceiver.h"
+#include "constants/Constants.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
+#include "monitor/Monitor.h"
 #include "sdk/Common.h"
 #include "sdk/CurlImp.h"
 #include "sdk/Exception.h"
@@ -120,6 +121,9 @@ void CommonConfigProvider::Stop() {
         mIsThreadRunning = false;
     }
     mStopCV.notify_one();
+    if (!mThreadRes.valid()) {
+        return;
+    }
     future_status s = mThreadRes.wait_for(chrono::seconds(1));
     if (s == future_status::ready) {
         LOG_INFO(sLogger, (sName, "stopped successfully"));
@@ -130,9 +134,9 @@ void CommonConfigProvider::Stop() {
 
 void CommonConfigProvider::LoadConfigFile() {
     error_code ec;
-    lock_guard<mutex> pipelineInfomaplock(mPipelineInfoMapMux);
-    lock_guard<mutex> lockPipeline(mPipelineMux);
-    for (auto const& entry : filesystem::directory_iterator(mPipelineSourceDir, ec)) {
+    lock_guard<mutex> pipelineInfomaplock(mContinuousPipelineInfoMapMux);
+    lock_guard<mutex> lockPipeline(mContinuousPipelineMux);
+    for (auto const& entry : filesystem::directory_iterator(mContinuousPipelineConfigDir, ec)) {
         Json::Value detail;
         if (LoadConfigDetailFromFile(entry, detail)) {
             ConfigInfo info;
@@ -143,8 +147,8 @@ void CommonConfigProvider::LoadConfigFile() {
             }
             info.status = ConfigFeedbackStatus::APPLYING;
             info.detail = detail.toStyledString();
-            mPipelineConfigInfoMap[info.name] = info;
-            ConfigFeedbackReceiver::GetInstance().RegisterPipelineConfig(info.name, this);
+            mContinuousPipelineConfigInfoMap[info.name] = info;
+            ConfigFeedbackReceiver::GetInstance().RegisterContinuousPipelineConfig(info.name, this);
         }
     }
     lock_guard<mutex> instanceInfomaplock(mInstanceInfoMapMux);
@@ -206,11 +210,11 @@ string CommonConfigProvider::GetInstanceId() {
 }
 
 void CommonConfigProvider::FillAttributes(configserver::proto::v2::AgentAttributes& attributes) {
-    attributes.set_hostname(LogFileProfiler::mHostname);
-    attributes.set_ip(LogFileProfiler::mIpAddr);
+    attributes.set_hostname(LoongCollectorMonitor::mHostname);
+    attributes.set_ip(LoongCollectorMonitor::mIpAddr);
     attributes.set_version(ILOGTAIL_VERSION);
     google::protobuf::Map<string, string>* extras = attributes.mutable_extras();
-    extras->insert({"osDetail", LogFileProfiler::mOsDetail});
+    extras->insert({"osDetail", LoongCollectorMonitor::mOsDetail});
 }
 
 void addConfigInfoToRequest(const std::pair<const string, logtail::ConfigInfo>& configInfo,
@@ -265,7 +269,7 @@ configserver::proto::v2::HeartbeatRequest CommonConfigProvider::PrepareHeartbeat
     heartbeatReq.set_request_id(requestID);
     heartbeatReq.set_sequence_num(mSequenceNum);
     heartbeatReq.set_capabilities(configserver::proto::v2::AcceptsInstanceConfig
-                                  | configserver::proto::v2::AcceptsPipelineConfig);
+                                  | configserver::proto::v2::AcceptsContinuousPipelineConfig);
     heartbeatReq.set_instance_id(GetInstanceId());
     heartbeatReq.set_agent_type("LoongCollector");
     FillAttributes(*heartbeatReq.mutable_attributes());
@@ -278,37 +282,18 @@ configserver::proto::v2::HeartbeatRequest CommonConfigProvider::PrepareHeartbeat
     heartbeatReq.set_running_status("running");
     heartbeatReq.set_startup_time(mStartTime);
 
-    lock_guard<mutex> pipelineinfomaplock(mPipelineInfoMapMux);
-    for (const auto& configInfo : mPipelineConfigInfoMap) {
-        addConfigInfoToRequest(configInfo, heartbeatReq.add_pipeline_configs());
+    lock_guard<mutex> pipelineinfomaplock(mContinuousPipelineInfoMapMux);
+    for (const auto& configInfo : mContinuousPipelineConfigInfoMap) {
+        addConfigInfoToRequest(configInfo, heartbeatReq.add_continuous_pipeline_configs());
     }
     lock_guard<mutex> instanceinfomaplock(mInstanceInfoMapMux);
     for (const auto& configInfo : mInstanceConfigInfoMap) {
         addConfigInfoToRequest(configInfo, heartbeatReq.add_instance_configs());
     }
 
-    for (auto& configInfo : mCommandInfoMap) {
-        configserver::proto::v2::CommandInfo* command = heartbeatReq.add_custom_commands();
-        command->set_type(configInfo.second.type);
-        command->set_name(configInfo.second.name);
-        command->set_message(configInfo.second.message);
-        switch (configInfo.second.status) {
-            case ConfigFeedbackStatus::UNSET:
-                command->set_status(configserver::proto::v2::ConfigStatus::UNSET);
-                break;
-            case ConfigFeedbackStatus::APPLYING:
-                command->set_status(configserver::proto::v2::ConfigStatus::APPLYING);
-                break;
-            case ConfigFeedbackStatus::APPLIED:
-                command->set_status(configserver::proto::v2::ConfigStatus::APPLIED);
-                break;
-            case ConfigFeedbackStatus::FAILED:
-                command->set_status(configserver::proto::v2::ConfigStatus::FAILED);
-                break;
-            case ConfigFeedbackStatus::DELETED:
-                break;
-        }
-        command->set_message(configInfo.second.message);
+    lock_guard<mutex> onetimeinfomaplock(mOnetimePipelineInfoMapMux);
+    for (const auto& configInfo : mOnetimePipelineConfigInfoMap) {
+        addConfigInfoToRequest(configInfo, heartbeatReq.add_onetime_pipeline_configs());
     }
     return heartbeatReq;
 }
@@ -369,10 +354,10 @@ bool CommonConfigProvider::SendHttpRequest(const string& operation,
 bool CommonConfigProvider::FetchPipelineConfig(
     configserver::proto::v2::HeartbeatResponse& heartbeatResponse,
     ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail>& result) {
-    if (heartbeatResponse.flags() & ::configserver::proto::v2::FetchPipelineConfigDetail) {
+    if (heartbeatResponse.flags() & ::configserver::proto::v2::FetchContinuousPipelineConfigDetail) {
         return FetchPipelineConfigFromServer(heartbeatResponse, result);
     } else {
-        result.Swap(heartbeatResponse.mutable_pipeline_config_updates());
+        result.Swap(heartbeatResponse.mutable_continuous_pipeline_config_updates());
         return true;
     }
 }
@@ -380,7 +365,7 @@ bool CommonConfigProvider::FetchPipelineConfig(
 bool CommonConfigProvider::FetchInstanceConfig(
     configserver::proto::v2::HeartbeatResponse& heartbeatResponse,
     ::google::protobuf::RepeatedPtrField< ::configserver::proto::v2::ConfigDetail>& result) {
-    if (heartbeatResponse.flags() & ::configserver::proto::v2::FetchPipelineConfigDetail) {
+    if (heartbeatResponse.flags() & ::configserver::proto::v2::FetchContinuousPipelineConfigDetail) {
         return FetchInstanceConfigFromServer(heartbeatResponse, result);
     } else {
         result.Swap(heartbeatResponse.mutable_instance_config_updates());
@@ -421,7 +406,7 @@ bool CommonConfigProvider::DumpConfigFile(const configserver::proto::v2::ConfigD
 void CommonConfigProvider::UpdateRemotePipelineConfig(
     const google::protobuf::RepeatedPtrField<configserver::proto::v2::ConfigDetail>& configs) {
     error_code ec;
-    const std::filesystem::path& sourceDir = mPipelineSourceDir;
+    const std::filesystem::path& sourceDir = mContinuousPipelineConfigDir;
     filesystem::create_directories(sourceDir, ec);
     if (ec) {
         StopUsingConfigServer();
@@ -431,27 +416,27 @@ void CommonConfigProvider::UpdateRemotePipelineConfig(
         return;
     }
 
-    lock_guard<mutex> lock(mPipelineMux);
-    lock_guard<mutex> infomaplock(mPipelineInfoMapMux);
+    lock_guard<mutex> lock(mContinuousPipelineMux);
+    lock_guard<mutex> infomaplock(mContinuousPipelineInfoMapMux);
     for (const auto& config : configs) {
         filesystem::path filePath = sourceDir / (config.name() + ".json");
         if (config.version() == -1) {
-            mPipelineConfigInfoMap.erase(config.name());
+            mContinuousPipelineConfigInfoMap.erase(config.name());
             filesystem::remove(filePath, ec);
-            ConfigFeedbackReceiver::GetInstance().UnregisterPipelineConfig(config.name());
+            ConfigFeedbackReceiver::GetInstance().UnregisterContinuousPipelineConfig(config.name());
         } else {
             if (!DumpConfigFile(config, sourceDir)) {
-                mPipelineConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(),
-                                                                   .version = config.version(),
-                                                                   .status = ConfigFeedbackStatus::FAILED,
-                                                                   .detail = config.detail()};
+                mContinuousPipelineConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(),
+                                                                             .version = config.version(),
+                                                                             .status = ConfigFeedbackStatus::FAILED,
+                                                                             .detail = config.detail()};
                 continue;
             }
-            mPipelineConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(),
-                                                               .version = config.version(),
-                                                               .status = ConfigFeedbackStatus::APPLYING,
-                                                               .detail = config.detail()};
-            ConfigFeedbackReceiver::GetInstance().RegisterPipelineConfig(config.name(), this);
+            mContinuousPipelineConfigInfoMap[config.name()] = ConfigInfo{.name = config.name(),
+                                                                         .version = config.version(),
+                                                                         .status = ConfigFeedbackStatus::APPLYING,
+                                                                         .detail = config.detail()};
+            ConfigFeedbackReceiver::GetInstance().RegisterContinuousPipelineConfig(config.name(), this);
         }
     }
 }
@@ -535,8 +520,8 @@ bool CommonConfigProvider::FetchPipelineConfigFromServer(
     string requestID = CalculateRandomUUID();
     fetchConfigRequest.set_request_id(requestID);
     fetchConfigRequest.set_instance_id(GetInstanceId());
-    for (const auto& config : heartbeatResponse.pipeline_config_updates()) {
-        auto reqConfig = fetchConfigRequest.add_pipeline_configs();
+    for (const auto& config : heartbeatResponse.continuous_pipeline_config_updates()) {
+        auto reqConfig = fetchConfigRequest.add_continuous_pipeline_configs();
         reqConfig->set_name(config.name());
         reqConfig->set_version(config.version());
     }
@@ -549,20 +534,22 @@ bool CommonConfigProvider::FetchPipelineConfigFromServer(
             operation, reqBody, "FetchPipelineConfig", fetchConfigRequest.request_id(), fetchConfigResponse)) {
         configserver::proto::v2::FetchConfigResponse fetchConfigResponsePb;
         fetchConfigResponsePb.ParseFromString(fetchConfigResponse);
-        res.Swap(fetchConfigResponsePb.mutable_pipeline_config_updates());
+        res.Swap(fetchConfigResponsePb.mutable_continuous_pipeline_config_updates());
         return true;
     }
     return false;
 }
 
-void CommonConfigProvider::FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) {
-    lock_guard<mutex> infomaplock(mPipelineInfoMapMux);
-    auto info = mPipelineConfigInfoMap.find(name);
-    if (info != mPipelineConfigInfoMap.end()) {
+void CommonConfigProvider::FeedbackContinuousPipelineConfigStatus(const std::string& name,
+                                                                  ConfigFeedbackStatus status) {
+    lock_guard<mutex> infomaplock(mContinuousPipelineInfoMapMux);
+    auto info = mContinuousPipelineConfigInfoMap.find(name);
+    if (info != mContinuousPipelineConfigInfoMap.end()) {
         info->second.status = status;
     }
     LOG_DEBUG(sLogger,
-              ("CommonConfigProvider", "FeedbackPipelineConfigStatus")("name", name)("status", ToStringView(status)));
+              ("CommonConfigProvider", "FeedbackContinuousPipelineConfigStatus")("name", name)("status",
+                                                                                               ToStringView(status)));
 }
 void CommonConfigProvider::FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status) {
     lock_guard<mutex> infomaplock(mInstanceInfoMapMux);
@@ -573,17 +560,17 @@ void CommonConfigProvider::FeedbackInstanceConfigStatus(const std::string& name,
     LOG_DEBUG(sLogger,
               ("CommonConfigProvider", "FeedbackInstanceConfigStatus")("name", name)("status", ToStringView(status)));
 }
-void CommonConfigProvider::FeedbackCommandConfigStatus(const std::string& type,
-                                                       const std::string& name,
-                                                       ConfigFeedbackStatus status) {
-    lock_guard<mutex> infomaplock(mCommondInfoMapMux);
-    auto info = mCommandInfoMap.find(GenerateCommandFeedBackKey(type, name));
-    if (info != mCommandInfoMap.end()) {
+void CommonConfigProvider::FeedbackOnetimePipelineConfigStatus(const std::string& type,
+                                                               const std::string& name,
+                                                               ConfigFeedbackStatus status) {
+    lock_guard<mutex> infomaplock(mOnetimePipelineInfoMapMux);
+    auto info = mOnetimePipelineConfigInfoMap.find(GenerateOnetimePipelineConfigFeedBackKey(type, name));
+    if (info != mOnetimePipelineConfigInfoMap.end()) {
         info->second.status = status;
     }
     LOG_DEBUG(sLogger,
               ("CommonConfigProvider",
-               "FeedbackCommandConfigStatus")("type", type)("name", name)("status", ToStringView(status)));
+               "FeedbackOnetimePipelineConfigStatus")("type", type)("name", name)("status", ToStringView(status)));
 }
 
 } // namespace logtail
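
Besides the continuous/onetime renaming, Stop() gains an early return when mThreadRes.valid() is false: calling wait_for() on a std::future that holds no shared state (e.g. when Init() never launched the update thread) is undefined behaviour. The guard in isolation, as a sketch:

    #include <chrono>
    #include <future>

    void JoinWorker(std::future<void>& threadRes) {
        if (!threadRes.valid()) {
            return; // the update thread was never launched, nothing to wait for
        }
        if (threadRes.wait_for(std::chrono::seconds(1)) == std::future_status::ready) {
            // stopped successfully
        } else {
            // forced to stop: the thread did not finish within the grace period
        }
    }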
diff --git a/core/config/common_provider/CommonConfigProvider.h b/core/config/common_provider/CommonConfigProvider.h
index d79d22a95e..f7a4e918a6 100644
--- a/core/config/common_provider/CommonConfigProvider.h
+++ b/core/config/common_provider/CommonConfigProvider.h
@@ -38,13 +38,6 @@ struct ConfigInfo {
     std::string detail;
 };
 
-struct CommandInfo {
-    std::string type;
-    std::string name;
-    ConfigFeedbackStatus status;
-    std::string message;
-};
-
 class CommonConfigProvider : public ConfigProvider, ConfigFeedbackable {
 public:
     std::string sName;
@@ -61,10 +54,11 @@ class CommonConfigProvider : public ConfigProvider, ConfigFeedbackable {
     void Init(const std::string& dir) override;
     void Stop() override;
 
-    void FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) override;
+    void FeedbackContinuousPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) override;
     void FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status) override;
-    void
-    FeedbackCommandConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status) override;
+    void FeedbackOnetimePipelineConfigStatus(const std::string& type,
+                                             const std::string& name,
+                                             ConfigFeedbackStatus status) override;
     CommonConfigProvider() = default;
     ~CommonConfigProvider() = default;
 
@@ -105,12 +99,12 @@ class CommonConfigProvider : public ConfigProvider, ConfigFeedbackable {
     bool mConfigServerAvailable = false;
 
     mutable std::mutex mInstanceInfoMapMux;
-    mutable std::mutex mPipelineInfoMapMux;
-    mutable std::mutex mCommondInfoMapMux;
+    mutable std::mutex mContinuousPipelineInfoMapMux;
+    mutable std::mutex mOnetimePipelineInfoMapMux;
 
-    std::unordered_map<std::string, ConfigInfo> mPipelineConfigInfoMap;
+    std::unordered_map<std::string, ConfigInfo> mContinuousPipelineConfigInfoMap;
     std::unordered_map<std::string, ConfigInfo> mInstanceConfigInfoMap;
-    std::unordered_map<std::string, CommandInfo> mCommandInfoMap;
+    std::unordered_map<std::string, ConfigInfo> mOnetimePipelineConfigInfoMap;
 
 private:
     static std::string configVersion;
diff --git a/core/config/common_provider/LegacyCommonConfigProvider.cpp b/core/config/common_provider/LegacyCommonConfigProvider.cpp
index f5d000711f..334b4dc85b 100644
--- a/core/config/common_provider/LegacyCommonConfigProvider.cpp
+++ b/core/config/common_provider/LegacyCommonConfigProvider.cpp
@@ -26,7 +26,7 @@
 #include "common/StringTools.h"
 #include "common/version.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
+#include "monitor/Monitor.h"
 #include "sdk/Common.h"
 #include "sdk/CurlImp.h"
 #include "sdk/Exception.h"
@@ -87,6 +87,9 @@ void LegacyCommonConfigProvider::Stop() {
         mIsThreadRunning = false;
     }
     mStopCV.notify_one();
+    if (!mThreadRes.valid()) {
+        return;
+    }
     future_status s = mThreadRes.wait_for(chrono::seconds(1));
     if (s == future_status::ready) {
         LOG_INFO(sLogger, ("legacy common config provider", "stopped successfully"));
@@ -112,7 +115,8 @@ void LegacyCommonConfigProvider::CheckUpdateThread() {
     }
 }
 
-LegacyCommonConfigProvider::ConfigServerAddress LegacyCommonConfigProvider::GetOneConfigServerAddress(bool changeConfigServer) {
+LegacyCommonConfigProvider::ConfigServerAddress
+LegacyCommonConfigProvider::GetOneConfigServerAddress(bool changeConfigServer) {
     if (0 == mConfigServerAddresses.size()) {
         return ConfigServerAddress("", -1); // No address available
     }
@@ -158,7 +162,7 @@ LegacyCommonConfigProvider::SendHeartbeat(const ConfigServerAddress& configServe
     heartBeatReq.set_agent_id(Application::GetInstance()->GetInstanceId());
     heartBeatReq.set_agent_type("iLogtail");
     attributes.set_version(ILOGTAIL_VERSION);
-    attributes.set_ip(LogFileProfiler::mIpAddr);
+    attributes.set_ip(LoongCollectorMonitor::mIpAddr);
     heartBeatReq.mutable_attributes()->MergeFrom(attributes);
     heartBeatReq.mutable_tags()->MergeFrom({GetConfigServerTags().begin(), GetConfigServerTags().end()});
     heartBeatReq.set_running_status("");
@@ -285,19 +289,21 @@ void LegacyCommonConfigProvider::UpdateRemoteConfig(
     const google::protobuf::RepeatedPtrField<configserver::proto::ConfigCheckResult>& checkResults,
     const google::protobuf::RepeatedPtrField<configserver::proto::ConfigDetail>& configDetails) {
     error_code ec;
-    filesystem::create_directories(mPipelineSourceDir, ec);
+    filesystem::create_directories(mContinuousPipelineConfigDir, ec);
     if (ec) {
         StopUsingConfigServer();
-        LOG_ERROR(sLogger,
-                  ("failed to create dir for legacy common configs", "stop receiving config from legacy common config server")(
-                      "dir", mPipelineSourceDir.string())("error code", ec.value())("error msg", ec.message()));
+        LOG_ERROR(
+            sLogger,
+            ("failed to create dir for legacy common configs",
+             "stop receiving config from legacy common config server")("dir", mContinuousPipelineConfigDir.string())(
+                "error code", ec.value())("error msg", ec.message()));
         return;
     }
 
-    lock_guard<mutex> lock(mPipelineMux);
+    lock_guard<mutex> lock(mContinuousPipelineMux);
     for (const auto& checkResult : checkResults) {
-        filesystem::path filePath = mPipelineSourceDir / (checkResult.name() + ".yaml");
-        filesystem::path tmpFilePath = mPipelineSourceDir / (checkResult.name() + ".yaml.new");
+        filesystem::path filePath = mContinuousPipelineConfigDir / (checkResult.name() + ".yaml");
+        filesystem::path tmpFilePath = mContinuousPipelineConfigDir / (checkResult.name() + ".yaml.new");
         switch (checkResult.check_status()) {
             case configserver::proto::DELETED:
                 mConfigNameVersionMap.erase(checkResult.name());
diff --git a/core/config/feedbacker/ConfigFeedbackReceiver.cpp b/core/config/feedbacker/ConfigFeedbackReceiver.cpp
index b9807ef9e4..4bad1dabed 100644
--- a/core/config/feedbacker/ConfigFeedbackReceiver.cpp
+++ b/core/config/feedbacker/ConfigFeedbackReceiver.cpp
@@ -26,9 +26,10 @@ ConfigFeedbackReceiver& ConfigFeedbackReceiver::GetInstance() {
     return instance;
 }
 
-void ConfigFeedbackReceiver::RegisterPipelineConfig(const std::string& name, ConfigFeedbackable* feedbackable) {
+void ConfigFeedbackReceiver::RegisterContinuousPipelineConfig(const std::string& name,
+                                                              ConfigFeedbackable* feedbackable) {
     std::lock_guard<std::mutex> lock(mMutex);
-    mPipelineConfigFeedbackableMap[name] = feedbackable;
+    mContinuousPipelineConfigFeedbackableMap[name] = feedbackable;
 }
 
 void ConfigFeedbackReceiver::RegisterInstanceConfig(const std::string& name, ConfigFeedbackable* feedbackable) {
@@ -36,16 +37,16 @@ void ConfigFeedbackReceiver::RegisterInstanceConfig(const std::string& name, Con
     mInstanceConfigFeedbackableMap[name] = feedbackable;
 }
 
-void ConfigFeedbackReceiver::RegisterCommand(const std::string& type,
-                                             const std::string& name,
-                                             ConfigFeedbackable* feedbackable) {
+void ConfigFeedbackReceiver::RegisterOnetimePipelineConfig(const std::string& type,
+                                                           const std::string& name,
+                                                           ConfigFeedbackable* feedbackable) {
     std::lock_guard<std::mutex> lock(mMutex);
-    mCommandFeedbackableMap[GenerateCommandFeedBackKey(type, name)] = feedbackable;
+    mOnetimePipelineConfigFeedbackableMap[GenerateOnetimePipelineConfigFeedBackKey(type, name)] = feedbackable;
 }
 
-void ConfigFeedbackReceiver::UnregisterPipelineConfig(const std::string& name) {
+void ConfigFeedbackReceiver::UnregisterContinuousPipelineConfig(const std::string& name) {
     std::lock_guard<std::mutex> lock(mMutex);
-    mPipelineConfigFeedbackableMap.erase(name);
+    mContinuousPipelineConfigFeedbackableMap.erase(name);
 }
 
 void ConfigFeedbackReceiver::UnregisterInstanceConfig(const std::string& name) {
@@ -53,16 +54,17 @@ void ConfigFeedbackReceiver::UnregisterInstanceConfig(const std::string& name) {
     mInstanceConfigFeedbackableMap.erase(name);
 }
 
-void ConfigFeedbackReceiver::UnregisterCommand(const std::string& type, const std::string& name) {
+void ConfigFeedbackReceiver::UnregisterOnetimePipelineConfig(const std::string& type, const std::string& name) {
     std::lock_guard<std::mutex> lock(mMutex);
-    mCommandFeedbackableMap.erase(GenerateCommandFeedBackKey(type, name));
+    mOnetimePipelineConfigFeedbackableMap.erase(GenerateOnetimePipelineConfigFeedBackKey(type, name));
 }
 
-void ConfigFeedbackReceiver::FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) {
+void ConfigFeedbackReceiver::FeedbackContinuousPipelineConfigStatus(const std::string& name,
+                                                                    ConfigFeedbackStatus status) {
     std::lock_guard<std::mutex> lock(mMutex);
-    auto iter = mPipelineConfigFeedbackableMap.find(name);
-    if (iter != mPipelineConfigFeedbackableMap.end()) {
-        iter->second->FeedbackPipelineConfigStatus(name, status);
+    auto iter = mContinuousPipelineConfigFeedbackableMap.find(name);
+    if (iter != mContinuousPipelineConfigFeedbackableMap.end()) {
+        iter->second->FeedbackContinuousPipelineConfigStatus(name, status);
     }
 }
 
@@ -74,17 +76,17 @@ void ConfigFeedbackReceiver::FeedbackInstanceConfigStatus(const std::string& nam
     }
 }
 
-void ConfigFeedbackReceiver::FeedbackCommandConfigStatus(const std::string& type,
-                                                         const std::string& name,
-                                                         ConfigFeedbackStatus status) {
+void ConfigFeedbackReceiver::FeedbackOnetimePipelineConfigStatus(const std::string& type,
+                                                                 const std::string& name,
+                                                                 ConfigFeedbackStatus status) {
     std::lock_guard<std::mutex> lock(mMutex);
-    auto iter = mCommandFeedbackableMap.find(GenerateCommandFeedBackKey(type, name));
-    if (iter != mCommandFeedbackableMap.end()) {
-        iter->second->FeedbackCommandConfigStatus(type, name, status);
+    auto iter = mOnetimePipelineConfigFeedbackableMap.find(GenerateOnetimePipelineConfigFeedBackKey(type, name));
+    if (iter != mOnetimePipelineConfigFeedbackableMap.end()) {
+        iter->second->FeedbackOnetimePipelineConfigStatus(type, name, status);
     }
 }
 
-std::string GenerateCommandFeedBackKey(const std::string& type, const std::string& name) {
+std::string GenerateOnetimePipelineConfigFeedBackKey(const std::string& type, const std::string& name) {
     return type + '\1' + name;
 }
 
diff --git a/core/config/feedbacker/ConfigFeedbackReceiver.h b/core/config/feedbacker/ConfigFeedbackReceiver.h
index 86796afbcd..f20bcdb73c 100644
--- a/core/config/feedbacker/ConfigFeedbackReceiver.h
+++ b/core/config/feedbacker/ConfigFeedbackReceiver.h
@@ -23,27 +23,29 @@
 
 namespace logtail {
 
-std::string GenerateCommandFeedBackKey(const std::string& type, const std::string& name);
+std::string GenerateOnetimePipelineConfigFeedBackKey(const std::string& type, const std::string& name);
 
 class ConfigFeedbackReceiver {
 public:
     static ConfigFeedbackReceiver& GetInstance();
-    void RegisterPipelineConfig(const std::string& name, ConfigFeedbackable* feedbackable);
+    void RegisterContinuousPipelineConfig(const std::string& name, ConfigFeedbackable* feedbackable);
     void RegisterInstanceConfig(const std::string& name, ConfigFeedbackable* feedbackable);
-    void RegisterCommand(const std::string& type, const std::string& name, ConfigFeedbackable* feedbackable);
-    void UnregisterPipelineConfig(const std::string& name);
+    void
+    RegisterOnetimePipelineConfig(const std::string& type, const std::string& name, ConfigFeedbackable* feedbackable);
+    void UnregisterContinuousPipelineConfig(const std::string& name);
     void UnregisterInstanceConfig(const std::string& name);
-    void UnregisterCommand(const std::string& type, const std::string& name);
-    void FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status);
+    void UnregisterOnetimePipelineConfig(const std::string& type, const std::string& name);
+    void FeedbackContinuousPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status);
     void FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status);
-    void FeedbackCommandConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status);
+    void
+    FeedbackOnetimePipelineConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status);
 
 private:
     ConfigFeedbackReceiver() {}
     std::mutex mMutex;
-    std::unordered_map<std::string, ConfigFeedbackable*> mPipelineConfigFeedbackableMap;
+    std::unordered_map<std::string, ConfigFeedbackable*> mContinuousPipelineConfigFeedbackableMap;
     std::unordered_map<std::string, ConfigFeedbackable*> mInstanceConfigFeedbackableMap;
-    std::unordered_map<std::string, ConfigFeedbackable*> mCommandFeedbackableMap;
+    std::unordered_map<std::string, ConfigFeedbackable*> mOnetimePipelineConfigFeedbackableMap;
 };
 
 } // namespace logtail
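
The receiver keys onetime pipeline configs by type and name joined with a '\1' separator, so the register, feedback and unregister calls must pass the same pair. A usage sketch; ReportOnetimeResult and the type/name values are illustrative, and provider stands for any ConfigFeedbackable implementation.

    #include "config/feedbacker/ConfigFeedbackReceiver.h"

    void ReportOnetimeResult(logtail::ConfigFeedbackable* provider) {
        auto& receiver = logtail::ConfigFeedbackReceiver::GetInstance();
        receiver.RegisterOnetimePipelineConfig("example_task", "job-1", provider);
        // ... run the onetime pipeline, then report its terminal state
        receiver.FeedbackOnetimePipelineConfigStatus("example_task", "job-1",
                                                     logtail::ConfigFeedbackStatus::APPLIED);
        receiver.UnregisterOnetimePipelineConfig("example_task", "job-1");
    }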
diff --git a/core/config/feedbacker/ConfigFeedbackable.h b/core/config/feedbacker/ConfigFeedbackable.h
index f027e2e758..29126c9d33 100644
--- a/core/config/feedbacker/ConfigFeedbackable.h
+++ b/core/config/feedbacker/ConfigFeedbackable.h
@@ -28,10 +28,10 @@ std::string_view ToStringView(ConfigFeedbackStatus status);
 class ConfigFeedbackable {
 public:
     virtual ~ConfigFeedbackable() = default; // LCOV_EXCL_LINE
-    virtual void FeedbackPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) = 0;
+    virtual void FeedbackContinuousPipelineConfigStatus(const std::string& name, ConfigFeedbackStatus status) = 0;
     virtual void FeedbackInstanceConfigStatus(const std::string& name, ConfigFeedbackStatus status) = 0;
     virtual void
-    FeedbackCommandConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status)
+    FeedbackOnetimePipelineConfigStatus(const std::string& type, const std::string& name, ConfigFeedbackStatus status)
         = 0;
 };
 
diff --git a/core/config/provider/ConfigProvider.cpp b/core/config/provider/ConfigProvider.cpp
index 402c009ae2..207f52a39a 100644
--- a/core/config/provider/ConfigProvider.cpp
+++ b/core/config/provider/ConfigProvider.cpp
@@ -14,9 +14,9 @@
 
 #include "config/provider/ConfigProvider.h"
 
-#include "InstanceConfigWatcher.h"
 #include "app_config/AppConfig.h"
-#include "config/watcher/ConfigWatcher.h"
+#include "config/watcher/InstanceConfigWatcher.h"
+#include "config/watcher/PipelineConfigWatcher.h"
 
 using namespace std;
 
@@ -24,17 +24,17 @@ namespace logtail {
 
 void ConfigProvider::Init(const string& dir) {
     // default path: /etc/ilogtail/config/${dir}
-    mPipelineSourceDir.assign(AppConfig::GetInstance()->GetLoongcollectorConfDir());
-    mPipelineSourceDir /= GetPipelineConfigDir();
-    mPipelineSourceDir /= dir;
+    mContinuousPipelineConfigDir.assign(AppConfig::GetInstance()->GetLoongcollectorConfDir());
+    mContinuousPipelineConfigDir /= GetContinuousPipelineConfigDir();
+    mContinuousPipelineConfigDir /= dir;
 
     mInstanceSourceDir.assign(AppConfig::GetInstance()->GetLoongcollectorConfDir());
     mInstanceSourceDir /= "instance_config";
     mInstanceSourceDir /= dir;
 
     error_code ec;
-    filesystem::create_directories(mPipelineSourceDir, ec);
-    ConfigWatcher::GetInstance()->AddSource(mPipelineSourceDir, &mPipelineMux);
+    filesystem::create_directories(mContinuousPipelineConfigDir, ec);
+    PipelineConfigWatcher::GetInstance()->AddSource(mContinuousPipelineConfigDir, &mContinuousPipelineMux);
 
     ec.clear();
     filesystem::create_directories(mInstanceSourceDir, ec);
diff --git a/core/config/provider/ConfigProvider.h b/core/config/provider/ConfigProvider.h
index ed58ac3c11..b41f663a8a 100644
--- a/core/config/provider/ConfigProvider.h
+++ b/core/config/provider/ConfigProvider.h
@@ -34,9 +34,9 @@ class ConfigProvider {
     ConfigProvider() = default;
     virtual ~ConfigProvider() = default;
 
-    std::filesystem::path mPipelineSourceDir;
+    std::filesystem::path mContinuousPipelineConfigDir;
     std::filesystem::path mInstanceSourceDir;
-    mutable std::mutex mPipelineMux;
+    mutable std::mutex mContinuousPipelineMux;
     mutable std::mutex mInstanceMux;
 };
 
diff --git a/core/config/watcher/ConfigWatcher.cpp b/core/config/watcher/ConfigWatcher.cpp
index 2c430a60ed..fae78869b0 100644
--- a/core/config/watcher/ConfigWatcher.cpp
+++ b/core/config/watcher/ConfigWatcher.cpp
@@ -14,188 +14,10 @@
 
 #include "config/watcher/ConfigWatcher.h"
 
-#include <memory>
-#include <unordered_set>
-
-#include "PipelineConfig.h"
-#include "logger/Logger.h"
-#include "pipeline/PipelineManager.h"
-
 using namespace std;
 
 namespace logtail {
 
-bool ReadFile(const string& filepath, string& content);
-
-ConfigWatcher::ConfigWatcher() : mPipelineManager(PipelineManager::GetInstance()) {
-}
-
-PipelineConfigDiff ConfigWatcher::CheckConfigDiff() {
-    PipelineConfigDiff diff;
-    unordered_set<string> configSet;
-    for (const auto& dir : mSourceDir) {
-        error_code ec;
-        filesystem::file_status s = filesystem::status(dir, ec);
-        if (ec) {
-            LOG_WARNING(sLogger,
-                        ("failed to get config dir path info", "skip current object")("dir path", dir.string())(
-                            "error code", ec.value())("error msg", ec.message()));
-            continue;
-        }
-        if (!filesystem::exists(s)) {
-            LOG_WARNING(sLogger, ("config dir path not existed", "skip current object")("dir path", dir.string()));
-            continue;
-        }
-        if (!filesystem::is_directory(s)) {
-            LOG_WARNING(sLogger,
-                        ("config dir path is not a directory", "skip current object")("dir path", dir.string()));
-            continue;
-        }
-        for (auto const& entry : filesystem::directory_iterator(dir, ec)) {
-            // lock the dir if it is provided by config provider
-            unique_lock<mutex> lock;
-            auto itr = mDirMutexMap.find(dir.string());
-            if (itr != mDirMutexMap.end()) {
-                lock = unique_lock<mutex>(*itr->second, defer_lock);
-                lock.lock();
-            }
-
-            const filesystem::path& path = entry.path();
-            const string& configName = path.stem().string();
-            const string& filepath = path.string();
-            if (!filesystem::is_regular_file(entry.status(ec))) {
-                LOG_DEBUG(sLogger, ("config file is not a regular file", "skip current object")("filepath", filepath));
-                continue;
-            }
-            if (configSet.find(configName) != configSet.end()) {
-                LOG_WARNING(
-                    sLogger,
-                    ("more than 1 config with the same name is found", "skip current config")("filepath", filepath));
-                continue;
-            }
-            configSet.insert(configName);
-
-            auto iter = mFileInfoMap.find(filepath);
-            uintmax_t size = filesystem::file_size(path, ec);
-            filesystem::file_time_type mTime = filesystem::last_write_time(path, ec);
-            if (iter == mFileInfoMap.end()) {
-                mFileInfoMap[filepath] = make_pair(size, mTime);
-                unique_ptr<Json::Value> detail = make_unique<Json::Value>(new Json::Value());
-                if (!LoadConfigDetailFromFile(path, *detail)) {
-                    continue;
-                }
-                if (!IsConfigEnabled(configName, *detail)) {
-                    LOG_INFO(sLogger, ("new config found and disabled", "skip current object")("config", configName));
-                    continue;
-                }
-                PipelineConfig config(configName, std::move(detail));
-                if (!config.Parse()) {
-                    LOG_ERROR(sLogger, ("new config found but invalid", "skip current object")("config", configName));
-                    AlarmManager::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM,
-                                                           "new config found but invalid: skip current object, config: "
-                                                               + configName,
-                                                           config.mProject,
-                                                           config.mLogstore,
-                                                           config.mRegion);
-                    continue;
-                }
-                diff.mAdded.push_back(std::move(config));
-                LOG_INFO(
-                    sLogger,
-                    ("new config found and passed topology check", "prepare to build pipeline")("config", configName));
-            } else if (iter->second.first != size || iter->second.second != mTime) {
-                // for config currently running, we leave it untouched if new config is invalid
-                mFileInfoMap[filepath] = make_pair(size, mTime);
-                unique_ptr<Json::Value> detail = make_unique<Json::Value>(new Json::Value());
-                if (!LoadConfigDetailFromFile(path, *detail)) {
-                    continue;
-                }
-                if (!IsConfigEnabled(configName, *detail)) {
-                    if (mPipelineManager->FindConfigByName(configName)) {
-                        diff.mRemoved.push_back(configName);
-                        LOG_INFO(sLogger,
-                                 ("existing valid config modified and disabled",
-                                  "prepare to stop current running pipeline")("config", configName));
-                    } else {
-                        LOG_INFO(sLogger,
-                                 ("existing invalid config modified and disabled", "skip current object")("config",
-                                                                                                          configName));
-                    }
-                    continue;
-                }
-                shared_ptr<Pipeline> p = mPipelineManager->FindConfigByName(configName);
-                if (!p) {
-                    PipelineConfig config(configName, std::move(detail));
-                    if (!config.Parse()) {
-                        LOG_ERROR(sLogger,
-                                  ("existing invalid config modified and remains invalid",
-                                   "skip current object")("config", configName));
-                        AlarmManager::GetInstance()->SendAlarm(
-                            CATEGORY_CONFIG_ALARM,
-                            "existing invalid config modified and remains invalid: skip current object, config: "
-                                + configName,
-                            config.mProject,
-                            config.mLogstore,
-                            config.mRegion);
-                        continue;
-                    }
-                    diff.mAdded.push_back(std::move(config));
-                    LOG_INFO(sLogger,
-                             ("existing invalid config modified and passed topology check",
-                              "prepare to build pipeline")("config", configName));
-                } else if (*detail != p->GetConfig()) {
-                    PipelineConfig config(configName, std::move(detail));
-                    if (!config.Parse()) {
-                        LOG_ERROR(sLogger,
-                                  ("existing valid config modified and becomes invalid",
-                                   "keep current pipeline running")("config", configName));
-                        AlarmManager::GetInstance()->SendAlarm(
-                            CATEGORY_CONFIG_ALARM,
-                            "existing valid config modified and becomes invalid: skip current object, config: "
-                                + configName,
-                            config.mProject,
-                            config.mLogstore,
-                            config.mRegion);
-                        continue;
-                    }
-                    diff.mModified.push_back(std::move(config));
-                    LOG_INFO(sLogger,
-                             ("existing valid config modified and passed topology check",
-                              "prepare to rebuild pipeline")("config", configName));
-                } else {
-                    LOG_DEBUG(sLogger,
-                              ("existing valid config file modified, but no change found", "skip current object"));
-                }
-            } else {
-                LOG_DEBUG(sLogger, ("existing config file unchanged", "skip current object"));
-            }
-        }
-    }
-    for (const auto& name : mPipelineManager->GetAllConfigNames()) {
-        if (configSet.find(name) == configSet.end()) {
-            diff.mRemoved.push_back(name);
-            LOG_INFO(sLogger,
-                     ("existing valid config is removed", "prepare to stop current running pipeline")("config", name));
-        }
-    }
-    for (const auto& item : mFileInfoMap) {
-        string configName = filesystem::path(item.first).stem().string();
-        if (configSet.find(configName) == configSet.end()) {
-            mFileInfoMap.erase(item.first);
-        }
-    }
-
-    if (!diff.IsEmpty()) {
-        LOG_INFO(sLogger,
-                 ("config files scan done", "got updates, begin to update pipelines")("added", diff.mAdded.size())(
-                     "modified", diff.mModified.size())("removed", diff.mRemoved.size()));
-    } else {
-        LOG_DEBUG(sLogger, ("config files scan done", "no update"));
-    }
-
-    return diff;
-}
-
 void ConfigWatcher::AddSource(const string& dir, mutex* mux) {
     mSourceDir.emplace_back(dir);
     if (mux != nullptr) {
@@ -203,9 +25,11 @@ void ConfigWatcher::AddSource(const string& dir, mutex* mux) {
     }
 }
 
+#ifdef APSARA_UNIT_TEST_MAIN
 void ConfigWatcher::ClearEnvironment() {
     mSourceDir.clear();
     mFileInfoMap.clear();
 }
+#endif
 
 } // namespace logtail
diff --git a/core/config/watcher/ConfigWatcher.h b/core/config/watcher/ConfigWatcher.h
index ba58b28f76..0b43d18cbb 100644
--- a/core/config/watcher/ConfigWatcher.h
+++ b/core/config/watcher/ConfigWatcher.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 iLogtail Authors
+ * Copyright 2024 iLogtail Authors
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,43 +16,33 @@
 
 #pragma once
 
-#include <cstdint>
 #include <filesystem>
 #include <map>
+#include <mutex>
 #include <string>
 #include <utility>
 #include <vector>
 
-#include "config/ConfigDiff.h"
-
 namespace logtail {
 
-class PipelineManager;
-
 class ConfigWatcher {
 public:
     ConfigWatcher(const ConfigWatcher&) = delete;
     ConfigWatcher& operator=(const ConfigWatcher&) = delete;
 
-    static ConfigWatcher* GetInstance() {
-        static ConfigWatcher instance;
-        return &instance;
-    }
-
-    PipelineConfigDiff CheckConfigDiff();
     void AddSource(const std::string& dir, std::mutex* mux = nullptr);
-    // for ut
-    void SetPipelineManager(const PipelineManager* pm) { mPipelineManager = pm; }
+
+#ifdef APSARA_UNIT_TEST_MAIN
     void ClearEnvironment();
+#endif
 
-private:
-    ConfigWatcher();
-    ~ConfigWatcher() = default;
+protected:
+    ConfigWatcher() = default;
+    virtual ~ConfigWatcher() = default;
 
     std::vector<std::filesystem::path> mSourceDir;
-    std::unordered_map<std::string, std::mutex*> mDirMutexMap;
+    std::map<std::string, std::mutex*> mDirMutexMap;
     std::map<std::string, std::pair<uintmax_t, std::filesystem::file_time_type>> mFileInfoMap;
-    const PipelineManager* mPipelineManager = nullptr;
 };
 
-} // namespace logtail
\ No newline at end of file
+} // namespace logtail
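
Note: the refactor above turns ConfigWatcher into a plain base class: the singleton accessor and the diff computation move to the concrete watchers, while the watched directories and file metadata stay in protected members. A minimal sketch of the intended pattern (DemoConfigWatcher and Scan are illustrative names, not part of this patch):

    #include "config/watcher/ConfigWatcher.h"

    namespace logtail {

    class DemoConfigWatcher : public ConfigWatcher {
    public:
        static DemoConfigWatcher* GetInstance() {
            static DemoConfigWatcher instance;
            return &instance;
        }

        // A concrete watcher walks the protected mSourceDir/mFileInfoMap members
        // inherited from ConfigWatcher and builds its own diff type here.
        void Scan() {}

    private:
        DemoConfigWatcher() = default;
        ~DemoConfigWatcher() override = default;
    };

    } // namespace logtail
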
diff --git a/core/config/watcher/InstanceConfigWatcher.cpp b/core/config/watcher/InstanceConfigWatcher.cpp
index 6683ac6fab..fcd6e518a3 100644
--- a/core/config/watcher/InstanceConfigWatcher.cpp
+++ b/core/config/watcher/InstanceConfigWatcher.cpp
@@ -17,16 +17,17 @@
 #include <memory>
 #include <unordered_set>
 
-#include "config/InstanceConfig.h"
+#include "common/FileSystemUtil.h"
+#include "config/ConfigUtil.h"
+#include "config/InstanceConfigManager.h"
 #include "logger/Logger.h"
 
 using namespace std;
 
 namespace logtail {
 
-bool ReadFile(const string& filepath, string& content);
-
-InstanceConfigWatcher::InstanceConfigWatcher() : mInstanceConfigManager(InstanceConfigManager::GetInstance()) {
+InstanceConfigWatcher::InstanceConfigWatcher()
+    : ConfigWatcher(), mInstanceConfigManager(InstanceConfigManager::GetInstance()) {
 }
 
 InstanceConfigDiff InstanceConfigWatcher::CheckConfigDiff() {
@@ -167,16 +168,4 @@ InstanceConfigDiff InstanceConfigWatcher::CheckConfigDiff() {
     return diff;
 }
 
-void InstanceConfigWatcher::AddSource(const string& dir, mutex* mux) {
-    mSourceDir.emplace_back(dir);
-    if (mux != nullptr) {
-        mDirMutexMap[dir] = mux;
-    }
-}
-
-void InstanceConfigWatcher::ClearEnvironment() {
-    mSourceDir.clear();
-    mFileInfoMap.clear();
-}
-
 } // namespace logtail
diff --git a/core/config/watcher/InstanceConfigWatcher.h b/core/config/watcher/InstanceConfigWatcher.h
index 81a37fce18..c38abec9fb 100644
--- a/core/config/watcher/InstanceConfigWatcher.h
+++ b/core/config/watcher/InstanceConfigWatcher.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 iLogtail Authors
+ * Copyright 2024 iLogtail Authors
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,22 +16,14 @@
 
 #pragma once
 
-#include <cstdint>
-#include <filesystem>
-#include <map>
-#include <string>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "InstanceConfigManager.h"
 #include "config/ConfigDiff.h"
+#include "config/watcher/ConfigWatcher.h"
 
 namespace logtail {
 
 class InstanceConfigManager;
 
-class InstanceConfigWatcher {
+class InstanceConfigWatcher : public ConfigWatcher {
 public:
     InstanceConfigWatcher(const InstanceConfigWatcher&) = delete;
     InstanceConfigWatcher& operator=(const InstanceConfigWatcher&) = delete;
@@ -42,19 +34,16 @@ class InstanceConfigWatcher {
     }
 
     InstanceConfigDiff CheckConfigDiff();
-    void AddSource(const std::string& dir, std::mutex* mux = nullptr);
-    // for ut
+
+#ifdef APSARA_UNIT_TEST_MAIN
     void SetInstanceConfigManager(const InstanceConfigManager* m) { mInstanceConfigManager = m; }
-    void ClearEnvironment();
+#endif
 
 private:
     InstanceConfigWatcher();
     ~InstanceConfigWatcher() = default;
 
-    std::vector<std::filesystem::path> mSourceDir;
-    std::unordered_map<std::string, std::mutex*> mDirMutexMap;
-    std::map<std::string, std::pair<uintmax_t, std::filesystem::file_time_type>> mFileInfoMap;
     const InstanceConfigManager* mInstanceConfigManager = nullptr;
 };
 
-} // namespace logtail
\ No newline at end of file
+} // namespace logtail
diff --git a/core/config/watcher/PipelineConfigWatcher.cpp b/core/config/watcher/PipelineConfigWatcher.cpp
new file mode 100644
index 0000000000..40118cbc47
--- /dev/null
+++ b/core/config/watcher/PipelineConfigWatcher.cpp
@@ -0,0 +1,315 @@
+// Copyright 2023 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "config/watcher/PipelineConfigWatcher.h"
+
+#include <memory>
+#include <unordered_set>
+
+#include "common/FileSystemUtil.h"
+#include "config/ConfigUtil.h"
+#include "logger/Logger.h"
+#include "pipeline/PipelineManager.h"
+#include "task_pipeline/TaskPipelineManager.h"
+
+using namespace std;
+
+namespace logtail {
+
+PipelineConfigWatcher::PipelineConfigWatcher()
+    : ConfigWatcher(),
+      mPipelineManager(PipelineManager::GetInstance()),
+      mTaskPipelineManager(TaskPipelineManager::GetInstance()) {
+}
+
+pair<PipelineConfigDiff, TaskConfigDiff> PipelineConfigWatcher::CheckConfigDiff() {
+    PipelineConfigDiff pDiff;
+    TaskConfigDiff tDiff;
+    unordered_set<string> configSet;
+    for (const auto& dir : mSourceDir) {
+        error_code ec;
+        filesystem::file_status s = filesystem::status(dir, ec);
+        if (ec) {
+            LOG_WARNING(sLogger,
+                        ("failed to get config dir path info", "skip current object")("dir path", dir.string())(
+                            "error code", ec.value())("error msg", ec.message()));
+            continue;
+        }
+        if (!filesystem::exists(s)) {
+            LOG_WARNING(sLogger, ("config dir path not existed", "skip current object")("dir path", dir.string()));
+            continue;
+        }
+        if (!filesystem::is_directory(s)) {
+            LOG_WARNING(sLogger,
+                        ("config dir path is not a directory", "skip current object")("dir path", dir.string()));
+            continue;
+        }
+        for (auto const& entry : filesystem::directory_iterator(dir, ec)) {
+            // lock the dir if it is provided by config provider
+            unique_lock<mutex> lock;
+            auto itr = mDirMutexMap.find(dir.string());
+            if (itr != mDirMutexMap.end()) {
+                lock = unique_lock<mutex>(*itr->second, defer_lock);
+                lock.lock();
+            }
+
+            const filesystem::path& path = entry.path();
+            const string& configName = path.stem().string();
+            const string& filepath = path.string();
+            if (!filesystem::is_regular_file(entry.status(ec))) {
+                LOG_DEBUG(sLogger, ("config file is not a regular file", "skip current object")("filepath", filepath));
+                continue;
+            }
+            if (configSet.find(configName) != configSet.end()) {
+                LOG_WARNING(
+                    sLogger,
+                    ("more than 1 config with the same name is found", "skip current config")("filepath", filepath));
+                continue;
+            }
+            configSet.insert(configName);
+
+            auto iter = mFileInfoMap.find(filepath);
+            uintmax_t size = filesystem::file_size(path, ec);
+            filesystem::file_time_type mTime = filesystem::last_write_time(path, ec);
+            if (iter == mFileInfoMap.end()) {
+                mFileInfoMap[filepath] = make_pair(size, mTime);
+                unique_ptr<Json::Value> detail = make_unique<Json::Value>();
+                if (!LoadConfigDetailFromFile(path, *detail)) {
+                    continue;
+                }
+                if (!IsConfigEnabled(configName, *detail)) {
+                    LOG_INFO(sLogger, ("new config found and disabled", "skip current object")("config", configName));
+                    continue;
+                }
+                if (!CheckAddedConfig(configName, std::move(detail), pDiff, tDiff)) {
+                    continue;
+                }
+            } else if (iter->second.first != size || iter->second.second != mTime) {
+                // for config currently running, we leave it untouched if new config is invalid
+                mFileInfoMap[filepath] = make_pair(size, mTime);
+                unique_ptr<Json::Value> detail = make_unique<Json::Value>();
+                if (!LoadConfigDetailFromFile(path, *detail)) {
+                    continue;
+                }
+                if (!IsConfigEnabled(configName, *detail)) {
+                    switch (GetConfigType(*detail)) {
+                        case ConfigType::Pipeline:
+                            if (mPipelineManager->FindConfigByName(configName)) {
+                                pDiff.mRemoved.push_back(configName);
+                                LOG_INFO(sLogger,
+                                         ("existing valid config modified and disabled",
+                                          "prepare to stop current running pipeline")("config", configName));
+                            } else {
+                                LOG_INFO(sLogger,
+                                         ("existing invalid config modified and disabled",
+                                          "skip current object")("config", configName));
+                            }
+                            break;
+                        case ConfigType::Task:
+                            if (mTaskPipelineManager->FindPipelineByName(configName)) {
+                                tDiff.mRemoved.push_back(configName);
+                                LOG_INFO(sLogger,
+                                         ("existing valid config modified and disabled",
+                                          "prepare to stop current running task")("config", configName));
+                            } else {
+                                LOG_INFO(sLogger,
+                                         ("existing invalid config modified and disabled",
+                                          "skip current object")("config", configName));
+                            }
+                            break;
+                    }
+                    continue;
+                }
+                if (!CheckModifiedConfig(configName, std::move(detail), pDiff, tDiff)) {
+                    continue;
+                }
+            } else {
+                LOG_DEBUG(sLogger, ("existing config file unchanged", "skip current object"));
+            }
+        }
+    }
+    for (const auto& name : mPipelineManager->GetAllConfigNames()) {
+        if (configSet.find(name) == configSet.end()) {
+            pDiff.mRemoved.push_back(name);
+            LOG_INFO(sLogger,
+                     ("existing valid config is removed", "prepare to stop current running pipeline")("config", name));
+        }
+    }
+    for (const auto& name : mTaskPipelineManager->GetAllPipelineNames()) {
+        if (configSet.find(name) == configSet.end()) {
+            tDiff.mRemoved.push_back(name);
+            LOG_INFO(sLogger,
+                     ("existing valid config is removed", "prepare to stop current running task")("config", name));
+        }
+    }
+    for (auto it = mFileInfoMap.begin(); it != mFileInfoMap.end();) {
+        string configName = filesystem::path(it->first).stem().string();
+        if (configSet.find(configName) == configSet.end()) {
+            it = mFileInfoMap.erase(it);
+        } else {
+            ++it;
+        }
+    }
+
+    if (!pDiff.IsEmpty()) {
+        LOG_INFO(sLogger,
+                 ("config files scan done", "got updates, begin to update pipelines")("added", pDiff.mAdded.size())(
+                     "modified", pDiff.mModified.size())("removed", pDiff.mRemoved.size()));
+    } else {
+        LOG_DEBUG(sLogger, ("config files scan done", "no pipeline update"));
+    }
+    if (!tDiff.IsEmpty()) {
+        LOG_INFO(sLogger,
+                 ("config files scan done", "got updates, begin to update tasks")("added", tDiff.mAdded.size())(
+                     "modified", tDiff.mModified.size())("removed", tDiff.mRemoved.size()));
+    } else {
+        LOG_DEBUG(sLogger, ("config files scan done", "no task update"));
+    }
+
+    return make_pair(std::move(pDiff), std::move(tDiff));
+}
+
+bool PipelineConfigWatcher::CheckAddedConfig(const string& configName,
+                                             unique_ptr<Json::Value>&& configDetail,
+                                             PipelineConfigDiff& pDiff,
+                                             TaskConfigDiff& tDiff) {
+    switch (GetConfigType(*configDetail)) {
+        case ConfigType::Pipeline: {
+            PipelineConfig config(configName, std::move(configDetail));
+            if (!config.Parse()) {
+                LOG_ERROR(sLogger, ("new config found but invalid", "skip current object")("config", configName));
+                AlarmManager::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM,
+                                                       "new config found but invalid: skip current object, config: "
+                                                           + configName,
+                                                       config.mProject,
+                                                       config.mLogstore,
+                                                       config.mRegion);
+                return false;
+            }
+            pDiff.mAdded.push_back(std::move(config));
+            LOG_INFO(sLogger,
+                     ("new config found and passed topology check", "prepare to build pipeline")("config", configName));
+            break;
+        }
+        case ConfigType::Task: {
+            TaskConfig config(configName, std::move(configDetail));
+            if (!config.Parse()) {
+                LOG_ERROR(sLogger, ("new config found but invalid", "skip current object")("config", configName));
+                AlarmManager::GetInstance()->SendAlarm(
+                    CATEGORY_CONFIG_ALARM, "new config found but invalid: skip current object, config: " + configName);
+                return false;
+            }
+            tDiff.mAdded.push_back(std::move(config));
+            LOG_INFO(sLogger,
+                     ("new config found and passed topology check", "prepare to build task")("config", configName));
+            break;
+        }
+    }
+    return true;
+}
+
+bool PipelineConfigWatcher::CheckModifiedConfig(const string& configName,
+                                                unique_ptr<Json::Value>&& configDetail,
+                                                PipelineConfigDiff& pDiff,
+                                                TaskConfigDiff& tDiff) {
+    switch (GetConfigType(*configDetail)) {
+        case ConfigType::Pipeline: {
+            shared_ptr<Pipeline> p = mPipelineManager->FindConfigByName(configName);
+            if (!p) {
+                PipelineConfig config(configName, std::move(configDetail));
+                if (!config.Parse()) {
+                    LOG_ERROR(sLogger,
+                              ("existing invalid config modified and remains invalid",
+                               "skip current object")("config", configName));
+                    AlarmManager::GetInstance()->SendAlarm(
+                        CATEGORY_CONFIG_ALARM,
+                        "existing invalid config modified and remains invalid: skip current object, config: "
+                            + configName,
+                        config.mProject,
+                        config.mLogstore,
+                        config.mRegion);
+                    return false;
+                }
+                pDiff.mAdded.push_back(std::move(config));
+                LOG_INFO(sLogger,
+                         ("existing invalid config modified and passed topology check",
+                          "prepare to build pipeline")("config", configName));
+            } else if (*configDetail != p->GetConfig()) {
+                PipelineConfig config(configName, std::move(configDetail));
+                if (!config.Parse()) {
+                    LOG_ERROR(sLogger,
+                              ("existing valid config modified and becomes invalid",
+                               "keep current pipeline running")("config", configName));
+                    AlarmManager::GetInstance()->SendAlarm(
+                        CATEGORY_CONFIG_ALARM,
+                        "existing valid config modified and becomes invalid: skip current object, config: "
+                            + configName,
+                        config.mProject,
+                        config.mLogstore,
+                        config.mRegion);
+                    return false;
+                }
+                pDiff.mModified.push_back(std::move(config));
+                LOG_INFO(sLogger,
+                         ("existing valid config modified and passed topology check",
+                          "prepare to rebuild pipeline")("config", configName));
+            } else {
+                LOG_DEBUG(sLogger, ("existing valid config file modified, but no change found", "skip current object"));
+            }
+            break;
+        }
+        case ConfigType::Task: {
+            auto& p = mTaskPipelineManager->FindPipelineByName(configName);
+            if (!p) {
+                TaskConfig config(configName, std::move(configDetail));
+                if (!config.Parse()) {
+                    LOG_ERROR(sLogger,
+                              ("existing invalid config modified and remains invalid",
+                               "skip current object")("config", configName));
+                    AlarmManager::GetInstance()->SendAlarm(
+                        CATEGORY_CONFIG_ALARM,
+                        "existing invalid config modified and remains invalid: skip current object, config: "
+                            + configName);
+                    return false;
+                }
+                tDiff.mAdded.push_back(std::move(config));
+                LOG_INFO(sLogger,
+                         ("existing invalid config modified and passed topology check",
+                          "prepare to build task")("config", configName));
+            } else if (*configDetail != p->GetConfig()) {
+                TaskConfig config(configName, std::move(configDetail));
+                if (!config.Parse()) {
+                    LOG_ERROR(sLogger,
+                              ("existing valid config modified and becomes invalid",
+                               "keep current task running")("config", configName));
+                    AlarmManager::GetInstance()->SendAlarm(
+                        CATEGORY_CONFIG_ALARM,
+                        "existing valid config modified and becomes invalid: skip current object, config: "
+                            + configName);
+                    return false;
+                }
+                tDiff.mModified.push_back(std::move(config));
+                LOG_INFO(sLogger,
+                         ("existing valid config modified and passed topology check",
+                          "prepare to rebuild task")("config", configName));
+            } else {
+                LOG_DEBUG(sLogger, ("existing valid config file modified, but no change found", "skip current object"));
+            }
+            break;
+        }
+    }
+    return true;
+}
+
+} // namespace logtail
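
Note: one detail worth calling out in the scan loop above: stale entries in mFileInfoMap are now dropped with the iterator-returning erase, whereas the deleted ConfigWatcher code erased elements inside a range-for loop, which invalidates the loop iterator. A self-contained illustration of the idiom (container contents are illustrative):

    #include <map>
    #include <set>
    #include <string>

    void DropStaleEntries(std::map<std::string, int>& fileInfo,
                          const std::set<std::string>& liveConfigs) {
        for (auto it = fileInfo.begin(); it != fileInfo.end();) {
            if (liveConfigs.find(it->first) == liveConfigs.end()) {
                it = fileInfo.erase(it); // erase returns the next valid iterator
            } else {
                ++it;
            }
        }
    }
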
diff --git a/core/config/watcher/PipelineConfigWatcher.h b/core/config/watcher/PipelineConfigWatcher.h
new file mode 100644
index 0000000000..d1f77967fe
--- /dev/null
+++ b/core/config/watcher/PipelineConfigWatcher.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2023 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "config/ConfigDiff.h"
+#include "config/watcher/ConfigWatcher.h"
+
+namespace logtail {
+
+class PipelineManager;
+class TaskPipelineManager;
+
+class PipelineConfigWatcher : public ConfigWatcher {
+public:
+    PipelineConfigWatcher(const PipelineConfigWatcher&) = delete;
+    PipelineConfigWatcher& operator=(const PipelineConfigWatcher&) = delete;
+
+    static PipelineConfigWatcher* GetInstance() {
+        static PipelineConfigWatcher instance;
+        return &instance;
+    }
+
+    std::pair<PipelineConfigDiff, TaskConfigDiff> CheckConfigDiff();
+
+#ifdef APSARA_UNIT_TEST_MAIN
+    void SetPipelineManager(const PipelineManager* pm) { mPipelineManager = pm; }
+#endif
+
+private:
+    PipelineConfigWatcher();
+    ~PipelineConfigWatcher() = default;
+
+    bool CheckAddedConfig(const std::string& configName,
+                          std::unique_ptr<Json::Value>&& configDetail,
+                          PipelineConfigDiff& pDiff,
+                          TaskConfigDiff& tDiff);
+    bool CheckModifiedConfig(const std::string& configName,
+                             std::unique_ptr<Json::Value>&& configDetail,
+                             PipelineConfigDiff& pDiff,
+                             TaskConfigDiff& tDiff);
+
+    const PipelineManager* mPipelineManager = nullptr;
+    const TaskPipelineManager* mTaskPipelineManager = nullptr;
+};
+
+} // namespace logtail
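
Note: a hedged sketch of how a caller might consume the combined result of CheckConfigDiff; only the watcher API below is taken from this patch, the surrounding update calls are illustrative:

    #include "config/watcher/PipelineConfigWatcher.h"

    void PollPipelineConfigsOnce() {
        auto diffs = logtail::PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
        logtail::PipelineConfigDiff& pDiff = diffs.first;
        logtail::TaskConfigDiff& tDiff = diffs.second;
        if (!pDiff.IsEmpty()) {
            // hand pDiff to PipelineManager to build/rebuild/stop pipelines
        }
        if (!tDiff.IsEmpty()) {
            // hand tDiff to TaskPipelineManager for task pipelines
        }
    }
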
diff --git a/core/constants/SpanConstants.cpp b/core/constants/SpanConstants.cpp
new file mode 100644
index 0000000000..262c4f15b2
--- /dev/null
+++ b/core/constants/SpanConstants.cpp
@@ -0,0 +1,43 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "SpanConstants.h"
+
+namespace logtail {
+
+    const std::string DEFAULT_TRACE_TAG_TRACE_ID = "traceId";
+    const std::string DEFAULT_TRACE_TAG_SPAN_ID = "spanId";
+    const std::string DEFAULT_TRACE_TAG_PARENT_ID = "parentSpanId";
+    const std::string DEFAULT_TRACE_TAG_SPAN_NAME = "spanName";
+    const std::string DEFAULT_TRACE_TAG_SERVICE_NAME = "serviceName";
+    const std::string DEFAULT_TRACE_TAG_START_TIME_NANO = "startTime";
+    const std::string DEFAULT_TRACE_TAG_END_TIME_NANO = "endTime";
+    const std::string DEFAULT_TRACE_TAG_DURATION = "duration";
+    const std::string DEFAULT_TRACE_TAG_ATTRIBUTES = "attributes";
+    const std::string DEFAULT_TRACE_TAG_RESOURCE = "resources";
+    const std::string DEFAULT_TRACE_TAG_LINKS = "links";
+    const std::string DEFAULT_TRACE_TAG_EVENTS = "events";
+    const std::string DEFAULT_TRACE_TAG_TIMESTAMP = "timestamp";
+    const std::string DEFAULT_TRACE_TAG_STATUS_CODE = "statusCode";
+    const std::string DEFAULT_TRACE_TAG_STATUS_MESSAGE = "statusMessage";
+    const std::string DEFAULT_TRACE_TAG_SPAN_KIND = "kind";
+    const std::string DEFAULT_TRACE_TAG_TRACE_STATE = "traceState";
+    const std::string DEFAULT_TRACE_TAG_SPAN_EVENT_NAME = "name";
+#ifdef __ENTERPRISE__
+    // for arms
+    const std::string DEFAULT_TRACE_TAG_APP_ID = "pid";
+    const std::string DEFAULT_TRACE_TAG_IP = "ip";
+#endif
+
+} // namespace logtail
\ No newline at end of file
diff --git a/core/constants/SpanConstants.h b/core/constants/SpanConstants.h
new file mode 100644
index 0000000000..36a9d2f030
--- /dev/null
+++ b/core/constants/SpanConstants.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include <string>
+
+namespace logtail {
+    extern const std::string DEFAULT_TRACE_TAG_TRACE_ID;
+    extern const std::string DEFAULT_TRACE_TAG_SPAN_ID;
+    extern const std::string DEFAULT_TRACE_TAG_PARENT_ID;
+    extern const std::string DEFAULT_TRACE_TAG_SPAN_NAME;
+    extern const std::string DEFAULT_TRACE_TAG_SERVICE_NAME;
+    extern const std::string DEFAULT_TRACE_TAG_START_TIME_NANO;
+    extern const std::string DEFAULT_TRACE_TAG_END_TIME_NANO;
+    extern const std::string DEFAULT_TRACE_TAG_DURATION;
+    extern const std::string DEFAULT_TRACE_TAG_ATTRIBUTES;
+    extern const std::string DEFAULT_TRACE_TAG_RESOURCE;
+    extern const std::string DEFAULT_TRACE_TAG_LINKS;
+    extern const std::string DEFAULT_TRACE_TAG_EVENTS;
+    extern const std::string DEFAULT_TRACE_TAG_TIMESTAMP;
+    extern const std::string DEFAULT_TRACE_TAG_STATUS_CODE;
+    extern const std::string DEFAULT_TRACE_TAG_STATUS_MESSAGE;
+    extern const std::string DEFAULT_TRACE_TAG_SPAN_KIND;
+    extern const std::string DEFAULT_TRACE_TAG_TRACE_STATE;
+    extern const std::string DEFAULT_TRACE_TAG_SPAN_EVENT_NAME;
+#ifdef __ENTERPRISE__
+    // for arms
+    extern const std::string DEFAULT_TRACE_TAG_APP_ID;
+    extern const std::string DEFAULT_TRACE_TAG_IP;
+#endif
+
+} // namespace logtail
\ No newline at end of file
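
Note: a small, hypothetical example of how the new span tag constants could be used when flattening a span into key/value tags (the ToSpanTags helper is illustrative, not part of this patch):

    #include <map>
    #include <string>

    #include "constants/SpanConstants.h"

    std::map<std::string, std::string> ToSpanTags(const std::string& traceId,
                                                  const std::string& spanId,
                                                  const std::string& spanName) {
        return {
            {logtail::DEFAULT_TRACE_TAG_TRACE_ID, traceId},
            {logtail::DEFAULT_TRACE_TAG_SPAN_ID, spanId},
            {logtail::DEFAULT_TRACE_TAG_SPAN_NAME, spanName},
        };
    }
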
diff --git a/core/constants/TagConstants.cpp b/core/constants/TagConstants.cpp
index edd7ea4e55..4d4a865192 100644
--- a/core/constants/TagConstants.cpp
+++ b/core/constants/TagConstants.cpp
@@ -63,4 +63,7 @@ namespace logtail {
     const std::string DEFAULT_METRIC_TAG_CONTAINER_IP = DEFAULT_TAG_CONTAINER_IP;
     const std::string DEFAULT_METRIC_TAG_IMAGE_NAME = DEFAULT_TAG_IMAGE_NAME;
 
+////////////////////////// TRACE ////////////////////////
+
+
 } // namespace logtail
\ No newline at end of file
diff --git a/core/constants/TagConstants.h b/core/constants/TagConstants.h
index 52d4213961..0b83d4092d 100644
--- a/core/constants/TagConstants.h
+++ b/core/constants/TagConstants.h
@@ -44,4 +44,8 @@ namespace logtail {
     extern const std::string DEFAULT_METRIC_TAG_CONTAINER_IP;
     extern const std::string DEFAULT_METRIC_TAG_IMAGE_NAME;
 
+////////////////////////// TRACE ////////////////////////
+
+
+
 } // namespace logtail
\ No newline at end of file
diff --git a/core/dependencies.cmake b/core/dependencies.cmake
index cc27bea370..5b69e49d82 100644
--- a/core/dependencies.cmake
+++ b/core/dependencies.cmake
@@ -392,9 +392,12 @@ endmacro()
 
 # asan for debug
 macro(link_asan target_name)
-    if(CMAKE_BUILD_TYPE MATCHES Debug)
+    if (UNIX)
         target_compile_options(${target_name} PUBLIC -fsanitize=address)
         target_link_options(${target_name} PUBLIC -fsanitize=address -static-libasan)
+    elseif(MSVC)
+        target_compile_options(${target_name} PUBLIC /fsanitize=address)
+        target_link_options(${target_name} PUBLIC /fsanitize=address)
     endif()
 endmacro()
 
diff --git a/core/file_server/ConfigManager.cpp b/core/file_server/ConfigManager.cpp
index 3ae6c241f9..1c702aac7b 100644
--- a/core/file_server/ConfigManager.cpp
+++ b/core/file_server/ConfigManager.cpp
@@ -36,7 +36,6 @@
 #include "app_config/AppConfig.h"
 #include "checkpoint/CheckPointManager.h"
 #include "common/CompressTools.h"
-#include "constants/Constants.h"
 #include "common/ErrorUtil.h"
 #include "common/ExceptionBase.h"
 #include "common/FileSystemUtil.h"
@@ -46,10 +45,10 @@
 #include "common/StringTools.h"
 #include "common/TimeUtil.h"
 #include "common/version.h"
+#include "constants/Constants.h"
 #include "file_server/EventDispatcher.h"
-#include "file_server/event_handler/EventHandler.h"
 #include "file_server/FileServer.h"
-#include "monitor/LogFileProfiler.h"
+#include "file_server/event_handler/EventHandler.h"
 #include "monitor/AlarmManager.h"
 #include "pipeline/Pipeline.h"
 #include "pipeline/PipelineManager.h"
@@ -101,7 +100,7 @@ DEFINE_FLAG_INT32(docker_config_update_interval, "interval between docker config
 
 namespace logtail {
 
-// 
+//
 ParseConfResult ParseConfig(const std::string& configName, Json::Value& jsonRoot) {
     // Get full path, if it is a relative path, prepend process execution dir.
     std::string fullPath = configName;
@@ -111,10 +110,15 @@ ParseConfResult ParseConfig(const std::string& configName, Json::Value& jsonRoot
 
     ifstream is;
     is.open(fullPath.c_str());
-    if (!is.good()) {
+    if (!is) { // https://horstmann.com/cpp/pitfalls.html
+        return CONFIG_NOT_EXIST;
+    }
+    std::string buffer;
+    try {
+        buffer.assign(std::istreambuf_iterator<char>(is), std::istreambuf_iterator<char>());
+    } catch (const std::ios_base::failure& e) {
         return CONFIG_NOT_EXIST;
     }
-    std::string buffer((std::istreambuf_iterator<char>(is)), (std::istreambuf_iterator<char>()));
     if (!IsValidJson(buffer.c_str(), buffer.length())) {
         return CONFIG_INVALID_FORMAT;
     }
@@ -145,7 +149,7 @@ bool ConfigManager::RegisterHandlersRecursively(const std::string& path,
         return result;
 
     if (!config.first->IsDirectoryInBlacklist(path))
-        result = EventDispatcher::GetInstance()->RegisterEventHandler(path.c_str(), config, mSharedHandler);
+        result = EventDispatcher::GetInstance()->RegisterEventHandler(path, config, mSharedHandler);
 
     if (!result)
         return result;
@@ -200,7 +204,7 @@ ConfigManager::ConfigManager() {
     // CorrectionLogtailSysConfDir(); // first create dir then rewrite system-uuid file in GetSystemUUID
     // use a thread to get uuid, work around for CalculateDmiUUID hang
     // mUUID = CalculateDmiUUID();
-    // mInstanceId = CalculateRandomUUID() + "_" + LogFileProfiler::mIpAddr + "_" + ToString(time(NULL));
+    // mInstanceId = CalculateRandomUUID() + "_" + LoongCollectorMonitor::mIpAddr + "_" + ToString(time(NULL));
     // ReloadMappingConfig();
 }
 
@@ -306,9 +310,16 @@ void ConfigManager::RegisterWildcardPath(const FileDiscoveryConfig& config, cons
             if (registerStatus == GET_REGISTER_STATUS_ERROR) {
                 return;
             }
-            if (EventDispatcher::GetInstance()->RegisterEventHandler(item.c_str(), config, mSharedHandler)) {
+            if (config.first->mPreservedDirDepth < 0)
                 RegisterDescendants(
                     item, config, config.first->mMaxDirSearchDepth < 0 ? 100 : config.first->mMaxDirSearchDepth);
+            else {
+                // preserve_depth register
+                RegisterHandlersWithinDepth(item,
+                                            config,
+                                            config.first->mPreservedDirDepth,
+                                            config.first->mMaxDirSearchDepth < 0 ? 100
+                                                                                 : config.first->mMaxDirSearchDepth);
             }
         } else {
             RegisterWildcardPath(config, item, depth + 1);
@@ -382,9 +393,16 @@ void ConfigManager::RegisterWildcardPath(const FileDiscoveryConfig& config, cons
                 if (registerStatus == GET_REGISTER_STATUS_ERROR) {
                     return;
                 }
-                if (EventDispatcher::GetInstance()->RegisterEventHandler(item.c_str(), config, mSharedHandler)) {
+                if (config.first->mPreservedDirDepth < 0)
                     RegisterDescendants(
                         item, config, config.first->mMaxDirSearchDepth < 0 ? 100 : config.first->mMaxDirSearchDepth);
+                else {
+                    // preserve_depth register
+                    RegisterHandlersWithinDepth(
+                        item,
+                        config,
+                        config.first->mPreservedDirDepth,
+                        config.first->mMaxDirSearchDepth < 0 ? 100 : config.first->mMaxDirSearchDepth);
                 }
             } else {
                 RegisterWildcardPath(config, item, depth + 1);
@@ -421,52 +439,57 @@ bool ConfigManager::RegisterHandlers(const string& basePath, const FileDiscovery
     DirRegisterStatus registerStatus = EventDispatcher::GetInstance()->IsDirRegistered(basePath);
     if (registerStatus == GET_REGISTER_STATUS_ERROR)
         return result;
-    // dir in config is valid by default, do not call pathValidator
-    result = EventDispatcher::GetInstance()->RegisterEventHandler(basePath.c_str(), config, mSharedHandler);
-    // if we come into a failure, do not try to register others, there must be something wrong!
-    if (!result)
-        return result;
 
     if (config.first->mPreservedDirDepth < 0)
         result = RegisterDescendants(
             basePath, config, config.first->mMaxDirSearchDepth < 0 ? 100 : config.first->mMaxDirSearchDepth);
     else {
         // preserve_depth register
-        int depth = config.first->mPreservedDirDepth;
-        result = RegisterHandlersWithinDepth(basePath, config, depth);
+        result = RegisterHandlersWithinDepth(basePath,
+                                             config,
+                                             config.first->mPreservedDirDepth,
+                                             config.first->mMaxDirSearchDepth < 0 ? 100
+                                                                                  : config.first->mMaxDirSearchDepth);
     }
     return result;
 }
 
 bool ConfigManager::RegisterDirectory(const std::string& source, const std::string& object) {
-    // TODO��A potential bug: FindBestMatch will test @object with filePattern, which has very
+    // TODO: A potential bug: FindBestMatch will test @object with filePattern, which has very
     // low possibility to match a sub directory name, so here will return false in most cases.
     // e.g.: source: /path/to/monitor, file pattern: *.log, object: subdir.
     // Match(subdir, *.log) = false.
     FileDiscoveryConfig config = FindBestMatch(source, object);
-    if (config.first && !config.first->IsDirectoryInBlacklist(source))
-        return EventDispatcher::GetInstance()->RegisterEventHandler(source.c_str(), config, mSharedHandler);
+    if (config.first && !config.first->IsDirectoryInBlacklist(source)) {
+        return EventDispatcher::GetInstance()->RegisterEventHandler(source, config, mSharedHandler);
+    }
     return false;
 }
 
-bool ConfigManager::RegisterHandlersWithinDepth(const std::string& path, const FileDiscoveryConfig& config, int depth) {
+bool ConfigManager::RegisterHandlersWithinDepth(const std::string& path,
+                                                const FileDiscoveryConfig& config,
+                                                int preservedDirDepth,
+                                                int maxDepth) {
+    if (maxDepth < 0) {
+        return true;
+    }
     if (AppConfig::GetInstance()->IsHostPathMatchBlacklist(path)) {
         LOG_INFO(sLogger, ("ignore path matching host path blacklist", path));
         return false;
     }
-    if (depth <= 0) {
-        DirCheckPointPtr dirCheckPoint;
-        if (CheckPointManager::Instance()->GetDirCheckPoint(path, dirCheckPoint) == false)
+    if (preservedDirDepth < 0) {
+        fsutil::PathStat statBuf;
+        if (!fsutil::PathStat::stat(path, statBuf)) {
+            return true;
+        }
+        int64_t sec = 0;
+        int64_t nsec = 0;
+        statBuf.GetLastWriteTime(sec, nsec);
+        auto curTime = time(nullptr);
+        if (curTime - sec > INT32_FLAG(timeout_interval)) {
             return true;
-        // path had dircheckpoint means it was watched before, so it is valid
-        const set<string>& subdir = dirCheckPoint.get()->mSubDir;
-        for (set<string>::iterator it = subdir.begin(); it != subdir.end(); it++) {
-            if (EventDispatcher::GetInstance()->RegisterEventHandler((*it).c_str(), config, mSharedHandler))
-                RegisterHandlersWithinDepth(*it, config, depth - 1);
         }
-        return true;
     }
-    bool result = true;
 
     fsutil::Dir dir(path);
     if (!dir.Open()) {
@@ -480,30 +503,44 @@ bool ConfigManager::RegisterHandlersWithinDepth(const std::string& path, const F
         LOG_ERROR(sLogger, ("Open dir error: ", path.c_str())("errno", err));
         return false;
     }
+    if (!(EventDispatcher::GetInstance()->RegisterEventHandler(path, config, mSharedHandler))) {
+        // fail early, do not try to register others
+        return false;
+    }
+    if (maxDepth == 0) {
+        return true;
+    }
+
+    if (preservedDirDepth == 0) {
+        DirCheckPointPtr dirCheckPoint;
+        if (CheckPointManager::Instance()->GetDirCheckPoint(path, dirCheckPoint)) {
+            // having a dir checkpoint means the path was watched before, so it is valid
+            const set<string>& subdir = dirCheckPoint.get()->mSubDir;
+            for (const auto& it : subdir) {
+                RegisterHandlersWithinDepth(it, config, 0, maxDepth - 1);
+            }
+            return true;
+        }
+    }
     fsutil::Entry ent;
     while ((ent = dir.ReadNext())) {
         string item = PathJoin(path, ent.Name());
         if (ent.IsDir() && !config.first->IsDirectoryInBlacklist(item)) {
-            if (!(EventDispatcher::GetInstance()->RegisterEventHandler(item.c_str(), config, mSharedHandler))) {
-                // break;// fail early, do not try to register others
-                result = false;
-            } else // sub dir will not be registered if parent dir fails
-                RegisterHandlersWithinDepth(item, config, depth - 1);
+            RegisterHandlersWithinDepth(item, config, preservedDirDepth - 1, maxDepth - 1);
         }
     }
-
-    return result;
+    return true;
 }
 
 // path not terminated by '/', path already registered
 bool ConfigManager::RegisterDescendants(const string& path, const FileDiscoveryConfig& config, int withinDepth) {
+    if (withinDepth < 0) {
+        return true;
+    }
     if (AppConfig::GetInstance()->IsHostPathMatchBlacklist(path)) {
         LOG_INFO(sLogger, ("ignore path matching host path blacklist", path));
         return false;
     }
-    if (withinDepth <= 0) {
-        return true;
-    }
 
     fsutil::Dir dir(path);
     if (!dir.Open()) {
@@ -516,14 +553,20 @@ bool ConfigManager::RegisterDescendants(const string& path, const FileDiscoveryC
         LOG_ERROR(sLogger, ("Open dir error: ", path.c_str())("errno", err));
         return false;
     }
+    if (!EventDispatcher::GetInstance()->RegisterEventHandler(path, config, mSharedHandler)) {
+        // fail early, do not try to register others
+        return false;
+    }
+    if (withinDepth == 0) {
+        return true;
+    }
+
     fsutil::Entry ent;
     bool result = true;
     while ((ent = dir.ReadNext())) {
         string item = PathJoin(path, ent.Name());
         if (ent.IsDir() && !config.first->IsDirectoryInBlacklist(item)) {
-            result = EventDispatcher::GetInstance()->RegisterEventHandler(item.c_str(), config, mSharedHandler);
-            if (result)
-                RegisterDescendants(item, config, withinDepth - 1);
+            RegisterDescendants(item, config, withinDepth - 1);
         }
     }
     return result;
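
Note: the reworked registration above threads two independent bounds through the recursion: maxDepth caps how deep the directory walk may go at all, while preservedDirDepth decides when timeout-based pruning starts to apply. A standalone sketch of that shape (the visit callback and the pruning decision are simplified; the real code additionally consults dir checkpoints and timeout_interval):

    #include <filesystem>
    #include <functional>
    #include <system_error>

    namespace fs = std::filesystem;

    void WalkWithinDepth(const fs::path& dir, int preservedDirDepth, int maxDepth,
                         const std::function<bool(const fs::path&, bool)>& visit) {
        if (maxDepth < 0) {
            return; // search depth exhausted
        }
        // Beyond the preserved depth, the caller may skip directories that look stale.
        if (!visit(dir, /*beyondPreservedDepth=*/preservedDirDepth < 0)) {
            return; // registration failed: do not descend into children
        }
        if (maxDepth == 0) {
            return;
        }
        std::error_code ec;
        for (const auto& entry : fs::directory_iterator(dir, ec)) {
            if (entry.is_directory(ec)) {
                WalkWithinDepth(entry.path(), preservedDirDepth - 1, maxDepth - 1, visit);
            }
        }
    }
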
diff --git a/core/file_server/ConfigManager.h b/core/file_server/ConfigManager.h
index 71020a467f..c9f718e6c0 100644
--- a/core/file_server/ConfigManager.h
+++ b/core/file_server/ConfigManager.h
@@ -510,7 +510,10 @@ class ConfigManager {
      * @param path is the current dir that being registered
      * @depth is the num of sub dir layers that should be registered
      */
-    bool RegisterHandlersWithinDepth(const std::string& path, const FileDiscoveryConfig& config, int depth);
+    bool RegisterHandlersWithinDepth(const std::string& path,
+                                     const FileDiscoveryConfig& config,
+                                     int preservedDirDepth,
+                                     int maxDepth);
     bool RegisterDescendants(const std::string& path, const FileDiscoveryConfig& config, int withinDepth);
     // bool CheckLogType(const std::string& logTypeStr, LogType& logType);
     // 废弃
diff --git a/core/file_server/EventDispatcher.cpp b/core/file_server/EventDispatcher.cpp
index 1b03c06211..3585bdc1f3 100644
--- a/core/file_server/EventDispatcher.cpp
+++ b/core/file_server/EventDispatcher.cpp
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include "EventDispatcher.h"
+
 #include "Flags.h"
 #if defined(__linux__)
 #include <fnmatch.h>
@@ -45,7 +46,6 @@
 #include "file_server/event_handler/LogInput.h"
 #include "file_server/polling/PollingDirFile.h"
 #include "file_server/polling/PollingModify.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/AlarmManager.h"
 #include "monitor/MetricExportor.h"
 #include "protobuf/sls/metric.pb.h"
@@ -88,7 +88,7 @@ DEFINE_FLAG_INT32(default_max_inotify_watch_num, "the max allowed inotify watch
 
 namespace logtail {
 
-EventDispatcher::EventDispatcher() : mWatchNum(0), mInotifyWatchNum(0) {
+EventDispatcher::EventDispatcher() : mWatchNum(0), mInotifyWatchNum(0), mEventListener(EventListener::GetInstance()) {
     /*
      * May add multiple inotify fd instances in the future,
      * so use epoll here though a little more sophisticated than select
@@ -99,7 +99,6 @@ EventDispatcher::EventDispatcher() : mWatchNum(0), mInotifyWatchNum(0) {
     //     mListenFd = -1;
     //     mStreamLogTcpFd = -1;
     // #endif
-    mEventListener = EventListener::GetInstance();
     if (!AppConfig::GetInstance()->NoInotify()) {
         if (!mEventListener->Init()) {
             AlarmManager::GetInstance()->SendAlarm(EPOLL_ERROR_ALARM,
@@ -141,7 +140,7 @@ EventDispatcher::~EventDispatcher() {
         delete mTimeoutHandler;
 }
 
-bool EventDispatcher::RegisterEventHandler(const char* path,
+bool EventDispatcher::RegisterEventHandler(const string& path,
                                            const FileDiscoveryConfig& config,
                                            EventHandler*& handler) {
     if (AppConfig::GetInstance()->IsHostPathMatchBlacklist(path)) {
@@ -176,7 +175,7 @@ bool EventDispatcher::RegisterEventHandler(const char* path,
         return false;
     }
     uint64_t inode = statBuf.GetDevInode().inode;
-    int wd;
+    int wd = -1;
     MapType<string, int>::Type::iterator pathIter = mPathWdMap.find(path);
     if (pathIter != mPathWdMap.end()) {
         wd = pathIter->second;
@@ -235,8 +234,8 @@ bool EventDispatcher::RegisterEventHandler(const char* path,
     } else {
         // need check mEventListener valid
         if (mEventListener->IsInit() && !AppConfig::GetInstance()->IsInInotifyBlackList(path)) {
-            wd = mEventListener->AddWatch(path);
-            if (!mEventListener->IsValidID(wd)) {
+            wd = mEventListener->AddWatch(path.c_str());
+            if (!EventListener::IsValidID(wd)) {
                 string str = ErrnoToString(GetErrno());
                 LOG_WARNING(sLogger, ("failed to register dir", path)("reason", str));
 #if defined(__linux__)
@@ -278,14 +277,7 @@ bool EventDispatcher::RegisterEventHandler(const char* path,
 
     bool dirTimeOutFlag = config.first->IsTimeout(path);
 
-    if (!mEventListener->IsValidID(wd)) {
-        if (dirTimeOutFlag) {
-            LOG_DEBUG(
-                sLogger,
-                ("Drop timeout path, source", path)("config, basepath", config.first->GetBasePath())(
-                    "preseveDepth", config.first->mPreservedDirDepth)("maxDepth", config.first->mMaxDirSearchDepth));
-            return false;
-        }
+    if (!EventListener::IsValidID(wd)) {
         wd = mNonInotifyWd;
         if (mNonInotifyWd == INT_MIN)
             mNonInotifyWd = -1;
@@ -317,7 +309,7 @@ bool EventDispatcher::RegisterEventHandler(const char* path,
 }
 
 // read files when add dir inotify watcher at first time
-void EventDispatcher::AddExistedFileEvents(const char* path, int wd) {
+void EventDispatcher::AddExistedFileEvents(const string& path, int wd) {
     fsutil::Dir dir(path);
     if (!dir.Open()) {
         auto err = GetErrno();
@@ -624,7 +616,7 @@ void EventDispatcher::AddExistedCheckPointFileEvents() {
     // Because they are not in v1 checkpoint manager, no need to delete them.
     auto exactlyOnceConfigs = FileServer::GetInstance()->GetExactlyOnceConfigs();
     if (!exactlyOnceConfigs.empty()) {
-        static auto sCptMV2 = CheckpointManagerV2::GetInstance();
+        static auto* sCptMV2 = CheckpointManagerV2::GetInstance();
         auto exactlyOnceCpts = sCptMV2->ScanCheckpoints(exactlyOnceConfigs);
         LOG_INFO(sLogger,
                  ("start add exactly once checkpoint events",
@@ -693,14 +685,13 @@ void EventDispatcher::AddExistedCheckPointFileEvents() {
     }
 }
 
-bool EventDispatcher::AddTimeoutWatch(const char* path) {
+bool EventDispatcher::AddTimeoutWatch(const string& path) {
     MapType<string, int>::Type::iterator itr = mPathWdMap.find(path);
     if (itr != mPathWdMap.end()) {
         mWdUpdateTimeMap[itr->second] = time(NULL);
         return true;
-    } else {
-        return false;
     }
+    return false;
 }
 
 void EventDispatcher::AddOneToOneMapEntry(DirInfo* dirInfo, int wd) {
@@ -817,11 +808,11 @@ void EventDispatcher::UnregisterAllDir(const string& baseDir) {
     LOG_DEBUG(sLogger, ("Remove all sub dir", baseDir));
     auto subDirAndHandlers = FindAllSubDirAndHandler(baseDir);
     for (auto& subDirAndHandler : subDirAndHandlers) {
-        mTimeoutHandler->Handle(Event(subDirAndHandler.first.c_str(), "", 0, 0));
+        mTimeoutHandler->Handle(Event(subDirAndHandler.first, "", 0, 0));
     }
 }
 
-void EventDispatcher::UnregisterEventHandler(const char* path) {
+void EventDispatcher::UnregisterEventHandler(const string& path) {
     MapType<string, int>::Type::iterator pos = mPathWdMap.find(path);
     if (pos == mPathWdMap.end())
         return;
@@ -836,7 +827,7 @@ void EventDispatcher::UnregisterEventHandler(const char* path) {
     }
     RemoveOneToOneMapEntry(wd);
     mWdUpdateTimeMap.erase(wd);
-    if (mEventListener->IsValidID(wd) && mEventListener->IsInit()) {
+    if (EventListener::IsValidID(wd) && mEventListener->IsInit()) {
         mEventListener->RemoveWatch(wd);
         mInotifyWatchNum--;
     }
@@ -848,7 +839,7 @@ void EventDispatcher::StopAllDir(const string& baseDir) {
     LOG_DEBUG(sLogger, ("Stop all sub dir", baseDir));
     auto subDirAndHandlers = FindAllSubDirAndHandler(baseDir);
     for (auto& subDirAndHandler : subDirAndHandlers) {
-        Event e(subDirAndHandler.first.c_str(), "", EVENT_ISDIR | EVENT_CONTAINER_STOPPED, -1, 0);
+        Event e(subDirAndHandler.first, "", EVENT_ISDIR | EVENT_CONTAINER_STOPPED, -1, 0);
         subDirAndHandler.second->Handle(e);
     }
 }
@@ -869,7 +860,7 @@ DirRegisterStatus EventDispatcher::IsDirRegistered(const string& path) {
     return PATH_INODE_NOT_REGISTERED;
 }
 
-bool EventDispatcher::IsRegistered(const char* path) {
+bool EventDispatcher::IsRegistered(const std::string& path) {
     MapType<string, int>::Type::iterator itr = mPathWdMap.find(path);
     if (itr == mPathWdMap.end())
         return false;
@@ -895,7 +886,7 @@ void EventDispatcher::HandleTimeout() {
     time_t curTime = time(NULL);
     MapType<int, time_t>::Type::iterator itr = mWdUpdateTimeMap.begin();
     for (; itr != mWdUpdateTimeMap.end(); ++itr) {
-        if (curTime - (itr->second) >= INT32_FLAG(timeout_interval)) {
+        if (curTime - (itr->second) > INT32_FLAG(timeout_interval)) {
             // add to vector then batch process to avoid possible iterator change problem
             // mHandler may remove what itr points to, thus change the layout of the map container
             // what follows may not work
@@ -916,33 +907,30 @@ void EventDispatcher::HandleTimeout() {
     }
 }
 
-void EventDispatcher::PropagateTimeout(const char* path) {
-    char* tmp = strdup(path);
-    MapType<string, int>::Type::iterator pathpos = mPathWdMap.find(tmp);
+void EventDispatcher::PropagateTimeout(const std::string& path) {
+    auto pathpos = mPathWdMap.find(path);
     if (pathpos == mPathWdMap.end()) {
         // workaround of bug#5760293, should find the scenarios
-        AlarmManager::GetInstance()->SendAlarm(
-            INVALID_MEMORY_ACCESS_ALARM, "PropagateTimeout access invalid key of mPathWdMap, path : " + string(tmp));
-        LOG_ERROR(sLogger, ("PropagateTimeout access invalid key of mPathWdMap, path", string(tmp)));
-        free(tmp);
+        AlarmManager::GetInstance()->SendAlarm(INVALID_MEMORY_ACCESS_ALARM,
+                                               "PropagateTimeout access invalid key of mPathWdMap, path : " + path);
+        LOG_ERROR(sLogger, ("PropagateTimeout access invalid key of mPathWdMap, path", path));
         return;
     }
-    MapType<int, time_t>::Type::iterator pos = mWdUpdateTimeMap.find(pathpos->second);
-    char* slashpos;
-    time_t curTime = time(NULL);
+    string tmp(path);
+    auto pos = mWdUpdateTimeMap.find(pathpos->second);
+    time_t curTime = time(nullptr);
     while (pos != mWdUpdateTimeMap.end()) {
         pos->second = curTime;
-        slashpos = strrchr(tmp, '/');
-        if (slashpos == NULL)
+        auto slashpos = tmp.rfind('/');
+        if (slashpos == string::npos)
             break;
-        *slashpos = '\0';
+        tmp.resize(slashpos);
         pathpos = mPathWdMap.find(tmp);
         if (pathpos != mPathWdMap.end())
             pos = mWdUpdateTimeMap.find(pathpos->second);
         else
             break;
     }
-    free(tmp);
 }
 
 void EventDispatcher::StartTimeCount() {
@@ -968,7 +956,7 @@ void EventDispatcher::DumpAllHandlersMeta(bool remove) {
         int wd = timeout[i];
         string path = mWdDirInfoMap[wd]->mPath;
         if (remove) {
-            UnregisterEventHandler(path.c_str());
+            UnregisterEventHandler(path);
             ConfigManager::GetInstance()->RemoveHandler(path, false);
             if (ConfigManager::GetInstance()->FindBestMatch(path).first == NULL) {
                 continue;
@@ -984,26 +972,29 @@ void EventDispatcher::ProcessHandlerTimeOut() {
     for (; mapIter != mWdDirInfoMap.end(); ++mapIter) {
         mapIter->second->mHandler->HandleTimeOut();
     }
-    return;
 }
 
 void EventDispatcher::DumpCheckPointPeriod(int32_t curTime) {
     if (CheckPointManager::Instance()->NeedDump(curTime)) {
-        LOG_INFO(sLogger, ("checkpoint dump", "starts"));
-        FileServer::GetInstance()->Pause(false);
-        DumpAllHandlersMeta(false);
-
-        if (!(CheckPointManager::Instance()->DumpCheckPointToLocal()))
-            LOG_WARNING(sLogger, ("dump checkpoint to local", "failed"));
-        else
-            LOG_DEBUG(sLogger, ("dump checkpoint to local", "succeeded"));
-        // after save checkpoint, we should clear all checkpoint
-        CheckPointManager::Instance()->RemoveAllCheckPoint();
-        FileServer::GetInstance()->Resume(false);
-        LOG_INFO(sLogger, ("checkpoint dump", "succeeded"));
+        DumpCheckPoint();
     }
 }
 
+void EventDispatcher::DumpCheckPoint() {
+    LOG_INFO(sLogger, ("checkpoint dump", "starts"));
+    FileServer::GetInstance()->Pause(false);
+    DumpAllHandlersMeta(false);
+
+    if (!(CheckPointManager::Instance()->DumpCheckPointToLocal()))
+        LOG_WARNING(sLogger, ("dump checkpoint to local", "failed"));
+    else
+        LOG_DEBUG(sLogger, ("dump checkpoint to local", "succeeded"));
+    // after save checkpoint, we should clear all checkpoint
+    CheckPointManager::Instance()->RemoveAllCheckPoint();
+    FileServer::GetInstance()->Resume(false);
+    LOG_INFO(sLogger, ("checkpoint dump", "succeeded"));
+}
+
 bool EventDispatcher::IsAllFileRead() {
     for (auto it = mWdDirInfoMap.begin(); it != mWdDirInfoMap.end(); ++it) {
         if (!((it->second)->mHandler)->IsAllFileRead()) {
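
Note: PropagateTimeout above swaps the old strdup/strrchr buffer surgery for plain std::string handling; each iteration drops the last path component with rfind/resize. A standalone illustration of that trimming loop:

    #include <string>
    #include <vector>

    std::vector<std::string> AncestorPaths(std::string path) {
        std::vector<std::string> out;
        while (!path.empty()) {
            out.push_back(path);
            auto slash = path.rfind('/');
            if (slash == std::string::npos) {
                break;
            }
            path.resize(slash); // "/a/b/c" -> "/a/b" -> "/a"
        }
        return out; // e.g. {"/a/b/c", "/a/b", "/a"}
    }
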
diff --git a/core/file_server/EventDispatcher.h b/core/file_server/EventDispatcher.h
index d3162720a4..01483e9727 100644
--- a/core/file_server/EventDispatcher.h
+++ b/core/file_server/EventDispatcher.h
@@ -17,24 +17,25 @@
 #pragma once
 #include <sys/types.h>
 #if defined(__linux__)
-#include <sys/socket.h>
-#include <netinet/in.h>
 #include <arpa/inet.h>
 #include <netdb.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
 #include <unistd.h>
 #endif
 #include <stddef.h>
 #include <time.h>
+
+#include <set>
 #include <string>
 #include <unordered_map>
 #include <unordered_set>
-#include <set>
-#include "monitor/LogFileProfiler.h"
-#include "file_server/polling/PollingModify.h"
-#include "file_server/polling/PollingDirFile.h"
-#include "file_server/event_listener/EventListener.h"
+
 #include "checkpoint/CheckPointManager.h"
 #include "file_server/FileDiscoveryOptions.h"
+#include "file_server/event_listener/EventListener.h"
+#include "file_server/polling/PollingDirFile.h"
+#include "file_server/polling/PollingModify.h"
 namespace logtail {
 
 class TimeoutHandler;
@@ -99,7 +100,7 @@ class EventDispatcher {
      *
      * @return true on success; on error false is returned
      */
-    bool RegisterEventHandler(const char* path, const FileDiscoveryConfig&, EventHandler*& handler);
+    bool RegisterEventHandler(const std::string& path, const FileDiscoveryConfig&, EventHandler*& handler);
 
     /** Unregister handler for path; If no handler registered for path, do nothing but return.
      * After this call, no event watched on this path any more.
@@ -109,7 +110,7 @@ class EventDispatcher {
      * @param path for whom event handler will be removed.
      */
     // TODO see whether report errors
-    void UnregisterEventHandler(const char* path);
+    void UnregisterEventHandler(const std::string& path);
 
     /** Close handlers for path; If no handler registered for path, do nothing but return.
      *
@@ -134,7 +135,7 @@ class EventDispatcher {
      *
      * @return true if registered, false if not
      */
-    bool IsRegistered(const char* path);
+    bool IsRegistered(const std::string& path);
 
     /** Test whether a directory is registered.
      *
@@ -184,11 +185,11 @@ class EventDispatcher {
     //  * @return true on success; false on failure
     //  */
     // bool Dispatch();
-// #if defined(_MSC_VER)
-//     virtual void InitWindowsSignalObject() {}
-//     virtual void SyncWindowsSignalObject() {}
-//     virtual void ReleaseWindowsSignalObject() {}
-// #endif
+    // #if defined(_MSC_VER)
+    //     virtual void InitWindowsSignalObject() {}
+    //     virtual void SyncWindowsSignalObject() {}
+    //     virtual void ReleaseWindowsSignalObject() {}
+    // #endif
     // #if defined(__linux__)
     //     virtual void InitShennong() = 0;
     //     virtual void CheckShennong() = 0;
@@ -203,9 +204,10 @@ class EventDispatcher {
     void CheckSymbolicLink();
 
     void DumpCheckPointPeriod(int32_t curTime);
+    void DumpCheckPoint();
 
     void StartTimeCount();
-    void PropagateTimeout(const char* path);
+    void PropagateTimeout(const std::string& path);
     void HandleTimeout();
 
     void ReadInotifyEvents(std::vector<Event*>& eventVec);
@@ -230,8 +232,8 @@ class EventDispatcher {
     /**
      * @return true on success; false if path isn't registered by RegisterEventHandler.
      */
-    bool AddTimeoutWatch(const char* path);
-    void AddExistedFileEvents(const char* path, int wd);
+    bool AddTimeoutWatch(const std::string& path);
+    void AddExistedFileEvents(const std::string& path, int wd);
 
     enum class ValidateCheckpointResult {
         kNormal,
diff --git a/core/file_server/FileDiscoveryOptions.cpp b/core/file_server/FileDiscoveryOptions.cpp
index 7aeda2786d..ad47349f7f 100644
--- a/core/file_server/FileDiscoveryOptions.cpp
+++ b/core/file_server/FileDiscoveryOptions.cpp
@@ -575,7 +575,7 @@ bool FileDiscoveryOptions::IsWildcardPathMatch(const string& path, const string&
 
 // XXX: assume path is a subdir under mBasePath
 bool FileDiscoveryOptions::IsTimeout(const string& path) const {
-    if (mPreservedDirDepth < 0 || mWildcardPaths.size() > 0)
+    if (mPreservedDirDepth < 0)
         return false;
 
     // we do not check if (path.find(mBasePath) == 0)
diff --git a/core/file_server/FileServer.cpp b/core/file_server/FileServer.cpp
index 511f202e9a..fc04b83a38 100644
--- a/core/file_server/FileServer.cpp
+++ b/core/file_server/FileServer.cpp
@@ -42,8 +42,17 @@ FileServer::FileServer() {
 void FileServer::Start() {
     ConfigManager::GetInstance()->LoadDockerConfig();
     CheckPointManager::Instance()->LoadCheckPoint();
+    LOG_INFO(sLogger, ("watch dirs", "start"));
+    auto start = GetCurrentTimeInMilliSeconds();
     ConfigManager::GetInstance()->RegisterHandlers();
-    LOG_INFO(sLogger, ("watch dirs", "succeeded"));
+    auto costMs = GetCurrentTimeInMilliSeconds() - start;
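+    // Registering directory watches can take long (e.g. on NFS); warn and alarm if it exceeds 60 seconds.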
+    if (costMs >= 60 * 1000) {
+        AlarmManager::GetInstance()->SendAlarm(REGISTER_HANDLERS_TOO_SLOW_ALARM,
+                                               "Registering handlers took " + ToString(costMs) + " ms");
+        LOG_WARNING(sLogger, ("watch dirs", "succeeded")("costMs", costMs));
+    } else {
+        LOG_INFO(sLogger, ("watch dirs", "succeeded")("costMs", costMs));
+    }
     EventDispatcher::GetInstance()->AddExistedCheckPointFileEvents();
     // the dump time must be reset after dir registration, since it may take long on NFS.
     CheckPointManager::Instance()->ResetLastDumpTime();
diff --git a/core/file_server/event_handler/EventHandler.cpp b/core/file_server/event_handler/EventHandler.cpp
index 44c4fa662f..1933e2c748 100644
--- a/core/file_server/event_handler/EventHandler.cpp
+++ b/core/file_server/event_handler/EventHandler.cpp
@@ -119,7 +119,7 @@ void NormalEventHandler::Handle(const Event& event) {
                           "max depth", config.first->mMaxDirSearchDepth));
             EventHandler* newHandler = new CreateModifyHandler(mCreateHandlerPtr);
             EventHandler* handler = newHandler;
-            if (EventDispatcher::GetInstance()->RegisterEventHandler(path.c_str(), config, handler)) {
+            if (EventDispatcher::GetInstance()->RegisterEventHandler(path, config, handler)) {
                 if (handler != newHandler)
                     delete newHandler;
                 else
@@ -157,11 +157,13 @@ void CreateHandler::Handle(const Event& event) {
     if (!config.first)
         return;
     else if (event.IsDir())
-        ConfigManager::GetInstance()->RegisterHandlersRecursively(path, config, false);
+        ConfigManager::GetInstance()->RegisterHandlers(path, config);
     else {
         // symbolic link
-        if (EventDispatcher::GetInstance()->IsDirRegistered(path) == PATH_INODE_NOT_REGISTERED)
+        if (EventDispatcher::GetInstance()->IsDirRegistered(path) == PATH_INODE_NOT_REGISTERED) {
+            // TODO: why not use RegisterHandlers
             ConfigManager::GetInstance()->RegisterHandlersRecursively(path, config, true);
+        }
     }
 }
 
@@ -173,8 +175,9 @@ void CreateHandler::HandleTimeOut() {
 // TimeoutHandler implementation
 void TimeoutHandler::Handle(const Event& ev) {
     const string& dir = ev.GetSource();
-    EventDispatcher::GetInstance()->UnregisterEventHandler(dir.c_str());
+    EventDispatcher::GetInstance()->UnregisterEventHandler(dir);
     ConfigManager::GetInstance()->RemoveHandler(dir);
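+    // Also drop the directory's checkpoint now that its watch has been removed.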
+    CheckPointManager::Instance()->DeleteDirCheckPoint(dir);
 }
 
 
@@ -769,7 +772,7 @@ void ModifyHandler::Handle(const Event& event) {
                     reader->GetQueueKey(), mConfigName, event, reader->GetDevInode(), curTime);
                 return;
             }
-            unique_ptr<LogBuffer> logBuffer(new LogBuffer);
+            auto logBuffer = make_unique<LogBuffer>();
             hasMoreData = reader->ReadLog(*logBuffer, &event);
             int32_t pushRetry = PushLogToProcessor(reader, logBuffer.get());
             if (!hasMoreData) {
@@ -1061,28 +1064,16 @@ void ModifyHandler::DeleteRollbackReader() {
 }
 
 void ModifyHandler::ForceReadLogAndPush(LogFileReaderPtr reader) {
-    LogBuffer* logBuffer = new LogBuffer;
+    auto logBuffer = make_unique<LogBuffer>();
     auto pEvent = reader->CreateFlushTimeoutEvent();
     reader->ReadLog(*logBuffer, pEvent.get());
-    PushLogToProcessor(reader, logBuffer);
+    PushLogToProcessor(reader, logBuffer.get());
 }
 
 int32_t ModifyHandler::PushLogToProcessor(LogFileReaderPtr reader, LogBuffer* logBuffer) {
     int32_t pushRetry = 0;
     if (!logBuffer->rawBuffer.empty()) {
         reader->ReportMetrics(logBuffer->readLength);
-        LogFileProfiler::GetInstance()->AddProfilingReadBytes(reader->GetConfigName(),
-                                                              reader->GetRegion(),
-                                                              reader->GetProject(),
-                                                              reader->GetLogstore(),
-                                                              reader->GetConvertedPath(),
-                                                              reader->GetHostLogPath(),
-                                                              reader->GetExtraTags(),
-                                                              reader->GetDevInode().dev,
-                                                              reader->GetDevInode().inode,
-                                                              reader->GetFileSize(),
-                                                              reader->GetLastFilePos(),
-                                                              time(NULL));
         PipelineEventGroup group = LogFileReader::GenerateEventGroup(reader, logBuffer);
 
         while (!ProcessorRunner::GetInstance()->PushQueue(reader->GetQueueKey(), 0, std::move(group))) // 10ms
diff --git a/core/file_server/event_handler/LogInput.cpp b/core/file_server/event_handler/LogInput.cpp
index aef3726614..6765717922 100644
--- a/core/file_server/event_handler/LogInput.cpp
+++ b/core/file_server/event_handler/LogInput.cpp
@@ -48,6 +48,7 @@ using namespace std;
 
 DEFINE_FLAG_INT32(check_symbolic_link_interval, "seconds", 120);
 DEFINE_FLAG_INT32(check_base_dir_interval, "seconds", 60);
+DEFINE_FLAG_INT32(check_timeout_interval, "seconds", 600);
 DEFINE_FLAG_INT32(log_input_thread_wait_interval, "microseconds", 20 * 1000);
 DEFINE_FLAG_INT64(read_fs_events_interval, "microseconds", 20 * 1000);
 DEFINE_FLAG_INT32(check_handler_timeout_interval, "seconds", 180);
@@ -95,7 +96,7 @@ void LogInput::Start() {
     mEnableFileIncludedByMultiConfigs = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(
         METRIC_RUNNER_FILE_ENABLE_FILE_INCLUDED_BY_MULTI_CONFIGS_FLAG);
 
-    new Thread([this]() { ProcessLoop(); });
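+    // Run ProcessLoop() via std::async so that HoldOn() can wait for the loop to finish on exit.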
+    mThreadRes = async(launch::async, &LogInput::ProcessLoop, this);
 }
 
 void LogInput::Resume() {
@@ -106,15 +107,20 @@ void LogInput::Resume() {
 }
 
 void LogInput::HoldOn() {
-    LOG_INFO(sLogger, ("event handle daemon pause", "starts"));
-    if (BOOL_FLAG(enable_full_drain_mode) && Application::GetInstance()->IsExiting()) {
+    if (Application::GetInstance()->IsExiting()) {
+        LOG_INFO(sLogger, ("input event handle daemon", "stop starts"));
         unique_lock<mutex> lock(mThreadRunningMux);
-        mStopCV.wait(lock, [this]() { return mInteruptFlag; });
+        if (!mThreadRes.valid()) {
+            return;
+        }
+        mThreadRes.wait(); // should we set a timeout here? What if a network outage lasts for an hour?
+        LOG_INFO(sLogger, ("input event handle daemon", "stopped successfully"));
     } else {
+        LOG_INFO(sLogger, ("input event handle daemon pause", "starts"));
         mInteruptFlag = true;
         mAccessMainThreadRWL.lock();
+        LOG_INFO(sLogger, ("input event handle daemon pause", "succeeded"));
     }
-    LOG_INFO(sLogger, ("event handle daemon pause", "succeeded"));
 }
 
 void LogInput::TryReadEvents(bool forceRead) {
@@ -361,14 +367,14 @@ void LogInput::UpdateCriticalMetric(int32_t curTime) {
     mEventProcessCount = 0;
 }
 
-void* LogInput::ProcessLoop() {
+void LogInput::ProcessLoop() {
     LOG_INFO(sLogger, ("event handle daemon", "started"));
     EventDispatcher* dispatcher = EventDispatcher::GetInstance();
     dispatcher->StartTimeCount();
     int32_t prevTime = time(NULL);
     mLastReadEventTime = prevTime;
     int32_t curTime = prevTime;
-    srand(prevTime);
+    srand(0); // avoid random failures in unit tests
     int32_t lastCheckDir = prevTime - rand() % 60;
     int32_t lastCheckSymbolicLink = prevTime - rand() % 60;
     time_t lastCheckHandlerTimeOut = prevTime - rand() % 60;
@@ -428,7 +434,7 @@ void* LogInput::ProcessLoop() {
             lastCheckSymbolicLink = 0;
         }
 
-        if (curTime - prevTime >= INT32_FLAG(timeout_interval)) {
+        if (curTime - prevTime >= INT32_FLAG(check_timeout_interval)) {
             dispatcher->HandleTimeout();
             prevTime = curTime;
         }
@@ -462,19 +468,13 @@ void* LogInput::ProcessLoop() {
             lastClearConfigCache = curTime;
         }
 
-        if (BOOL_FLAG(enable_full_drain_mode) && Application::GetInstance()->IsExiting()
-            && EventDispatcher::GetInstance()->IsAllFileRead()) {
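+        // On exit, break immediately unless full drain mode requires all remaining files to be read first.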
+        if (Application::GetInstance()->IsExiting()
+            && (!BOOL_FLAG(enable_full_drain_mode) || EventDispatcher::GetInstance()->IsAllFileRead())) {
             break;
         }
     }
 
     mInteruptFlag = true;
-    mStopCV.notify_one();
-
-    if (!BOOL_FLAG(enable_full_drain_mode)) {
-        LOG_WARNING(sLogger, ("LogInputThread", "Exit"));
-    }
-    return NULL;
 }
 
 void LogInput::PushEventQueue(std::vector<Event*>& eventVec) {
diff --git a/core/file_server/event_handler/LogInput.h b/core/file_server/event_handler/LogInput.h
index aadeb17082..ec294f00da 100644
--- a/core/file_server/event_handler/LogInput.h
+++ b/core/file_server/event_handler/LogInput.h
@@ -65,7 +65,7 @@ class LogInput : public LogRunnable {
 private:
     LogInput();
     ~LogInput();
-    void* ProcessLoop();
+    void ProcessLoop();
     void ProcessEvent(EventDispatcher* dispatcher, Event* ev);
     Event* PopEventQueue();
     void UpdateCriticalMetric(int32_t curTime);
@@ -88,8 +88,8 @@ class LogInput : public LogRunnable {
     IntGaugePtr mEnableFileIncludedByMultiConfigs;
 
     std::atomic_int mLastReadEventTime{0};
+    std::future<void> mThreadRes;
     mutable std::mutex mThreadRunningMux;
-    mutable std::condition_variable mStopCV;
 
     mutable std::mutex mFeedbackMux;
     mutable std::condition_variable mFeedbackCV;
diff --git a/core/file_server/event_listener/EventListener_Linux.cpp b/core/file_server/event_listener/EventListener_Linux.cpp
index 634fc27a40..ae07e7223e 100644
--- a/core/file_server/event_listener/EventListener_Linux.cpp
+++ b/core/file_server/event_listener/EventListener_Linux.cpp
@@ -41,7 +41,6 @@ bool logtail::EventListener::Init() {
 
 int logtail::EventListener::AddWatch(const char* dir) {
     return inotify_add_watch(mInotifyFd, dir, mWatchEventMask);
-    ;
 }
 
 bool logtail::EventListener::RemoveWatch(int wd) {
diff --git a/core/file_server/polling/PollingCache.h b/core/file_server/polling/PollingCache.h
index 575eba7303..ea3ed8c38a 100644
--- a/core/file_server/polling/PollingCache.h
+++ b/core/file_server/polling/PollingCache.h
@@ -32,6 +32,10 @@ struct DirFileCache {
     void SetConfigMatched(bool configMatched) { mConfigMatched = configMatched; }
     bool HasMatchedConfig() const { return mConfigMatched; }
 
+
+    void SetExceedPreservedDirDepth(bool exceed) { mExceedPreservedDirDepth = exceed; }
+    bool GetExceedPreservedDirDepth() const { return mExceedPreservedDirDepth; }
+
     void SetCheckRound(uint64_t curRound) { mLastCheckRound = curRound; }
     uint64_t GetLastCheckRound() const { return mLastCheckRound; }
 
@@ -51,6 +55,7 @@ struct DirFileCache {
     int32_t mLastEventTime = 0;
 
     bool mConfigMatched = false;
+    bool mExceedPreservedDirDepth = false;
     uint64_t mLastCheckRound = 0;
     // Last modified time on filesystem in nanoseconds.
     int64_t mLastModifyTime = 0;
diff --git a/core/file_server/polling/PollingDirFile.cpp b/core/file_server/polling/PollingDirFile.cpp
index 92deb2cad3..0b106605ff 100644
--- a/core/file_server/polling/PollingDirFile.cpp
+++ b/core/file_server/polling/PollingDirFile.cpp
@@ -130,133 +130,135 @@ void PollingDirFile::Polling() {
     LOG_INFO(sLogger, ("polling discovery", "started"));
     mHoldOnFlag = false;
     while (mRuningFlag) {
-        LOG_DEBUG(sLogger, ("start dir file polling, mCurrentRound", mCurrentRound));
-        {
-            PTScopedLock thradLock(mPollingThreadLock);
-            mStatCount = 0;
-            mNewFileVec.clear();
-            ++mCurrentRound;
-
-            // Get a copy of config list from ConfigManager.
-            // PollingDirFile has to be held on at first because raw pointers are used here.
-            vector<FileDiscoveryConfig> sortedConfigs;
-            vector<FileDiscoveryConfig> wildcardConfigs;
-            auto nameConfigMap = FileServer::GetInstance()->GetAllFileDiscoveryConfigs();
-            for (auto itr = nameConfigMap.begin(); itr != nameConfigMap.end(); ++itr) {
-                if (itr->second.first->GetWildcardPaths().empty())
-                    sortedConfigs.push_back(itr->second);
-                else
-                    wildcardConfigs.push_back(itr->second);
-            }
-            sort(sortedConfigs.begin(), sortedConfigs.end(), FileDiscoveryOptions::CompareByPathLength);
-
-            size_t configTotal = nameConfigMap.size();
-            LogtailMonitor::GetInstance()->UpdateMetric("config_count", configTotal);
-            LoongCollectorMonitor::GetInstance()->SetAgentConfigTotal(configTotal);
-            {
-                ScopedSpinLock lock(mCacheLock);
-                size_t pollingDirCacheSize = mDirCacheMap.size();
-                LogtailMonitor::GetInstance()->UpdateMetric("polling_dir_cache", pollingDirCacheSize);
-                mPollingDirCacheSize->Set(pollingDirCacheSize);
-                size_t pollingFileCacheSize = mFileCacheMap.size();
-                LogtailMonitor::GetInstance()->UpdateMetric("polling_file_cache", pollingFileCacheSize);
-                mPollingFileCacheSize->Set(pollingFileCacheSize);
-            }
+        PollingIteration();
 
-            // Iterate all normal configs, make sure stat count will not exceed limit.
-            for (auto itr = sortedConfigs.begin();
-                 itr != sortedConfigs.end() && mStatCount <= INT32_FLAG(polling_max_stat_count);
-                 ++itr) {
-                if (!mRuningFlag || mHoldOnFlag)
-                    break;
-
-                const FileDiscoveryOptions* config = itr->first;
-                const PipelineContext* ctx = itr->second;
-                if (!config->IsContainerDiscoveryEnabled()) {
-                    fsutil::PathStat baseDirStat;
-                    if (!fsutil::PathStat::stat(config->GetBasePath(), baseDirStat)) {
-                        LOG_DEBUG(sLogger,
-                                  ("get base dir info error: ", config->GetBasePath())(ctx->GetProjectName(),
-                                                                                       ctx->GetLogstoreName()));
-                        continue;
-                    }
+        // Sleep for a while, by default, 5s on Linux, 1s on Windows.
+        for (int i = 0; i < 10 && mRuningFlag; ++i) {
+            usleep(INT32_FLAG(dirfile_check_interval_ms) * 100);
+        }
+    }
+    LOG_DEBUG(sLogger, ("dir file polling thread done", ""));
+}
 
-                    int32_t lastConfigStatCount = mStatCount;
-                    if (!PollingNormalConfigPath(*itr, config->GetBasePath(), string(), baseDirStat, 0)) {
-                        LOG_DEBUG(sLogger,
-                                  ("logPath in config not exist", config->GetBasePath())(ctx->GetProjectName(),
-                                                                                         ctx->GetLogstoreName()));
-                    }
-                    CheckConfigPollingStatCount(lastConfigStatCount, *itr, false);
-                } else {
-                    for (size_t i = 0; i < config->GetContainerInfo()->size(); ++i) {
-                        const string& basePath = (*config->GetContainerInfo())[i].mRealBaseDir;
-                        fsutil::PathStat baseDirStat;
-                        if (!fsutil::PathStat::stat(basePath.c_str(), baseDirStat)) {
-                            LOG_DEBUG(sLogger,
-                                      ("get docker base dir info error: ", basePath)(ctx->GetProjectName(),
-                                                                                     ctx->GetLogstoreName()));
-                            continue;
-                        }
-                        int32_t lastConfigStatCount = mStatCount;
-                        if (!PollingNormalConfigPath(*itr, basePath, string(), baseDirStat, 0)) {
-                            LOG_DEBUG(sLogger,
-                                      ("docker logPath in config not exist", basePath)(ctx->GetProjectName(),
-                                                                                       ctx->GetLogstoreName()));
-                        }
-                        CheckConfigPollingStatCount(lastConfigStatCount, *itr, true);
-                    }
-                }
+void PollingDirFile::PollingIteration() {
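+    // One discovery round: scan all configs, hand newly found files to PollingModify, and clean up caches.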
+    LOG_DEBUG(sLogger, ("start dir file polling, mCurrentRound", mCurrentRound));
+    PTScopedLock threadLock(mPollingThreadLock);
+    mStatCount = 0;
+    mNewFileVec.clear();
+    ++mCurrentRound;
+
+    // Get a copy of config list from ConfigManager.
+    // PollingDirFile has to be held on at first because raw pointers are used here.
+    vector<FileDiscoveryConfig> sortedConfigs;
+    vector<FileDiscoveryConfig> wildcardConfigs;
+    auto nameConfigMap = FileServer::GetInstance()->GetAllFileDiscoveryConfigs();
+    for (auto itr = nameConfigMap.begin(); itr != nameConfigMap.end(); ++itr) {
+        if (itr->second.first->GetWildcardPaths().empty())
+            sortedConfigs.push_back(itr->second);
+        else
+            wildcardConfigs.push_back(itr->second);
+    }
+    sort(sortedConfigs.begin(), sortedConfigs.end(), FileDiscoveryOptions::CompareByPathLength);
+
+    size_t configTotal = nameConfigMap.size();
+    LogtailMonitor::GetInstance()->UpdateMetric("config_count", configTotal);
+    LoongCollectorMonitor::GetInstance()->SetAgentConfigTotal(configTotal);
+    {
+        ScopedSpinLock lock(mCacheLock);
+        size_t pollingDirCacheSize = mDirCacheMap.size();
+        LogtailMonitor::GetInstance()->UpdateMetric("polling_dir_cache", pollingDirCacheSize);
+        mPollingDirCacheSize->Set(pollingDirCacheSize);
+        size_t pollingFileCacheSize = mFileCacheMap.size();
+        LogtailMonitor::GetInstance()->UpdateMetric("polling_file_cache", pollingFileCacheSize);
+        mPollingFileCacheSize->Set(pollingFileCacheSize);
+    }
+
+    // Iterate all normal configs, make sure stat count will not exceed limit.
+    for (auto itr = sortedConfigs.begin();
+         itr != sortedConfigs.end() && mStatCount <= INT32_FLAG(polling_max_stat_count);
+         ++itr) {
+        if (!mRuningFlag || mHoldOnFlag)
+            break;
+
+        const FileDiscoveryOptions* config = itr->first;
+        const PipelineContext* ctx = itr->second;
+        if (!config->IsContainerDiscoveryEnabled()) {
+            fsutil::PathStat baseDirStat;
+            if (!fsutil::PathStat::stat(config->GetBasePath(), baseDirStat)) {
+                LOG_DEBUG(sLogger,
+                          ("get base dir info error: ", config->GetBasePath())(ctx->GetProjectName(),
+                                                                               ctx->GetLogstoreName()));
+                continue;
             }
 
-            // Iterate all wildcard configs, make sure stat count will not exceed limit.
-            for (auto itr = wildcardConfigs.begin();
-                 itr != wildcardConfigs.end() && mStatCount <= INT32_FLAG(polling_max_stat_count);
-                 ++itr) {
-                if (!mRuningFlag || mHoldOnFlag)
-                    break;
-
-                const FileDiscoveryOptions* config = itr->first;
-                const PipelineContext* ctx = itr->second;
-                if (!config->IsContainerDiscoveryEnabled()) {
-                    int32_t lastConfigStatCount = mStatCount;
-                    if (!PollingWildcardConfigPath(*itr, config->GetWildcardPaths()[0], 0)) {
-                        LOG_DEBUG(sLogger,
-                                  ("can not find matched path in config, Wildcard begin logPath",
-                                   config->GetBasePath())(ctx->GetProjectName(), ctx->GetLogstoreName()));
-                    }
-                    CheckConfigPollingStatCount(lastConfigStatCount, *itr, false);
-                } else {
-                    for (size_t i = 0; i < config->GetContainerInfo()->size(); ++i) {
-                        const string& baseWildcardPath = (*config->GetContainerInfo())[i].mRealBaseDir;
-                        int32_t lastConfigStatCount = mStatCount;
-                        if (!PollingWildcardConfigPath(*itr, baseWildcardPath, 0)) {
-                            LOG_DEBUG(sLogger,
-                                      ("can not find matched path in config, "
-                                       "Wildcard begin logPath ",
-                                       baseWildcardPath)(ctx->GetProjectName(), ctx->GetLogstoreName()));
-                        }
-                        CheckConfigPollingStatCount(lastConfigStatCount, *itr, true);
-                    }
+            int32_t lastConfigStatCount = mStatCount;
+            if (!PollingNormalConfigPath(*itr, config->GetBasePath(), string(), baseDirStat, 0)) {
+                LOG_DEBUG(sLogger,
+                          ("logPath in config not exist", config->GetBasePath())(ctx->GetProjectName(),
+                                                                                 ctx->GetLogstoreName()));
+            }
+            CheckConfigPollingStatCount(lastConfigStatCount, *itr, false);
+        } else {
+            for (size_t i = 0; i < config->GetContainerInfo()->size(); ++i) {
+                const string& basePath = (*config->GetContainerInfo())[i].mRealBaseDir;
+                fsutil::PathStat baseDirStat;
+                if (!fsutil::PathStat::stat(basePath, baseDirStat)) {
+                    LOG_DEBUG(
+                        sLogger,
+                        ("get docker base dir info error: ", basePath)(ctx->GetProjectName(), ctx->GetLogstoreName()));
+                    continue;
                 }
+                int32_t lastConfigStatCount = mStatCount;
+                if (!PollingNormalConfigPath(*itr, basePath, string(), baseDirStat, 0)) {
+                    LOG_DEBUG(sLogger,
+                              ("docker logPath in config not exist", basePath)(ctx->GetProjectName(),
+                                                                               ctx->GetLogstoreName()));
+                }
+                CheckConfigPollingStatCount(lastConfigStatCount, *itr, true);
             }
+        }
+    }
 
-            // Add collected new files to PollingModify.
-            PollingModify::GetInstance()->AddNewFile(mNewFileVec);
+    // Iterate all wildcard configs, make sure stat count will not exceed limit.
+    for (auto itr = wildcardConfigs.begin();
+         itr != wildcardConfigs.end() && mStatCount <= INT32_FLAG(polling_max_stat_count);
+         ++itr) {
+        if (!mRuningFlag || mHoldOnFlag)
+            break;
 
-            // Check cache, clear unavailable and overtime items.
-            if (mCurrentRound % INT32_FLAG(check_not_exist_file_dir_round) == 0) {
-                ClearUnavailableFileAndDir();
+        const FileDiscoveryOptions* config = itr->first;
+        const PipelineContext* ctx = itr->second;
+        if (!config->IsContainerDiscoveryEnabled()) {
+            int32_t lastConfigStatCount = mStatCount;
+            if (!PollingWildcardConfigPath(*itr, config->GetWildcardPaths()[0], 0)) {
+                LOG_DEBUG(sLogger,
+                          ("can not find matched path in config, Wildcard begin logPath",
+                           config->GetBasePath())(ctx->GetProjectName(), ctx->GetLogstoreName()));
+            }
+            CheckConfigPollingStatCount(lastConfigStatCount, *itr, false);
+        } else {
+            for (size_t i = 0; i < config->GetContainerInfo()->size(); ++i) {
+                const string& baseWildcardPath = (*config->GetContainerInfo())[i].mRealBaseDir;
+                int32_t lastConfigStatCount = mStatCount;
+                if (!PollingWildcardConfigPath(*itr, baseWildcardPath, 0)) {
+                    LOG_DEBUG(sLogger,
+                              ("can not find matched path in config, "
+                               "Wildcard begin logPath ",
+                               baseWildcardPath)(ctx->GetProjectName(), ctx->GetLogstoreName()));
+                }
+                CheckConfigPollingStatCount(lastConfigStatCount, *itr, true);
             }
-            ClearTimeoutFileAndDir();
         }
+    }
 
-        // Sleep for a while, by default, 5s on Linux, 1s on Windows.
-        for (int i = 0; i < 10 && mRuningFlag; ++i) {
-            usleep(INT32_FLAG(dirfile_check_interval_ms) * 100);
-        }
+    // Add collected new files to PollingModify.
+    PollingModify::GetInstance()->AddNewFile(mNewFileVec);
+
+    // Check cache, clear unavailable and overtime items.
+    if (mCurrentRound % INT32_FLAG(check_not_exist_file_dir_round) == 0) {
+        ClearUnavailableFileAndDir();
     }
-    LOG_DEBUG(sLogger, ("dir file polling thread done", ""));
+    ClearTimeoutFileAndDir();
 }
 
 // Last Modified Time (LMD) of directory changes when a file or a subdirectory is added,
@@ -265,6 +267,7 @@ void PollingDirFile::Polling() {
 // NOTE: So, we can not find changes in subdirectories of the directory according to LMD.
 bool PollingDirFile::CheckAndUpdateDirMatchCache(const string& dirPath,
                                                  const fsutil::PathStat& statBuf,
+                                                 bool exceedPreservedDirDepth,
                                                  bool& newFlag) {
     int64_t sec, nsec;
     statBuf.GetLastWriteTime(sec, nsec);
@@ -277,6 +280,7 @@ bool PollingDirFile::CheckAndUpdateDirMatchCache(const string& dirPath,
     if (iter == mDirCacheMap.end()) {
         DirFileCache& dirCache = mDirCacheMap[dirPath];
         dirCache.SetConfigMatched(true);
+        dirCache.SetExceedPreservedDirDepth(exceedPreservedDirDepth);
         dirCache.SetCheckRound(mCurrentRound);
         dirCache.SetLastModifyTime(modifyTime);
         // Directories found at round 1 or too old are considered as old data.
@@ -301,7 +305,8 @@ bool PollingDirFile::CheckAndUpdateDirMatchCache(const string& dirPath,
 bool PollingDirFile::CheckAndUpdateFileMatchCache(const string& fileDir,
                                                   const string& fileName,
                                                   const fsutil::PathStat& statBuf,
-                                                  bool needFindBestMatch) {
+                                                  bool needFindBestMatch,
+                                                  bool exceedPreservedDirDepth) {
     int64_t sec, nsec;
     statBuf.GetLastWriteTime(sec, nsec);
     int64_t modifyTime = NANO_CONVERTING * sec + nsec;
@@ -317,6 +322,7 @@ bool PollingDirFile::CheckAndUpdateFileMatchCache(const string& fileDir,
 
         DirFileCache& fileCache = mFileCacheMap[filePath];
         fileCache.SetConfigMatched(matchFlag);
+        fileCache.SetExceedPreservedDirDepth(exceedPreservedDirDepth);
         fileCache.SetCheckRound(mCurrentRound);
         fileCache.SetLastModifyTime(modifyTime);
 
@@ -357,10 +363,21 @@ bool PollingDirFile::PollingNormalConfigPath(const FileDiscoveryConfig& pConfig,
                                              const string& obj,
                                              const fsutil::PathStat& statBuf,
                                              int depth) {
-    if (pConfig.first->mMaxDirSearchDepth >= 0 && depth > pConfig.first->mMaxDirSearchDepth)
-        return false;
-    if (pConfig.first->mPreservedDirDepth >= 0 && depth > pConfig.first->mPreservedDirDepth)
+    if (pConfig.first->mMaxDirSearchDepth >= 0 && depth > pConfig.first->mMaxDirSearchDepth) {
         return false;
+    }
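+    // Directories deeper than mPreservedDirDepth are still polled, but only while they have been
+    // modified within timeout_interval; older ones are skipped.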
+    bool exceedPreservedDirDepth = false;
+    if (pConfig.first->mPreservedDirDepth >= 0 && depth > pConfig.first->mPreservedDirDepth) {
+        exceedPreservedDirDepth = true;
+        int64_t sec = 0;
+        int64_t nsec = 0;
+        statBuf.GetLastWriteTime(sec, nsec);
+        auto curTime = time(nullptr);
+        LOG_DEBUG(sLogger, ("PollingNormalConfigPath", srcPath + "/" + obj)("curTime", curTime)("writeTime", sec));
+        if (curTime - sec > INT32_FLAG(timeout_interval)) {
+            return false;
+        }
+    }
 
     string dirPath = obj.empty() ? srcPath : PathJoin(srcPath, obj);
     if (AppConfig::GetInstance()->IsHostPathMatchBlacklist(dirPath)) {
@@ -368,7 +385,7 @@ bool PollingDirFile::PollingNormalConfigPath(const FileDiscoveryConfig& pConfig,
         return false;
     }
     bool isNewDirectory = false;
-    if (!CheckAndUpdateDirMatchCache(dirPath, statBuf, isNewDirectory))
+    if (!CheckAndUpdateDirMatchCache(dirPath, statBuf, exceedPreservedDirDepth, isNewDirectory))
         return true;
     if (isNewDirectory) {
         PollingEventQueue::GetInstance()->PushEvent(new Event(srcPath, obj, EVENT_CREATE | EVENT_ISDIR, -1, 0));
@@ -471,7 +488,7 @@ bool PollingDirFile::PollingNormalConfigPath(const FileDiscoveryConfig& pConfig,
         if (buf.IsDir() && (!needCheckDirMatch || !pConfig.first->IsDirectoryInBlacklist(item))) {
             PollingNormalConfigPath(pConfig, dirPath, entName, buf, depth + 1);
         } else if (buf.IsRegFile()) {
-            if (CheckAndUpdateFileMatchCache(dirPath, entName, buf, needFindBestMatch)) {
+            if (CheckAndUpdateFileMatchCache(dirPath, entName, buf, needFindBestMatch, exceedPreservedDirDepth)) {
                 LOG_DEBUG(sLogger, ("add to modify event", entName)("round", mCurrentRound));
                 mNewFileVec.push_back(SplitedFilePath(dirPath, entName));
             }
@@ -639,12 +656,44 @@ void PollingDirFile::ClearTimeoutFileAndDir() {
     }
 
     // Collect deleted files, so that it can notify PollingModify later.
+    s_lastClearTime = curTime;
     std::vector<SplitedFilePath> deleteFileVec;
     {
         ScopedSpinLock lock(mCacheLock);
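+        // Drop cache entries beyond the preserved dir depth that have not been modified within timeout_interval.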
+        bool clearExceedPreservedDirDepth = false;
+        for (auto iter = mDirCacheMap.begin(); iter != mDirCacheMap.end();) {
+            if (iter->second.GetExceedPreservedDirDepth()
+                && (NANO_CONVERTING * curTime - iter->second.GetLastModifyTime())
+                    > NANO_CONVERTING * INT32_FLAG(timeout_interval)) {
+                iter = mDirCacheMap.erase(iter);
+                clearExceedPreservedDirDepth = true;
+            } else
+                ++iter;
+        }
+        if (clearExceedPreservedDirDepth) {
+            LOG_INFO(sLogger, ("After clear DirCache size", mDirCacheMap.size()));
+        }
+        clearExceedPreservedDirDepth = false;
+        for (auto iter = mFileCacheMap.begin(); iter != mFileCacheMap.end();) {
+            if (iter->second.GetExceedPreservedDirDepth()
+                && (NANO_CONVERTING * curTime - iter->second.GetLastModifyTime())
+                    > NANO_CONVERTING * INT32_FLAG(timeout_interval)) {
+                // If the file has been added to PollingModify, remove it here.
+                if (iter->second.HasMatchedConfig() && iter->second.HasEventFlag()) {
+                    deleteFileVec.push_back(SplitedFilePath(iter->first));
+                    LOG_INFO(sLogger, ("delete file cache", iter->first)("vec size", deleteFileVec.size()));
+                }
+                iter = mFileCacheMap.erase(iter);
+                clearExceedPreservedDirDepth = true;
+            } else
+                ++iter;
+        }
+        if (clearExceedPreservedDirDepth) {
+            LOG_INFO(sLogger, ("After clear FileCache size", mFileCacheMap.size()));
+        }
+
         if (mDirCacheMap.size() > (size_t)INT32_FLAG(polling_dir_upperlimit)) {
             LOG_INFO(sLogger, ("start clear dir cache", mDirCacheMap.size()));
-            s_lastClearTime = curTime;
             for (auto iter = mDirCacheMap.begin(); iter != mDirCacheMap.end();) {
                 if ((NANO_CONVERTING * curTime - iter->second.GetLastModifyTime())
                     > NANO_CONVERTING * INT32_FLAG(polling_dir_timeout)) {
@@ -652,12 +701,11 @@ void PollingDirFile::ClearTimeoutFileAndDir() {
                 } else
                     ++iter;
             }
-            LOG_INFO(sLogger, ("After clear", mDirCacheMap.size())("Cost time", time(NULL) - s_lastClearTime));
+            LOG_INFO(sLogger, ("After clear DirCache size", mDirCacheMap.size()));
         }
 
         if (mFileCacheMap.size() > (size_t)INT32_FLAG(polling_file_upperlimit)) {
             LOG_INFO(sLogger, ("start clear file cache", mFileCacheMap.size()));
-            s_lastClearTime = curTime;
             for (auto iter = mFileCacheMap.begin(); iter != mFileCacheMap.end();) {
                 if ((NANO_CONVERTING * curTime - iter->second.GetLastModifyTime())
                     > NANO_CONVERTING * INT32_FLAG(polling_file_timeout)) {
@@ -670,7 +718,7 @@ void PollingDirFile::ClearTimeoutFileAndDir() {
                 } else
                     ++iter;
             }
-            LOG_INFO(sLogger, ("After clear", mFileCacheMap.size())("Cost time", time(NULL) - s_lastClearTime));
+            LOG_INFO(sLogger, ("After clear FileCache size", mFileCacheMap.size()));
         }
     }
 
diff --git a/core/file_server/polling/PollingDirFile.h b/core/file_server/polling/PollingDirFile.h
index 4bf7fb8f52..8406bafdb7 100644
--- a/core/file_server/polling/PollingDirFile.h
+++ b/core/file_server/polling/PollingDirFile.h
@@ -58,6 +58,7 @@ class PollingDirFile : public LogRunnable {
     ~PollingDirFile();
 
     void Polling();
+    void PollingIteration();
 
     // PollingNormalConfigPath polls config with normal base path recursively.
     // @config: config to poll.
@@ -85,8 +86,10 @@ class PollingDirFile : public LogRunnable {
     // @newFlag: a boolean to indicate caller that it is a new directory, generate event for it.
     // @return a boolean to indicate should the directory be continued to poll.
     //   It will returns true always now (might change in future).
-    bool CheckAndUpdateDirMatchCache(const std::string& dirPath, const fsutil::PathStat& statBuf, bool& newFlag);
-
+    bool CheckAndUpdateDirMatchCache(const std::string& dirPath,
+                                     const fsutil::PathStat& statBuf,
+                                     bool exceedPreservedDirDepth,
+                                     bool& newFlag);
     // CheckAndUpdateFileMatchCache updates file cache (add if not existing).
     // @fileDir+@fileName: absolute path of the file.
     // @needFindBestMatch: false indicates that the file has already found the
@@ -96,7 +99,8 @@ class PollingDirFile : public LogRunnable {
     bool CheckAndUpdateFileMatchCache(const std::string& fileDir,
                                       const std::string& fileName,
                                       const fsutil::PathStat& statBuf,
-                                      bool needFindBestMatch);
+                                      bool needFindBestMatch,
+                                      bool exceedPreservedDirDepth);
 
     // ClearUnavailableFileAndDir checks cache, remove unavailable items.
     // By default, it will be called every 20 rounds (flag check_not_exist_file_dir_round).
diff --git a/core/file_server/polling/PollingEventQueue.h b/core/file_server/polling/PollingEventQueue.h
index dbad5c611e..513c13cad9 100644
--- a/core/file_server/polling/PollingEventQueue.h
+++ b/core/file_server/polling/PollingEventQueue.h
@@ -46,6 +46,7 @@ class PollingEventQueue {
     friend class EventDispatcher;
     friend class EventDispatcherBase;
     friend class PollingUnittest;
+    friend class PollingPreservedDirDepthUnittest;
 
     void Clear();
     Event* FindEvent(const std::string& src, const std::string& obj, int32_t eventType = -1);
diff --git a/core/file_server/polling/PollingModify.cpp b/core/file_server/polling/PollingModify.cpp
index 839ce4c1d6..33b7f3fad8 100644
--- a/core/file_server/polling/PollingModify.cpp
+++ b/core/file_server/polling/PollingModify.cpp
@@ -245,59 +245,7 @@ void PollingModify::Polling() {
     LOG_INFO(sLogger, ("polling modify", "started"));
     mHoldOnFlag = false;
     while (mRuningFlag) {
-        {
-            PTScopedLock threadLock(mPollingThreadLock);
-            LoadFileNameInQueues();
-
-            vector<SplitedFilePath> deletedFileVec;
-            vector<Event*> pollingEventVec;
-            int32_t statCount = 0;
-            size_t pollingModifySizeTotal = mModifyCacheMap.size();
-            LogtailMonitor::GetInstance()->UpdateMetric("polling_modify_size", pollingModifySizeTotal);
-            mPollingModifySize->Set(pollingModifySizeTotal);
-            for (auto iter = mModifyCacheMap.begin(); iter != mModifyCacheMap.end(); ++iter) {
-                if (!mRuningFlag || mHoldOnFlag)
-                    break;
-
-                const SplitedFilePath& filePath = iter->first;
-                ModifyCheckCache& modifyCache = iter->second;
-                fsutil::PathStat logFileStat;
-                if (!fsutil::PathStat::stat(PathJoin(filePath.mFileDir, filePath.mFileName), logFileStat)) {
-                    if (errno == ENOENT) {
-                        LOG_DEBUG(sLogger, ("file deleted", PathJoin(filePath.mFileDir, filePath.mFileName)));
-                        if (UpdateDeletedFile(filePath, modifyCache, pollingEventVec)) {
-                            deletedFileVec.push_back(filePath);
-                        }
-                    } else {
-                        LOG_DEBUG(sLogger, ("get file info error", PathJoin(filePath.mFileDir, filePath.mFileName)));
-                    }
-                } else {
-                    int64_t sec, nsec;
-                    logFileStat.GetLastWriteTime(sec, nsec);
-                    timespec mtim{sec, nsec};
-                    auto devInode = logFileStat.GetDevInode();
-                    UpdateFile(filePath,
-                               modifyCache,
-                               devInode.dev,
-                               devInode.inode,
-                               logFileStat.GetFileSize(),
-                               mtim,
-                               pollingEventVec);
-                }
-
-                ++statCount;
-                if (statCount % INT32_FLAG(modify_stat_count) == 0) {
-                    usleep(1000 * INT32_FLAG(modify_stat_sleepMs));
-                }
-            }
-
-            if (pollingEventVec.size() > 0) {
-                PollingEventQueue::GetInstance()->PushEvent(pollingEventVec);
-            }
-            for (size_t i = 0; i < deletedFileVec.size(); ++i) {
-                mModifyCacheMap.erase(deletedFileVec[i]);
-            }
-        }
+        PollingIteration();
 
         // Sleep for a while, by default, 1s.
         for (int i = 0; i < 10 && mRuningFlag; ++i) {
@@ -307,6 +255,55 @@ void PollingModify::Polling() {
     LOG_INFO(sLogger, ("PollingModify::Polling", "stop"));
 }
 
+void PollingModify::PollingIteration() {
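+    // One modify-check round: stat each file in the modify cache and emit modify/delete events.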
+    PTScopedLock threadLock(mPollingThreadLock);
+    LoadFileNameInQueues();
+
+    vector<SplitedFilePath> deletedFileVec;
+    vector<Event*> pollingEventVec;
+    int32_t statCount = 0;
+    size_t pollingModifySizeTotal = mModifyCacheMap.size();
+    LogtailMonitor::GetInstance()->UpdateMetric("polling_modify_size", pollingModifySizeTotal);
+    mPollingModifySize->Set(pollingModifySizeTotal);
+    for (auto iter = mModifyCacheMap.begin(); iter != mModifyCacheMap.end(); ++iter) {
+        if (!mRuningFlag || mHoldOnFlag)
+            break;
+
+        const SplitedFilePath& filePath = iter->first;
+        ModifyCheckCache& modifyCache = iter->second;
+        fsutil::PathStat logFileStat;
+        if (!fsutil::PathStat::stat(PathJoin(filePath.mFileDir, filePath.mFileName), logFileStat)) {
+            if (errno == ENOENT) {
+                LOG_DEBUG(sLogger, ("file deleted", PathJoin(filePath.mFileDir, filePath.mFileName)));
+                if (UpdateDeletedFile(filePath, modifyCache, pollingEventVec)) {
+                    deletedFileVec.push_back(filePath);
+                }
+            } else {
+                LOG_DEBUG(sLogger, ("get file info error", PathJoin(filePath.mFileDir, filePath.mFileName)));
+            }
+        } else {
+            int64_t sec, nsec;
+            logFileStat.GetLastWriteTime(sec, nsec);
+            timespec mtim{sec, nsec};
+            auto devInode = logFileStat.GetDevInode();
+            UpdateFile(
+                filePath, modifyCache, devInode.dev, devInode.inode, logFileStat.GetFileSize(), mtim, pollingEventVec);
+        }
+
+        ++statCount;
+        if (statCount % INT32_FLAG(modify_stat_count) == 0) {
+            usleep(1000 * INT32_FLAG(modify_stat_sleepMs));
+        }
+    }
+
+    if (pollingEventVec.size() > 0) {
+        PollingEventQueue::GetInstance()->PushEvent(pollingEventVec);
+    }
+    for (size_t i = 0; i < deletedFileVec.size(); ++i) {
+        mModifyCacheMap.erase(deletedFileVec[i]);
+    }
+}
+
 #ifdef APSARA_UNIT_TEST_MAIN
 bool PollingModify::FindNewFile(const std::string& dir, const std::string& fileName) {
     PTScopedLock lock(mFileLock);
diff --git a/core/file_server/polling/PollingModify.h b/core/file_server/polling/PollingModify.h
index 5e3738a92a..938a0596de 100644
--- a/core/file_server/polling/PollingModify.h
+++ b/core/file_server/polling/PollingModify.h
@@ -62,6 +62,7 @@ class PollingModify : public LogRunnable {
     ~PollingModify();
 
     void Polling();
+    void PollingIteration();
 
     // MakeSpaceForNewFile tries to release some space from modify cache
     // for LoadFileNameInQueues to add new files.
diff --git a/core/file_server/reader/LogFileReader.cpp b/core/file_server/reader/LogFileReader.cpp
index 62c9a49c6f..08b7693354 100644
--- a/core/file_server/reader/LogFileReader.cpp
+++ b/core/file_server/reader/LogFileReader.cpp
@@ -46,7 +46,6 @@
 #include "file_server/reader/GloablFileDescriptorManager.h"
 #include "file_server/reader/JsonLogFileReader.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/AlarmManager.h"
 #include "monitor/metric_constants/MetricConstants.h"
 #include "pipeline/queue/ExactlyOnceQueueManager.h"
@@ -296,7 +295,7 @@ bool LogFileReader::ShouldForceReleaseDeletedFileFd() {
 }
 
 void LogFileReader::InitReader(bool tailExisted, FileReadPolicy policy, uint32_t eoConcurrency) {
-    mSourceId = LogFileProfiler::mIpAddr + "_" + mReaderConfig.second->GetConfigName() + "_" + mHostLogPath + "_"
+    mSourceId = LoongCollectorMonitor::mIpAddr + "_" + mReaderConfig.second->GetConfigName() + "_" + mHostLogPath + "_"
         + CalculateRandomUUID();
 
     if (!tailExisted) {
diff --git a/core/go_pipeline/LogtailPlugin.cpp b/core/go_pipeline/LogtailPlugin.cpp
index 993c050798..6e41963c82 100644
--- a/core/go_pipeline/LogtailPlugin.cpp
+++ b/core/go_pipeline/LogtailPlugin.cpp
@@ -26,8 +26,8 @@
 #include "container_manager/ConfigContainerInfoUpdateCmd.h"
 #include "file_server/ConfigManager.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/AlarmManager.h"
+#include "monitor/Monitor.h"
 #include "pipeline/PipelineManager.h"
 #include "pipeline/queue/SenderQueueManager.h"
 #include "provider/Provider.h"
@@ -65,9 +65,12 @@ LogtailPlugin::LogtailPlugin() {
     mPluginCfg["LoongcollectorConfDir"] = AppConfig::GetInstance()->GetLoongcollectorConfDir();
     mPluginCfg["LoongcollectorLogDir"] = GetAgentLogDir();
     mPluginCfg["LoongcollectorDataDir"] = GetAgentDataDir();
+    mPluginCfg["LoongcollectorPluginLogName"] = GetPluginLogName();
+    mPluginCfg["LoongcollectorVersionTag"] = GetVersionTag();
+    mPluginCfg["LoongcollectorCheckPointFile"] = GetGoPluginCheckpoint();
     mPluginCfg["LoongcollectorThirdPartyDir"] = GetAgentThirdPartyDir();
-    mPluginCfg["HostIP"] = LogFileProfiler::mIpAddr;
-    mPluginCfg["Hostname"] = LogFileProfiler::mHostname;
+    mPluginCfg["HostIP"] = LoongCollectorMonitor::mIpAddr;
+    mPluginCfg["Hostname"] = LoongCollectorMonitor::mHostname;
     mPluginCfg["EnableContainerdUpperDirDetect"] = BOOL_FLAG(enable_containerd_upper_dir_detect);
     mPluginCfg["EnableSlsMetricsFormat"] = BOOL_FLAG(enable_sls_metrics_format);
 }
@@ -616,4 +619,3 @@ K8sContainerMeta LogtailPlugin::GetContainerMeta(const string& containerID) {
     }
     return K8sContainerMeta();
 }
-
diff --git a/core/logger/Logger.cpp b/core/logger/Logger.cpp
index c754d5b37b..701fa2738d 100644
--- a/core/logger/Logger.cpp
+++ b/core/logger/Logger.cpp
@@ -195,7 +195,7 @@ void Logger::LoadConfig(const std::string& filePath) {
     // Load config file, check if it is valid or not.
     do {
         std::ifstream in(filePath);
-        if (!in.good())
+        if (!in)
             break;
 
         in.seekg(0, std::ios::end);
@@ -405,8 +405,8 @@ void Logger::LoadDefaultConfig(std::map<std::string, LoggerConfig>& loggerCfgs,
     loggerCfgs.insert({DEFAULT_LOGGER_NAME, LoggerConfig{"AsyncFileSink", level::warn}});
     if (sinkCfgs.find("AsyncFileSink") != sinkCfgs.end())
         return;
-    sinkCfgs.insert({"AsyncFileSink",
-                     SinkConfig{"AsyncFile", 10, 20000000, 300, GetAgentLogDir() + GetAgentLogName(), "Gzip"}});
+    sinkCfgs.insert(
+        {"AsyncFileSink", SinkConfig{"AsyncFile", 10, 20000000, 300, GetAgentLogDir() + GetAgentLogName(), "Gzip"}});
 }
 
 void Logger::LoadAllDefaultConfigs(std::map<std::string, LoggerConfig>& loggerCfgs,
@@ -414,15 +414,12 @@ void Logger::LoadAllDefaultConfigs(std::map<std::string, LoggerConfig>& loggerCf
     LoadDefaultConfig(loggerCfgs, sinkCfgs);
 
     loggerCfgs.insert({GetAgentLoggersPrefix(), LoggerConfig{"AsyncFileSink", level::info}});
-    loggerCfgs.insert({GetAgentLoggersPrefix() + "/profile", LoggerConfig{"AsyncFileSinkProfile", level::info}});
     loggerCfgs.insert({GetAgentLoggersPrefix() + "/status", LoggerConfig{"AsyncFileSinkStatus", level::info}});
 
     std::string dirPath = GetAgentSnapshotDir();
     if (!Mkdir(dirPath)) {
         LogMsg(std::string("Create snapshot dir error ") + dirPath + ", error" + ErrnoToString(GetErrno()));
     }
-    sinkCfgs.insert(
-        {"AsyncFileSinkProfile", SinkConfig{"AsyncFile", 61, 1, 1, dirPath + PATH_SEPARATOR + GetAgentProfileLogName()}});
     sinkCfgs.insert(
         {"AsyncFileSinkStatus", SinkConfig{"AsyncFile", 61, 1, 1, dirPath + PATH_SEPARATOR + GetAgentStatusLogName()}});
 }
diff --git a/core/logtail.cpp b/core/logtail.cpp
index 52e7e7db9c..90a2d187e9 100644
--- a/core/logtail.cpp
+++ b/core/logtail.cpp
@@ -47,6 +47,7 @@ DECLARE_FLAG_INT32(data_server_port);
 DECLARE_FLAG_BOOL(enable_env_ref_in_config);
 DECLARE_FLAG_BOOL(enable_sls_metrics_format);
 DECLARE_FLAG_BOOL(enable_containerd_upper_dir_detect);
+DECLARE_FLAG_BOOL(logtail_mode);
 
 void HandleSigtermSignal(int signum, siginfo_t* info, void* context) {
     LOG_INFO(sLogger, ("received signal", "SIGTERM"));
@@ -74,6 +75,12 @@ void enable_core(void) {
 
 static void overwrite_community_edition_flags() {
     // support run in installation dir on default
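+    // When logtail_mode is set, keep system config, checkpoint and buffer files under relative paths in the working directory.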
+    if (BOOL_FLAG(logtail_mode)) {
+        STRING_FLAG(logtail_sys_conf_dir) = ".";
+        STRING_FLAG(check_point_filename) = "checkpoint/logtail_check_point";
+        STRING_FLAG(default_buffer_file_path) = "checkpoint";
+        STRING_FLAG(ilogtail_docker_file_path_config) = "checkpoint/docker_path_config.json";
+    }
     STRING_FLAG(metrics_report_method) = "";
     INT32_FLAG(data_server_port) = 443;
     BOOL_FLAG(enable_env_ref_in_config) = true;
diff --git a/core/models/SpanEvent.cpp b/core/models/SpanEvent.cpp
index 812ccbe54b..4396a92860 100644
--- a/core/models/SpanEvent.cpp
+++ b/core/models/SpanEvent.cpp
@@ -15,6 +15,7 @@
  */
 
 #include "models/SpanEvent.h"
+#include "constants/SpanConstants.h"
 
 using namespace std;
 
@@ -75,16 +76,15 @@ size_t SpanEvent::SpanLink::DataSize() const {
     return mTraceId.size() + mSpanId.size() + mTraceState.size() + mTags.DataSize();
 }
 
-#ifdef APSARA_UNIT_TEST_MAIN
 Json::Value SpanEvent::SpanLink::ToJson() const {
     Json::Value root;
-    root["traceId"] = mTraceId.to_string();
-    root["spanId"] = mSpanId.to_string();
+    root[DEFAULT_TRACE_TAG_TRACE_ID] = mTraceId.to_string();
+    root[DEFAULT_TRACE_TAG_SPAN_ID] = mSpanId.to_string();
     if (!mTraceState.empty()) {
-        root["traceState"] = mTraceState.to_string();
+        root[DEFAULT_TRACE_TAG_TRACE_STATE] = mTraceState.to_string();
     }
     if (!mTags.mInner.empty()) {
-        Json::Value& tags = root["tags"];
+        Json::Value& tags = root[DEFAULT_TRACE_TAG_ATTRIBUTES];
         for (const auto& tag : mTags.mInner) {
             tags[tag.first.to_string()] = tag.second.to_string();
         }
@@ -92,17 +92,18 @@ Json::Value SpanEvent::SpanLink::ToJson() const {
     return root;
 }
 
+#ifdef APSARA_UNIT_TEST_MAIN
 void SpanEvent::SpanLink::FromJson(const Json::Value& value) {
-    SetTraceId(value["traceId"].asString());
-    SetSpanId(value["spanId"].asString());
-    if (value.isMember("traceState")) {
-        string s = value["traceState"].asString();
+    SetTraceId(value[DEFAULT_TRACE_TAG_TRACE_ID].asString());
+    SetSpanId(value[DEFAULT_TRACE_TAG_SPAN_ID].asString());
+    if (value.isMember(DEFAULT_TRACE_TAG_TRACE_STATE)) {
+        string s = value[DEFAULT_TRACE_TAG_TRACE_STATE].asString();
         if (!s.empty()) {
             SetTraceState(s);
         }
     }
-    if (value.isMember("tags")) {
-        Json::Value tags = value["tags"];
+    if (value.isMember(DEFAULT_TRACE_TAG_ATTRIBUTES)) {
+        Json::Value tags = value[DEFAULT_TRACE_TAG_ATTRIBUTES];
         for (const auto& key : tags.getMemberNames()) {
             SetTag(key, tags[key].asString());
         }
@@ -155,13 +156,12 @@ size_t SpanEvent::InnerEvent::DataSize() const {
     return sizeof(decltype(mTimestampNs)) + mName.size() + mTags.DataSize();
 }
 
-#ifdef APSARA_UNIT_TEST_MAIN
 Json::Value SpanEvent::InnerEvent::ToJson() const {
     Json::Value root;
-    root["name"] = mName.to_string();
-    root["timestampNs"] = static_cast<int64_t>(mTimestampNs);
+    root[DEFAULT_TRACE_TAG_SPAN_EVENT_NAME] = mName.to_string();
+    root[DEFAULT_TRACE_TAG_TIMESTAMP] = static_cast<int64_t>(mTimestampNs);
     if (!mTags.mInner.empty()) {
-        Json::Value& tags = root["tags"];
+        Json::Value& tags = root[DEFAULT_TRACE_TAG_ATTRIBUTES];
         for (const auto& tag : mTags.mInner) {
             tags[tag.first.to_string()] = tag.second.to_string();
         }
@@ -169,11 +169,12 @@ Json::Value SpanEvent::InnerEvent::ToJson() const {
     return root;
 }
 
+#ifdef APSARA_UNIT_TEST_MAIN
 void SpanEvent::InnerEvent::FromJson(const Json::Value& value) {
-    SetName(value["name"].asString());
-    SetTimestampNs(value["timestampNs"].asUInt64());
-    if (value.isMember("tags")) {
-        Json::Value tags = value["tags"];
+    SetName(value[DEFAULT_TRACE_TAG_SPAN_EVENT_NAME].asString());
+    SetTimestampNs(value[DEFAULT_TRACE_TAG_TIMESTAMP].asUInt64());
+    if (value.isMember(DEFAULT_TRACE_TAG_ATTRIBUTES)) {
+        Json::Value tags = value[DEFAULT_TRACE_TAG_ATTRIBUTES];
         for (const auto& key : tags.getMemberNames()) {
             SetTag(key, tags[key].asString());
         }
@@ -350,19 +351,19 @@ Json::Value SpanEvent::ToJson(bool enableEventMeta) const {
     root["startTimeNs"] = static_cast<int64_t>(mStartTimeNs);
     root["endTimeNs"] = static_cast<int64_t>(mEndTimeNs);
     if (!mTags.mInner.empty()) {
-        Json::Value& tags = root["tags"];
+        Json::Value& tags = root[DEFAULT_TRACE_TAG_ATTRIBUTES];
         for (const auto& tag : mTags.mInner) {
             tags[tag.first.to_string()] = tag.second.to_string();
         }
     }
     if (!mEvents.empty()) {
-        Json::Value& events = root["events"];
+        Json::Value& events = root[DEFAULT_TRACE_TAG_EVENTS];
         for (const auto& event : mEvents) {
             events.append(event.ToJson());
         }
     }
     if (!mLinks.empty()) {
-        Json::Value& links = root["links"];
+        Json::Value& links = root[DEFAULT_TRACE_TAG_LINKS];
         for (const auto& link : mLinks) {
             links.append(link.ToJson());
         }
@@ -405,21 +406,21 @@ bool SpanEvent::FromJson(const Json::Value& root) {
     }
     SetStartTimeNs(root["startTimeNs"].asUInt64());
     SetEndTimeNs(root["endTimeNs"].asUInt64());
-    if (root.isMember("tags")) {
-        Json::Value tags = root["tags"];
+    if (root.isMember(DEFAULT_TRACE_TAG_ATTRIBUTES)) {
+        Json::Value tags = root[DEFAULT_TRACE_TAG_ATTRIBUTES];
         for (const auto& key : tags.getMemberNames()) {
             SetTag(key, tags[key].asString());
         }
     }
-    if (root.isMember("events")) {
-        Json::Value events = root["events"];
+    if (root.isMember(DEFAULT_TRACE_TAG_EVENTS)) {
+        Json::Value events = root[DEFAULT_TRACE_TAG_EVENTS];
         for (const auto& event : events) {
             InnerEvent* e = AddEvent();
             e->FromJson(event);
         }
     }
-    if (root.isMember("links")) {
-        Json::Value links = root["links"];
+    if (root.isMember(DEFAULT_TRACE_TAG_LINKS)) {
+        Json::Value links = root[DEFAULT_TRACE_TAG_LINKS];
         for (const auto& link : links) {
             SpanLink* l = AddLink();
             l->FromJson(link);
@@ -438,4 +439,37 @@ bool SpanEvent::FromJson(const Json::Value& root) {
 }
 #endif
 
+const static std::string sSpanStatusCodeUnSet = "UNSET";
+const static std::string sSpanStatusCodeOk = "OK";
+const static std::string sSpanStatusCodeError = "ERROR";
+
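+// Returns a stable string name for the given span status code.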
+const std::string& GetStatusString(SpanEvent::StatusCode status) {
+    switch (status) {
+        case SpanEvent::StatusCode::Unset: return sSpanStatusCodeUnSet;
+        case SpanEvent::StatusCode::Ok: return sSpanStatusCodeOk;
+        case SpanEvent::StatusCode::Error: return sSpanStatusCodeError;
+        default: return sSpanStatusCodeUnSet;
+    }
+}
+
+const static std::string sSpanKindUnspecified = "unspecified";
+const static std::string sSpanKindInternal = "internal";
+const static std::string sSpanKindServer = "server";
+const static std::string sSpanKindClient = "client";
+const static std::string sSpanKindProducer = "producer";
+const static std::string sSpanKindConsumer = "consumer";
+const static std::string sSpanKindUnknown = "unknown";
+
+const std::string& GetKindString(SpanEvent::Kind kind) {
+    switch (kind) {
+        case SpanEvent::Kind::Unspecified: return sSpanKindUnspecified;
+        case SpanEvent::Kind::Internal: return sSpanKindInternal;
+        case SpanEvent::Kind::Server: return sSpanKindServer;
+        case SpanEvent::Kind::Client: return sSpanKindClient;
+        case SpanEvent::Kind::Producer: return sSpanKindProducer;
+        case SpanEvent::Kind::Consumer: return sSpanKindConsumer;
+        default: return sSpanKindUnknown;
+    }
+}
+
 } // namespace logtail
diff --git a/core/models/SpanEvent.h b/core/models/SpanEvent.h
index 584d64a5d6..780664eb1a 100644
--- a/core/models/SpanEvent.h
+++ b/core/models/SpanEvent.h
@@ -19,6 +19,7 @@
 #include <map>
 #include <string>
 #include <vector>
+#include <json/json.h>
 
 #include "common/memory/SourceBuffer.h"
 #include "models/PipelineEvent.h"
@@ -67,8 +68,8 @@ class SpanEvent : public PipelineEvent {
 
         size_t DataSize() const;
 
-#ifdef APSARA_UNIT_TEST_MAIN
         Json::Value ToJson() const;
+#ifdef APSARA_UNIT_TEST_MAIN
         void FromJson(const Json::Value& value);
 #endif
 
@@ -107,8 +108,8 @@ class SpanEvent : public PipelineEvent {
 
         size_t DataSize() const;
 
-#ifdef APSARA_UNIT_TEST_MAIN
         Json::Value ToJson() const;
+#ifdef APSARA_UNIT_TEST_MAIN
         void FromJson(const Json::Value& value);
 #endif
 
@@ -214,4 +215,7 @@ class SpanEvent : public PipelineEvent {
 #endif
 };
 
+const std::string& GetStatusString(SpanEvent::StatusCode status);
+const std::string& GetKindString(SpanEvent::Kind kind);
+
 } // namespace logtail
diff --git a/core/monitor/AlarmManager.cpp b/core/monitor/AlarmManager.cpp
index d4f0edfbf6..e5911d5c87 100644
--- a/core/monitor/AlarmManager.cpp
+++ b/core/monitor/AlarmManager.cpp
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "monitor/AlarmManager.h"
+#include "AlarmManager.h"
 
-#include "LogFileProfiler.h"
+#include "Monitor.h"
 #include "app_config/AppConfig.h"
 #include "common/LogtailCommonFlags.h"
 #include "common/StringTools.h"
@@ -104,6 +104,7 @@ AlarmManager::AlarmManager() {
     mMessageType[COMPRESS_FAIL_ALARM] = "COMPRESS_FAIL_ALARM";
     mMessageType[SERIALIZE_FAIL_ALARM] = "SERIALIZE_FAIL_ALARM";
     mMessageType[RELABEL_METRIC_FAIL_ALARM] = "RELABEL_METRIC_FAIL_ALARM";
+    mMessageType[REGISTER_HANDLERS_TOO_SLOW_ALARM] = "REGISTER_HANDLERS_TOO_SLOW_ALARM";
 }
 
 void AlarmManager::Init() {
@@ -117,6 +118,9 @@ void AlarmManager::Stop() {
         mIsThreadRunning = false;
     }
     mStopCV.notify_one();
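+    // The gathering thread may never have been started, in which case there is nothing to wait for.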
+    if (!mThreadRes.valid()) {
+        return;
+    }
     future_status s = mThreadRes.wait_for(chrono::seconds(1));
     if (s == future_status::ready) {
         LOG_INFO(sLogger, ("alarm gathering", "stopped successfully"));
@@ -141,7 +145,6 @@ bool AlarmManager::SendAlarmLoop() {
 }
 
 void AlarmManager::SendAllRegionAlarm() {
-    AlarmMessage* messagePtr = nullptr;
     int32_t currentTime = time(nullptr);
     size_t sendRegionIndex = 0;
     size_t sendAlarmTypeIndex = 0;
@@ -186,7 +189,7 @@ void AlarmManager::SendAllRegionAlarm() {
 
             // LOG_DEBUG(sLogger, ("3Send Alarm", region)("region", sendRegionIndex)("alarm index",
             // mMessageType[sendAlarmTypeIndex]));
-            map<string, AlarmMessage*>& alarmMap = alarmBufferVec[sendAlarmTypeIndex];
+            map<string, unique_ptr<AlarmMessage>>& alarmMap = alarmBufferVec[sendAlarmTypeIndex];
             if (alarmMap.size() == 0
                 || currentTime - lastUpdateTimeVec[sendAlarmTypeIndex] < INT32_FLAG(logtail_alarm_interval)) {
                 // go next alarm type
@@ -217,12 +220,13 @@ void AlarmManager::SendAllRegionAlarm() {
 
             // LOG_DEBUG(sLogger, ("4Send Alarm", region)("region", sendRegionIndex)("alarm index",
             // mMessageType[sendAlarmTypeIndex]));
-            logGroup.set_source(LogFileProfiler::mIpAddr);
+            logGroup.set_source(LoongCollectorMonitor::mIpAddr);
             logGroup.set_category(ALARM_SLS_LOGSTORE_NAME);
             auto now = GetCurrentLogtailTime();
-            for (map<string, AlarmMessage*>::iterator mapIter = alarmMap.begin(); mapIter != alarmMap.end();
+            for (map<string, unique_ptr<AlarmMessage>>::iterator mapIter = alarmMap.begin();
+                 mapIter != alarmMap.end();
                  ++mapIter) {
-                messagePtr = mapIter->second;
+                auto& messagePtr = mapIter->second;
 
                 // LOG_DEBUG(sLogger, ("5Send Alarm", region)("region", sendRegionIndex)("alarm index",
                 // sendAlarmTypeIndex)("msg", messagePtr->mMessage));
@@ -245,7 +249,7 @@ void AlarmManager::SendAllRegionAlarm() {
 
                 contentPtr = logPtr->add_contents();
                 contentPtr->set_key("ip");
-                contentPtr->set_value(LogFileProfiler::mIpAddr);
+                contentPtr->set_value(LoongCollectorMonitor::mIpAddr);
 
                 contentPtr = logPtr->add_contents();
                 contentPtr->set_key("os");
@@ -266,7 +270,6 @@ void AlarmManager::SendAllRegionAlarm() {
                     contentPtr->set_key("category");
                     contentPtr->set_value(messagePtr->mCategory);
                 }
-                delete messagePtr;
             }
             lastUpdateTimeVec[sendAlarmTypeIndex] = currentTime;
             alarmMap.clear();
@@ -319,9 +322,8 @@ void AlarmManager::SendAlarm(const AlarmType alarmType,
     string key = projectName + "_" + category;
     AlarmVector& alarmBufferVec = *MakesureLogtailAlarmMapVecUnlocked(region);
     if (alarmBufferVec[alarmType].find(key) == alarmBufferVec[alarmType].end()) {
-        AlarmMessage* messagePtr
-            = new AlarmMessage(mMessageType[alarmType], projectName, category, message, 1);
-        alarmBufferVec[alarmType].insert(pair<string, AlarmMessage*>(key, messagePtr));
+        auto* messagePtr = new AlarmMessage(mMessageType[alarmType], projectName, category, message, 1);
+        alarmBufferVec[alarmType].emplace(key, messagePtr);
     } else
         alarmBufferVec[alarmType][key]->IncCount();
 }
diff --git a/core/monitor/AlarmManager.h b/core/monitor/AlarmManager.h
index b8c248c2e5..6357097262 100644
--- a/core/monitor/AlarmManager.h
+++ b/core/monitor/AlarmManager.h
@@ -95,7 +95,8 @@ enum AlarmType {
     COMPRESS_FAIL_ALARM = 65,
     SERIALIZE_FAIL_ALARM = 66,
     RELABEL_METRIC_FAIL_ALARM = 67,
-    ALL_LOGTAIL_ALARM_NUM = 68
+    REGISTER_HANDLERS_TOO_SLOW_ALARM = 68,
+    ALL_LOGTAIL_ALARM_NUM = 69
 };
 
 struct AlarmMessage {
@@ -106,10 +107,10 @@ struct AlarmMessage {
     int32_t mCount;
 
     AlarmMessage(const std::string& type,
-                        const std::string& projectName,
-                        const std::string& category,
-                        const std::string& message,
-                        const int32_t count)
+                 const std::string& projectName,
+                 const std::string& category,
+                 const std::string& message,
+                 const int32_t count)
         : mMessageType(type), mProjectName(projectName), mCategory(category), mMessage(message), mCount(count) {}
     void IncCount(int32_t inc = 1) { mCount += inc; }
 };
@@ -134,7 +135,7 @@ class AlarmManager {
     bool IsLowLevelAlarmValid();
 
 private:
-    typedef std::vector<std::map<std::string, AlarmMessage*> > AlarmVector;
+    using AlarmVector = std::vector<std::map<std::string, std::unique_ptr<AlarmMessage>>>;
 
     AlarmManager();
     ~AlarmManager() = default;
@@ -151,7 +152,7 @@ class AlarmManager {
 
 
     std::vector<std::string> mMessageType;
-    std::map<std::string, std::pair<std::shared_ptr<AlarmVector>, std::vector<int32_t> > > mAllAlarmMap;
+    std::map<std::string, std::pair<std::shared_ptr<AlarmVector>, std::vector<int32_t>>> mAllAlarmMap;
     PTMutex mAlarmBufferMutex;
 
     std::atomic_int mLastLowLevelTime{0};
diff --git a/core/monitor/LogFileProfiler.cpp b/core/monitor/LogFileProfiler.cpp
deleted file mode 100644
index 95b9456fb7..0000000000
--- a/core/monitor/LogFileProfiler.cpp
+++ /dev/null
@@ -1,562 +0,0 @@
-// Copyright 2022 iLogtail Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "LogFileProfiler.h"
-
-#include <string>
-
-#include "app_config/AppConfig.h"
-#include "constants/Constants.h"
-#include "common/ErrorUtil.h"
-#include "common/LogtailCommonFlags.h"
-#include "common/MachineInfoUtil.h"
-#include "common/RuntimeUtil.h"
-#include "common/StringTools.h"
-#include "common/TimeUtil.h"
-#include "common/version.h"
-#include "file_server/ConfigManager.h"
-#include "logger/Logger.h"
-#include "provider/Provider.h"
-#include "pipeline/queue/QueueKeyManager.h"
-
-DEFINE_FLAG_INT32(profile_data_send_interval, "interval of send LogFile/DomainSocket profile data, seconds", 600);
-
-using namespace std;
-using namespace sls_logs;
-
-namespace logtail {
-
-string LogFileProfiler::mHostname;
-string LogFileProfiler::mIpAddr;
-string LogFileProfiler::mOsDetail;
-string LogFileProfiler::mUsername;
-int32_t LogFileProfiler::mSystemBootTime = -1;
-string LogFileProfiler::mECSInstanceID;
-string LogFileProfiler::mECSUserID;
-string LogFileProfiler::mECSRegionID;
-string LogFileProfiler::mStartTime;
-
-LogFileProfiler::LogFileProfiler() {
-    srand(time(NULL));
-    mSendInterval = INT32_FLAG(profile_data_send_interval);
-    mLastSendTime = time(NULL) - (rand() % (mSendInterval / 10)) * 10;
-    mDumpFileName = GetProfileSnapshotDumpFileName();
-    mBakDumpFileName = GetProfileSnapshotDumpFileName() + "_bak";
-
-    mHostname = GetHostName();
-#if defined(_MSC_VER)
-    mHostname = EncodingConverter::GetInstance()->FromACPToUTF8(mHostname);
-#endif
-    mIpAddr = GetHostIp();
-    mOsDetail = GetOsDetail();
-    mUsername = GetUsername();
-    // TODO: This may take up to 3s to construct the object. This is bad.
-    ECSMeta ecsMeta = FetchECSMeta();
-    mECSInstanceID = ecsMeta.instanceID;
-    mECSUserID = ecsMeta.userID;
-    mECSRegionID = ecsMeta.regionID;
-}
-
-bool LogFileProfiler::GetProfileData(LogGroup& logGroup, LogStoreStatistic* statistic) {
-    if (statistic->mReadBytes + statistic->mSkipBytes == 0)
-        return false;
-
-    Log* logPtr = logGroup.add_logs();
-    auto now = GetCurrentLogtailTime();
-    SetLogTime(logPtr, AppConfig::GetInstance()->EnableLogTimeAutoAdjust() ? now.tv_sec + GetTimeDelta() : now.tv_sec);
-
-    Log_Content* contentPtr = logPtr->add_contents();
-    contentPtr->set_key("logreader_project_name");
-    contentPtr->set_value(statistic->mProjectName);
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("category");
-    contentPtr->set_value(statistic->mCategory);
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("config_name");
-    contentPtr->set_value(statistic->mConfigName);
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("file_name");
-    contentPtr->set_value(statistic->mConvertedPath.empty() ? "logstore_statistics" : statistic->mConvertedPath);
-    if (!statistic->mHostLogPath.empty()) {
-        contentPtr = logPtr->add_contents();
-        contentPtr->set_key("host_log_path");
-        contentPtr->set_value(statistic->mHostLogPath);
-    }
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("loongcollector_version");
-    contentPtr->set_value(ILOGTAIL_VERSION);
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("source_ip");
-    contentPtr->set_value(mIpAddr);
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("os");
-    contentPtr->set_value(OS_NAME);
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("total_bytes");
-    contentPtr->set_value(ToString(statistic->mReadBytes));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("skip_bytes");
-    contentPtr->set_value(ToString(statistic->mSkipBytes));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("succeed_lines");
-    contentPtr->set_value(ToString(statistic->mSplitLines - statistic->mParseFailures - statistic->mSendFailures));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("parse_failures");
-    contentPtr->set_value(ToString(statistic->mParseFailures));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("send_failures");
-    contentPtr->set_value(ToString(statistic->mSendFailures));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("regex_match_failures");
-    contentPtr->set_value(ToString(statistic->mRegexMatchFailures));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("history_data_failures");
-    contentPtr->set_value(ToString(statistic->mHistoryFailures));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("time_format_failures");
-    contentPtr->set_value(ToString(statistic->mParseTimeFailures));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("file_dev");
-    contentPtr->set_value(ToString(statistic->mFileDev));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("file_inode");
-    contentPtr->set_value(ToString(statistic->mFileInode));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("last_read_time");
-    contentPtr->set_value(ToString(statistic->mLastReadTime));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("read_count");
-    contentPtr->set_value(ToString(statistic->mReadCount));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("file_size");
-    contentPtr->set_value(ToString(statistic->mFileSize));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("read_offset");
-    contentPtr->set_value(ToString(statistic->mReadOffset));
-    contentPtr = logPtr->add_contents();
-    contentPtr->set_key("read_avg_delay");
-    contentPtr->set_value(ToString(statistic->mReadCount == 0 ? 0 : statistic->mReadDelaySum / statistic->mReadCount));
-
-    if (!statistic->mTags.empty()) {
-        const std::vector<sls_logs::LogTag>& extraTags = statistic->mTags;
-        for (size_t i = 0; i < extraTags.size(); ++i) {
-            contentPtr = logPtr->add_contents();
-            contentPtr->set_key(extraTags[i].key());
-            contentPtr->set_value(extraTags[i].value());
-        }
-    }
-
-    if (!statistic->mErrorLine.empty()) {
-        contentPtr = logPtr->add_contents();
-        contentPtr->set_key("error_line");
-        contentPtr->set_value(statistic->mErrorLine);
-    }
-
-    // get logstore send info
-    // if (statistic->mHostLogPath.empty()) {
-    //     QueueKey fbKey = QueueKeyManager::GetInstance()->GetKey("-flusher_sls-" + statistic->mProjectName + "#"
-    //                                                             + statistic->mCategory);
-    //     LogstoreSenderStatistics senderStatistics = Sender::GetInstance()->GetSenderStatistics(fbKey);
-
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("max_unsend_time");
-    //     contentPtr->set_value(ToString(senderStatistics.mMaxUnsendTime));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("min_unsend_time");
-    //     contentPtr->set_value(ToString(senderStatistics.mMinUnsendTime));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("max_send_success_time");
-    //     contentPtr->set_value(ToString(senderStatistics.mMaxSendSuccessTime));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("send_queue_size");
-    //     contentPtr->set_value(ToString(senderStatistics.mSendQueueSize));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("send_network_error");
-    //     contentPtr->set_value(ToString(senderStatistics.mSendNetWorkErrorCount));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("send_quota_error");
-    //     contentPtr->set_value(ToString(senderStatistics.mSendQuotaErrorCount));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("send_discard_error");
-    //     contentPtr->set_value(ToString(senderStatistics.mSendDiscardErrorCount));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("send_success_count");
-    //     contentPtr->set_value(ToString(senderStatistics.mSendSuccessCount));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("send_block_flag");
-    //     contentPtr->set_value(ToString(senderStatistics.mSendBlockFlag));
-    //     contentPtr = logPtr->add_contents();
-    //     contentPtr->set_key("sender_valid_flag");
-    //     contentPtr->set_value(ToString(senderStatistics.mValidToSendFlag));
-    // }
-
-    return true;
-}
-
-LogFileProfiler::LogstoreSenderStatisticsMap*
-LogFileProfiler::MakesureRegionStatisticsMapUnlocked(const string& region) {
-    // @todo
-    // string region;
-    std::map<std::string, LogstoreSenderStatisticsMap*>::iterator iter = mAllStatisticsMap.find(region);
-    if (iter != mAllStatisticsMap.end()) {
-        return iter->second;
-    }
-    LogstoreSenderStatisticsMap* pMap = new LogstoreSenderStatisticsMap;
-    mAllStatisticsMap.insert(std::pair<std::string, LogstoreSenderStatisticsMap*>(region, pMap));
-    return pMap;
-}
-
-void LogFileProfiler::SendProfileData(bool forceSend) {
-    int32_t curTime = time(NULL);
-    if (!forceSend && (curTime - mLastSendTime < mSendInterval))
-        return;
-    size_t sendRegionIndex = 0;
-    Json::Value detail;
-    Json::Value logstore;
-    do {
-        LogGroup logGroup;
-        logGroup.set_category("shennong_log_profile");
-        logGroup.set_source(LogFileProfiler::mIpAddr);
-        string region;
-        {
-            // only lock statisticsMap, not sender or dump
-            std::lock_guard<std::mutex> lock(mStatisticLock);
-            if (mAllStatisticsMap.size() <= sendRegionIndex) {
-                break;
-            }
-
-            size_t iterIndex = 0;
-            std::map<std::string, LogstoreSenderStatisticsMap*>::iterator iter = mAllStatisticsMap.begin();
-            while (iterIndex != sendRegionIndex) {
-                ++iterIndex;
-                ++iter;
-            }
-
-            ++sendRegionIndex;
-            region = iter->first;
-            LogstoreSenderStatisticsMap& statisticsMap = *(iter->second);
-            if (statisticsMap.size() > (size_t)0) {
-                std::unordered_map<string, LogStoreStatistic*>::iterator iter = statisticsMap.begin();
-                for (; iter != statisticsMap.end();) {
-                    GetProfileData(logGroup, iter->second);
-                    if ((curTime - iter->second->mLastUpdateTime) > mSendInterval * 3) {
-                        delete iter->second;
-                        iter = statisticsMap.erase(iter);
-                    } else {
-                        iter->second->Reset();
-                        iter++;
-                    }
-                }
-            }
-        }
-        UpdateDumpData(logGroup, detail, logstore);
-        GetProfileSender()->SendToProfileProject(region, logGroup);
-    } while (true);
-    DumpToLocal(curTime, forceSend, detail, logstore);
-    mLastSendTime = curTime;
-}
-
-
-// 1. when in container, convertedPath is the file path in container, hostLogPath is the file path on host.
-//    eg. /home/admin/access.log in container, convertedPath = "/home/admin/access.log",
-//    hostLogPath="/logtail_host/xxx/home/admin/access.log". so hostLogPath is unique.
-// 2. On host, convertedPath = hostLogPath.
-void LogFileProfiler::AddProfilingData(const std::string& configName,
-                                       const std::string& region,
-                                       const std::string& projectName,
-                                       const std::string& category,
-                                       const std::string& convertedPath,
-                                       const std::string& hostLogPath,
-                                       const std::vector<sls_logs::LogTag>& tags,
-                                       uint64_t readBytes,
-                                       uint64_t skipBytes,
-                                       uint64_t splitLines,
-                                       uint64_t parseFailures,
-                                       uint64_t regexMatchFailures,
-                                       uint64_t parseTimeFailures,
-                                       uint64_t historyFailures,
-                                       uint64_t sendFailures,
-                                       const std::string& errorLine) {
-    if (!hostLogPath.empty()) {
-        // logstore statistics
-        AddProfilingData(configName,
-                         region,
-                         projectName,
-                         category,
-                         "",
-                         "",
-                         tags,
-                         readBytes,
-                         skipBytes,
-                         splitLines,
-                         parseFailures,
-                         regexMatchFailures,
-                         parseTimeFailures,
-                         historyFailures,
-                         sendFailures,
-                         "");
-    }
-    string key = projectName + "_" + category + "_" + configName + "_" + hostLogPath;
-    std::lock_guard<std::mutex> lock(mStatisticLock);
-    LogstoreSenderStatisticsMap& statisticsMap = *MakesureRegionStatisticsMapUnlocked(region);
-    std::unordered_map<string, LogStoreStatistic*>::iterator iter = statisticsMap.find(key);
-    if (iter != statisticsMap.end()) {
-        (iter->second)->mReadBytes += readBytes;
-        (iter->second)->mSkipBytes += skipBytes;
-        (iter->second)->mSplitLines += splitLines;
-        (iter->second)->mParseFailures += parseFailures;
-        (iter->second)->mRegexMatchFailures += regexMatchFailures;
-        (iter->second)->mParseTimeFailures += parseTimeFailures;
-        (iter->second)->mHistoryFailures += historyFailures;
-        (iter->second)->mSendFailures += sendFailures;
-        if ((iter->second)->mErrorLine.empty())
-            (iter->second)->mErrorLine = errorLine;
-        (iter->second)->mLastUpdateTime = time(NULL);
-    } else {
-        LogStoreStatistic* statistic = NULL;
-        if (hostLogPath.empty()) {
-            std::vector<sls_logs::LogTag> empty;
-            statistic = new LogStoreStatistic(configName,
-                                              projectName,
-                                              category,
-                                              convertedPath,
-                                              hostLogPath,
-                                              empty,
-                                              readBytes,
-                                              skipBytes,
-                                              splitLines,
-                                              parseFailures,
-                                              regexMatchFailures,
-                                              parseTimeFailures,
-                                              historyFailures,
-                                              sendFailures,
-                                              errorLine);
-        } else {
-            statistic = new LogStoreStatistic(configName,
-                                              projectName,
-                                              category,
-                                              convertedPath,
-                                              hostLogPath,
-                                              tags,
-                                              readBytes,
-                                              skipBytes,
-                                              splitLines,
-                                              parseFailures,
-                                              regexMatchFailures,
-                                              parseTimeFailures,
-                                              historyFailures,
-                                              sendFailures,
-                                              errorLine);
-        }
-        statisticsMap.insert(std::pair<string, LogStoreStatistic*>(key, statistic));
-    }
-}
-
-void LogFileProfiler::AddProfilingSkipBytes(const std::string& configName,
-                                            const std::string& region,
-                                            const std::string& projectName,
-                                            const std::string& category,
-                                            const std::string& convertedPath,
-                                            const std::string& hostLogPath,
-                                            const std::vector<sls_logs::LogTag>& tags,
-                                            uint64_t skipBytes) {
-    if (!hostLogPath.empty()) {
-        // logstore statistics
-        AddProfilingSkipBytes(configName, region, projectName, category, "", "", tags, skipBytes);
-    }
-    string key = projectName + "_" + category + "_" + configName + "_" + hostLogPath;
-    std::lock_guard<std::mutex> lock(mStatisticLock);
-    LogstoreSenderStatisticsMap& statisticsMap = *MakesureRegionStatisticsMapUnlocked(region);
-    std::unordered_map<string, LogStoreStatistic*>::iterator iter = statisticsMap.find(key);
-    if (iter != statisticsMap.end()) {
-        (iter->second)->mSkipBytes += skipBytes;
-        (iter->second)->mLastUpdateTime = time(NULL);
-    } else {
-        LogStoreStatistic* statistic = NULL;
-        if (hostLogPath.empty()) {
-            std::vector<sls_logs::LogTag> empty;
-            statistic = new LogStoreStatistic(configName, projectName, category, convertedPath, hostLogPath, empty);
-        } else {
-            statistic = new LogStoreStatistic(configName, projectName, category, convertedPath, hostLogPath, tags);
-        }
-        statistic->mSkipBytes += skipBytes;
-        statisticsMap.insert(std::pair<string, LogStoreStatistic*>(key, statistic));
-    }
-}
-
-void LogFileProfiler::AddProfilingReadBytes(const std::string& configName,
-                                            const std::string& region,
-                                            const std::string& projectName,
-                                            const std::string& category,
-                                            const std::string& convertedPath,
-                                            const std::string& hostLogPath,
-                                            const std::vector<sls_logs::LogTag>& tags,
-                                            uint64_t dev,
-                                            uint64_t inode,
-                                            uint64_t fileSize,
-                                            uint64_t readOffset,
-                                            int32_t lastReadTime) {
-    if (!hostLogPath.empty()) {
-        // logstore statistics
-        AddProfilingReadBytes(
-            configName, region, projectName, category, "", "", tags, dev, inode, fileSize, readOffset, lastReadTime);
-    }
-    string key = projectName + "_" + category + "_" + configName + "_" + hostLogPath;
-    std::lock_guard<std::mutex> lock(mStatisticLock);
-    LogstoreSenderStatisticsMap& statisticsMap = *MakesureRegionStatisticsMapUnlocked(region);
-    std::unordered_map<string, LogStoreStatistic*>::iterator iter = statisticsMap.find(key);
-    if (iter != statisticsMap.end()) {
-        (iter->second)->UpdateReadInfo(dev, inode, fileSize, readOffset, lastReadTime);
-    } else {
-        LogStoreStatistic* statistic = NULL;
-        if (hostLogPath.empty()) {
-            std::vector<sls_logs::LogTag> empty;
-            statistic = new LogStoreStatistic(configName, projectName, category, convertedPath, hostLogPath, empty);
-        } else {
-            statistic = new LogStoreStatistic(configName, projectName, category, convertedPath, hostLogPath, tags);
-        }
-        statistic->UpdateReadInfo(dev, inode, fileSize, readOffset, lastReadTime);
-        statisticsMap.insert(std::pair<string, LogStoreStatistic*>(key, statistic));
-    }
-}
-
-void LogFileProfiler::DumpToLocal(int32_t curTime, bool forceSend, Json::Value& detail, Json::Value& logstore) {
-    Json::Value root;
-    root["version"] = ILOGTAIL_VERSION;
-    root["ip"] = mIpAddr;
-    root["begin_time"] = (Json::UInt64)mLastSendTime;
-    root["begin_time_readable"] = GetTimeStamp(mLastSendTime, "%Y-%m-%d %H:%M:%S");
-    root["end_time"] = (Json::UInt64)curTime;
-    root["end_time_readable"] = GetTimeStamp(curTime, "%Y-%m-%d %H:%M:%S");
-
-
-    root["detail"] = detail;
-    root["logstore"] = logstore;
-    string styledRoot = root.toStyledString();
-    if (forceSend) {
-        FILE* pFile = fopen(mBakDumpFileName.c_str(), "w");
-        if (pFile == NULL) {
-            LOG_ERROR(sLogger, ("open file failed", mBakDumpFileName)("errno", errno));
-            return;
-        }
-        fwrite(styledRoot.c_str(), 1, styledRoot.size(), pFile);
-        fclose(pFile);
-#if defined(_MSC_VER)
-        remove(mDumpFileName.c_str());
-#endif
-        if (rename(mBakDumpFileName.c_str(), mDumpFileName.c_str()) == -1)
-            LOG_INFO(sLogger,
-                     ("rename profile snapshot fail, file", mDumpFileName)("error", ErrnoToString(GetErrno())));
-    }
-
-    static auto gProfileLogger = Logger::Instance().GetLogger(GetAgentLoggersPrefix() + "/profile");
-    LOG_INFO(gProfileLogger, ("\n", styledRoot));
-}
-
-void LogFileProfiler::UpdateDumpData(const sls_logs::LogGroup& logGroup, Json::Value& detail, Json::Value& logstore) {
-    for (int32_t logIdx = 0; logIdx < logGroup.logs_size(); ++logIdx) {
-        Json::Value category;
-        const Log& log = logGroup.logs(logIdx);
-        bool logstoreFlag = false;
-        for (int32_t conIdx = 0; conIdx < log.contents_size(); ++conIdx) {
-            const Log_Content& content = log.contents(conIdx);
-            const string& key = content.key();
-            const string& value = content.value();
-            if (key == "logreader_project_name")
-                category["project"] = value;
-            else if (key == "category")
-                category["logstore"] = value;
-            else if (key == "config_name")
-                category["config_name"] = value;
-            else if (key == "file_name") {
-                category["file"] = value;
-                if (value == "logstore_statistics") {
-                    logstoreFlag = true;
-                }
-            } else if (key == "total_bytes")
-                category["read_bytes"] = value;
-            else if (key == "skip_bytes")
-                category["skip_bytes"] = value;
-            else if (key == "succeed_lines")
-                category["split_lines"] = value;
-            else if (key == "parse_failures")
-                category["parse_fail_lines"] = value;
-            else if (key == "file_dev")
-                category["file_dev"] = value;
-            else if (key == "file_inode")
-                category["file_inode"] = value;
-            else if (key == "last_read_time")
-                category["last_read_time"] = value;
-            else if (key == "read_count")
-                category["read_count"] = value;
-            else if (key == "file_size")
-                category["file_size"] = value;
-            else if (key == "read_offset")
-                category["read_offset"] = value;
-            else if (key == "read_avg_delay")
-                category["read_avg_delay"] = value;
-            else if (key == "max_unsend_time")
-                category["max_unsend_time"] = value;
-            else if (key == "min_unsend_time")
-                category["min_unsend_time"] = value;
-            else if (key == "max_send_success_time")
-                category["max_send_success_time"] = value;
-            else if (key == "send_queue_size")
-                category["send_queue_size"] = value;
-            else if (key == "send_network_error")
-                category["send_network_error"] = value;
-            else if (key == "send_quota_error")
-                category["send_quota_error"] = value;
-            else if (key == "send_discard_error")
-                category["send_discard_error"] = value;
-            else if (key == "send_success_count")
-                category["send_success_count"] = value;
-            else if (key == "sender_valid_flag")
-                category["sender_valid_flag"] = value;
-            else if (key == "send_block_flag")
-                category["send_block_flag"] = value;
-        }
-        if (logstoreFlag) {
-            logstore.append(category);
-        } else {
-            detail.append(category);
-        }
-    }
-}
-
-#ifdef APSARA_UNIT_TEST_MAIN
-uint64_t LogFileProfiler::GetProfilingLines(const std::string& projectName,
-                                            const std::string& category,
-                                            const std::string& convertedPath) {
-    std::string key = projectName + "_" + category + "_" + convertedPath;
-    std::lock_guard<std::mutex> lock(mStatisticLock);
-    if (mAllStatisticsMap.size() != (size_t)1) {
-        return 0;
-    }
-    LogstoreSenderStatisticsMap statisticMap = *(mAllStatisticsMap.begin()->second);
-    std::unordered_map<std::string, LogStoreStatistic*>::iterator iter = statisticMap.find(key);
-    if (iter == statisticMap.end())
-        return 0;
-    else
-        return (iter->second->mSplitLines - iter->second->mParseFailures);
-}
-
-void LogFileProfiler::CleanEnviroments() {
-    std::lock_guard<std::mutex> lock(mStatisticLock);
-    // just for test, memory leaks
-    mAllStatisticsMap.clear();
-}
-#endif
-
-} // namespace logtail
diff --git a/core/monitor/LogFileProfiler.h b/core/monitor/LogFileProfiler.h
deleted file mode 100644
index f9ecc4cce7..0000000000
--- a/core/monitor/LogFileProfiler.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Copyright 2022 iLogtail Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include <stdint.h>
-#include <string>
-#include <mutex>
-#include <unordered_map>
-#include <map>
-#include <json/json.h>
-#include "protobuf/sls/sls_logs.pb.h"
-
-namespace sls_logs {
-class LogGroup;
-}
-
-namespace logtail {
-// Collect the log file's profile such as lines processed.
-class LogFileProfiler {
-public:
-    static LogFileProfiler* GetInstance() {
-        static LogFileProfiler* ptr = new LogFileProfiler();
-        return ptr;
-    }
-
-    void AddProfilingData(const std::string& configName,
-                          const std::string& region,
-                          const std::string& projectName,
-                          const std::string& category,
-                          const std::string& convertedPath,
-                          const std::string& hostLogPath,
-                          const std::vector<sls_logs::LogTag>& tags,
-                          uint64_t readBytes,
-                          uint64_t skipBytes,
-                          uint64_t splitLines,
-                          uint64_t parseFailures,
-                          uint64_t regexMatchFailures,
-                          uint64_t parseTimeFailures,
-                          uint64_t historyFailures,
-                          uint64_t sendFailures,
-                          const std::string& errorLine);
-    void AddProfilingSkipBytes(const std::string& configName,
-                               const std::string& region,
-                               const std::string& projectName,
-                               const std::string& category,
-                               const std::string& convertedPath,
-                               const std::string& hostLogPath,
-                               const std::vector<sls_logs::LogTag>& tags,
-                               uint64_t skipBytes);
-
-    void AddProfilingReadBytes(const std::string& configName,
-                               const std::string& region,
-                               const std::string& projectName,
-                               const std::string& category,
-                               const std::string& convertedPath,
-                               const std::string& host,
-                               const std::vector<sls_logs::LogTag>& tags,
-                               uint64_t dev,
-                               uint64_t inode,
-                               uint64_t fileSize,
-                               uint64_t readOffset,
-                               int32_t lastReadTime);
-
-    void SendProfileData(bool forceSend = false);
-
-    void SetProfileInterval(int32_t interval) { mSendInterval = interval; }
-
-    int32_t GetProfileInterval() { return mSendInterval; }
-
-    static std::string mHostname;
-    static std::string mIpAddr;
-    static std::string mOsDetail;
-    static std::string mUsername;
-    static int32_t mSystemBootTime;
-    static std::string mECSInstanceID;
-    static std::string mECSUserID;
-    static std::string mECSRegionID;
-    static std::string mStartTime;
-
-private:
-    struct LogStoreStatistic {
-        LogStoreStatistic(const std::string& configName,
-                          const std::string& projectName,
-                          const std::string& category,
-                          const std::string& convertedPath,
-                          const std::string& hostLogPath,
-                          const std::vector<sls_logs::LogTag>& tags,
-                          uint64_t readBytes = 0,
-                          uint64_t skipBytes = 0,
-                          uint64_t splitLines = 0,
-                          uint64_t parseFailures = 0,
-                          uint64_t regexMatchFailures = 0,
-                          uint64_t parseTimeFailures = 0,
-                          uint64_t historyFailures = 0,
-                          uint64_t sendFailures = 0,
-                          const std::string& errorLine = "")
-            : mConfigName(configName),
-              mProjectName(projectName),
-              mCategory(category),
-              mConvertedPath(convertedPath),
-              mHostLogPath(hostLogPath),
-              mTags(tags),
-              mReadBytes(readBytes),
-              mSkipBytes(skipBytes),
-              mSplitLines(splitLines),
-              mParseFailures(parseFailures),
-              mRegexMatchFailures(regexMatchFailures),
-              mParseTimeFailures(parseTimeFailures),
-              mHistoryFailures(historyFailures),
-              mSendFailures(sendFailures),
-              mErrorLine(errorLine) {
-            mLastUpdateTime = time(NULL);
-            mFileDev = 0;
-            mFileInode = 0;
-            mFileSize = 0;
-            mReadOffset = 0;
-            mLastReadTime = 0;
-            mReadCount = 0;
-            mReadDelaySum = 0;
-        }
-
-        void Reset() {
-            mFileDev = 0;
-            mFileInode = 0;
-            mFileSize = 0;
-            mReadOffset = 0;
-            mLastReadTime = 0;
-            mReadCount = 0;
-            mReadDelaySum = 0;
-            mReadBytes = 0;
-            mSkipBytes = 0;
-            mSplitLines = 0;
-            mParseFailures = 0;
-            mRegexMatchFailures = 0;
-            mParseTimeFailures = 0;
-            mHistoryFailures = 0;
-            mSendFailures = 0;
-            mErrorLine.clear();
-        }
-
-        void
-        UpdateReadInfo(uint64_t dev, uint64_t inode, uint64_t fileSize, uint64_t readOffset, int32_t lastReadTime) {
-            mFileDev = dev;
-            mFileInode = inode;
-            mFileSize = fileSize;
-            mReadOffset = readOffset;
-            mLastReadTime = lastReadTime;
-            ++mReadCount;
-            mReadDelaySum += fileSize > readOffset ? fileSize - readOffset : 0;
-        }
-
-        std::string mConfigName;
-        std::string mProjectName;
-        std::string mCategory;
-        std::string mConvertedPath;
-        std::string mHostLogPath;
-        std::vector<sls_logs::LogTag> mTags;
-        // how many bytes processed
-        uint64_t mReadBytes;
-        // how many bytes skiped
-        uint64_t mSkipBytes;
-        // how many lines processed: mSplitLines
-        // how many lines parse failed: mParseFailures
-        // how many lines send failed: mSendFailures
-        // how many lines succeed send: mSplitLines - mParseFailures - mSendFailures
-        uint64_t mSplitLines;
-        // how many lines parse fails (include all failures)
-        uint64_t mParseFailures;
-        // how many lines regex match fail(include boost crash or not match)
-        uint64_t mRegexMatchFailures;
-        // how many lines parse timeformat fail
-        uint64_t mParseTimeFailures;
-        // how many lines history data discarded
-        uint64_t mHistoryFailures;
-        // how many lines send fails
-        uint64_t mSendFailures;
-        // one sample error line
-        std::string mErrorLine;
-        int32_t mLastUpdateTime;
-
-        uint64_t mFileDev;
-        uint64_t mFileInode;
-        uint64_t mFileSize;
-        uint64_t mReadOffset;
-        int32_t mLastReadTime;
-        // ++mReadCount every call
-        uint32_t mReadCount;
-        // mReadDelaySum += mFileSize - mReadOffset every call
-        // then average delay is mReadDelaySum / mReadCount
-        uint64_t mReadDelaySum;
-    };
-
-    std::string mDumpFileName;
-    std::string mBakDumpFileName;
-    int32_t mLastSendTime;
-    int32_t mSendInterval;
-    // key : "project_name" + "_" + "category" + "_" + "filename"
-    typedef std::unordered_map<std::string, LogStoreStatistic*> LogstoreSenderStatisticsMap;
-    // key : region, value :unordered_map<std::string, LogStoreStatistic*>
-    std::map<std::string, LogstoreSenderStatisticsMap*> mAllStatisticsMap;
-    std::mutex mStatisticLock;
-
-    LogFileProfiler();
-    ~LogFileProfiler() {}
-    void DumpToLocal(int32_t curTime, bool forceSend, Json::Value& detail, Json::Value& logstore);
-    void UpdateDumpData(const sls_logs::LogGroup& logGroup, Json::Value& detail, Json::Value& logstore);
-    bool GetProfileData(sls_logs::LogGroup& logGroup, LogStoreStatistic* statistic);
-
-    LogstoreSenderStatisticsMap* MakesureRegionStatisticsMapUnlocked(const std::string& region);
-
-#ifdef APSARA_UNIT_TEST_MAIN
-    friend class EventDispatcherTest;
-    friend class SenderUnittest;
-
-    uint64_t
-    GetProfilingLines(const std::string& projectName, const std::string& category, const std::string& filename);
-    void CleanEnviroments();
-#endif
-};
-
-} // namespace logtail
diff --git a/core/monitor/MetricExportor.cpp b/core/monitor/MetricExportor.cpp
index 2440fe2106..d6417bdc00 100644
--- a/core/monitor/MetricExportor.cpp
+++ b/core/monitor/MetricExportor.cpp
@@ -16,9 +16,8 @@
 
 #include <filesystem>
 
-#include "LogFileProfiler.h"
-#include "MetricManager.h"
 #include "MetricConstants.h"
+#include "MetricManager.h"
 #include "app_config/AppConfig.h"
 #include "common/FileSystemUtil.h"
 #include "common/RuntimeUtil.h"
@@ -88,7 +87,7 @@ void MetricExportor::SendToSLS(std::map<std::string, sls_logs::LogGroup*>& logGr
     for (iter = logGroupMap.begin(); iter != logGroupMap.end(); iter++) {
         sls_logs::LogGroup* logGroup = iter->second;
         logGroup->set_category(METRIC_SLS_LOGSTORE_NAME);
-        logGroup->set_source(LogFileProfiler::mIpAddr);
+        logGroup->set_source(LoongCollectorMonitor::mIpAddr);
         logGroup->set_topic(METRIC_TOPIC_TYPE);
         if (METRIC_REGION_DEFAULT == iter->first) {
             GetProfileSender()->SendToProfileProject(GetProfileSender()->GetDefaultProfileRegion(), *logGroup);
diff --git a/core/monitor/Monitor.cpp b/core/monitor/Monitor.cpp
index e09ba3e0b0..0123fee786 100644
--- a/core/monitor/Monitor.cpp
+++ b/core/monitor/Monitor.cpp
@@ -23,7 +23,7 @@
 #include <functional>
 
 #include "app_config/AppConfig.h"
-#include "constants/Constants.h"
+#include "application/Application.h"
 #include "common/DevInode.h"
 #include "common/ExceptionBase.h"
 #include "common/LogtailCommonFlags.h"
@@ -32,16 +32,15 @@
 #include "common/StringTools.h"
 #include "common/TimeUtil.h"
 #include "common/version.h"
+#include "constants/Constants.h"
 #include "file_server/event_handler/LogInput.h"
 #include "go_pipeline/LogtailPlugin.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/AlarmManager.h"
 #include "monitor/MetricExportor.h"
 #include "plugin/flusher/sls/FlusherSLS.h"
 #include "protobuf/sls/sls_logs.pb.h"
 #include "runner/FlusherRunner.h"
-#include "application/Application.h"
 #include "sdk/Common.h"
 #ifdef __ENTERPRISE__
 #include "config/provider/EnterpriseConfigProvider.h"
@@ -58,6 +57,16 @@ DECLARE_FLAG_BOOL(check_profile_region);
 
 namespace logtail {
 
+string LoongCollectorMonitor::mHostname;
+string LoongCollectorMonitor::mIpAddr;
+string LoongCollectorMonitor::mOsDetail;
+string LoongCollectorMonitor::mUsername;
+int32_t LoongCollectorMonitor::mSystemBootTime = -1;
+string LoongCollectorMonitor::mECSInstanceID;
+string LoongCollectorMonitor::mECSUserID;
+string LoongCollectorMonitor::mECSRegionID;
+string LoongCollectorMonitor::mStartTime;
+
 inline void CpuStat::Reset() {
 #if defined(__linux__)
     mUserTime = 0;
@@ -121,6 +130,9 @@ void LogtailMonitor::Stop() {
         mIsThreadRunning = false;
     }
     mStopCV.notify_one();
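+    // Skip waiting when the monitor thread was never launched and mThreadRes holds no shared state.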
+    if (!mThreadRes.valid()) {
+        return;
+    }
     future_status s = mThreadRes.wait_for(chrono::seconds(1));
     if (s == future_status::ready) {
         LOG_INFO(sLogger, ("profiling", "stopped successfully"));
@@ -248,13 +260,13 @@ bool LogtailMonitor::SendStatusProfile(bool suicide) {
     }
 
     // the unique id of current instance
-    std::string id = sdk::Base64Enconde(LogFileProfiler::mHostname + LogFileProfiler::mIpAddr + ILOGTAIL_VERSION
-                                        + GetProcessExecutionDir());
+    std::string id = sdk::Base64Enconde(LoongCollectorMonitor::mHostname + LoongCollectorMonitor::mIpAddr
+                                        + ILOGTAIL_VERSION + GetProcessExecutionDir());
 
     // Collect status information to send.
     LogGroup logGroup;
     logGroup.set_category(category);
-    logGroup.set_source(LogFileProfiler::mIpAddr);
+    logGroup.set_source(LoongCollectorMonitor::mIpAddr);
     Log* logPtr = logGroup.add_logs();
     SetLogTime(logPtr, AppConfig::GetInstance()->EnableLogTimeAutoAdjust() ? now.tv_sec + GetTimeDelta() : now.tv_sec);
     // CPU usage of Logtail process.
@@ -278,11 +290,11 @@ bool LogtailMonitor::SendStatusProfile(bool suicide) {
     AddLogContent(logPtr, "instance_id", Application::GetInstance()->GetInstanceId());
     AddLogContent(logPtr, "instance_key", id);
     // Host informations.
-    AddLogContent(logPtr, "ip", LogFileProfiler::mIpAddr);
-    AddLogContent(logPtr, "hostname", LogFileProfiler::mHostname);
+    AddLogContent(logPtr, "ip", LoongCollectorMonitor::mIpAddr);
+    AddLogContent(logPtr, "hostname", LoongCollectorMonitor::mHostname);
     AddLogContent(logPtr, "os", OS_NAME);
-    AddLogContent(logPtr, "os_detail", LogFileProfiler::mOsDetail);
-    AddLogContent(logPtr, "user", LogFileProfiler::mUsername);
+    AddLogContent(logPtr, "os_detail", LoongCollectorMonitor::mOsDetail);
+    AddLogContent(logPtr, "user", LoongCollectorMonitor::mUsername);
 #if defined(__linux__)
     AddLogContent(logPtr, "load", GetLoadAvg());
 #endif
@@ -310,9 +322,9 @@ bool LogtailMonitor::SendStatusProfile(bool suicide) {
 
     AddLogContent(logPtr, "metric_json", MetricToString());
     AddLogContent(logPtr, "status", CheckLogtailStatus());
-    AddLogContent(logPtr, "ecs_instance_id", LogFileProfiler::mECSInstanceID);
-    AddLogContent(logPtr, "ecs_user_id", LogFileProfiler::mECSUserID);
-    AddLogContent(logPtr, "ecs_regioon_id", LogFileProfiler::mECSRegionID);
+    AddLogContent(logPtr, "ecs_instance_id", LoongCollectorMonitor::mECSInstanceID);
+    AddLogContent(logPtr, "ecs_user_id", LoongCollectorMonitor::mECSUserID);
+    AddLogContent(logPtr, "ecs_regioon_id", LoongCollectorMonitor::mECSRegionID);
     ClearMetric();
 
     if (!mIsThreadRunning)
@@ -348,7 +360,7 @@ bool LogtailMonitor::GetMemStat() {
 
     std::ifstream fin;
     fin.open(SELF_STATM_PATH);
-    if (!fin.good()) {
+    if (!fin) {
         LOG_ERROR(sLogger, ("open stat error", ""));
         return false;
     }
@@ -380,7 +392,7 @@ bool LogtailMonitor::GetCpuStat(CpuStat& cur) {
     std::ifstream fin;
     fin.open(SELF_STAT_PATH);
     uint64_t start = GetCurrentTimeInMilliSeconds();
-    if (!fin.good()) {
+    if (!fin) {
         LOG_ERROR(sLogger, ("open stat error", ""));
         return false;
     }
@@ -480,7 +492,7 @@ void LogtailMonitor::DumpToLocal(const sls_logs::LogGroup& logGroup) {
 }
 
 bool LogtailMonitor::DumpMonitorInfo(time_t monitorTime) {
-    string path = GetAgentLogDir() + "loongcollector_monitor_info";
+    string path = GetAgentLogDir() + GetMonitorInfoFileName();
     ofstream outfile(path.c_str(), ofstream::app);
     if (!outfile)
         return false;
@@ -500,10 +512,10 @@ bool LogtailMonitor::IsHostIpChanged() {
         if (ip.empty()) {
             ip = GetAnyAvailableIP();
         }
-        if (ip != LogFileProfiler::mIpAddr) {
+        if (ip != LoongCollectorMonitor::mIpAddr) {
             LOG_ERROR(sLogger,
                       ("error", "host ip changed during running, prepare to restart Logtail")(
-                          "original ip", LogFileProfiler::mIpAddr)("current ip", ip));
+                          "original ip", LoongCollectorMonitor::mIpAddr)("current ip", ip));
             return true;
         }
         return false;
@@ -528,7 +540,7 @@ std::string LogtailMonitor::GetLoadAvg() {
     std::ifstream fin;
     std::string loadStr;
     fin.open(PROC_LOAD_PATH);
-    if (!fin.good()) {
+    if (!fin) {
         LOG_ERROR(sLogger, ("open load error", ""));
         return loadStr;
     }
@@ -686,17 +698,36 @@ bool LogtailMonitor::CalOsCpuStat() {
 #endif
 
 LoongCollectorMonitor* LoongCollectorMonitor::GetInstance() {
-    static LoongCollectorMonitor instance;
-    return &instance;
+    static LoongCollectorMonitor* instance = new LoongCollectorMonitor();
+    return instance;
+}
+
+LoongCollectorMonitor::LoongCollectorMonitor() {
+    mHostname = GetHostName();
+#if defined(_MSC_VER)
+    mHostname = EncodingConverter::GetInstance()->FromACPToUTF8(mHostname);
+#endif
+    mIpAddr = GetHostIp();
+    mOsDetail = GetOsDetail();
+    mUsername = GetUsername();
+    // TODO: This may take up to 3s to construct the object. This is bad.
+    ECSMeta ecsMeta = FetchECSMeta();
+    mECSInstanceID = ecsMeta.instanceID;
+    mECSUserID = ecsMeta.userID;
+    mECSRegionID = ecsMeta.regionID;
+}
+
+LoongCollectorMonitor::~LoongCollectorMonitor() {
 }
 
 void LoongCollectorMonitor::Init() {
     // create metric record
     MetricLabels labels;
     labels.emplace_back(METRIC_LABEL_KEY_INSTANCE_ID, Application::GetInstance()->GetInstanceId());
-    labels.emplace_back(METRIC_LABEL_KEY_START_TIME, LogFileProfiler::mStartTime);
+    labels.emplace_back(METRIC_LABEL_KEY_START_TIME, mStartTime);
+    labels.emplace_back(METRIC_LABEL_KEY_HOSTNAME, mHostname);
     labels.emplace_back(METRIC_LABEL_KEY_OS, OS_NAME);
-    labels.emplace_back(METRIC_LABEL_KEY_OS_DETAIL, LogFileProfiler::mOsDetail);
+    labels.emplace_back(METRIC_LABEL_KEY_OS_DETAIL, mOsDetail);
     labels.emplace_back(METRIC_LABEL_KEY_UUID, Application::GetInstance()->GetUUID());
     labels.emplace_back(METRIC_LABEL_KEY_VERSION, ILOGTAIL_VERSION);
     DynamicMetricLabels dynamicLabels;
diff --git a/core/monitor/Monitor.h b/core/monitor/Monitor.h
index 4705236d32..c66a47218e 100644
--- a/core/monitor/Monitor.h
+++ b/core/monitor/Monitor.h
@@ -21,8 +21,8 @@
 #include <mutex>
 #include <string>
 
-#include "MetricManager.h"
 #include "MetricConstants.h"
+#include "MetricManager.h"
 #include "MetricStore.h"
 
 #if defined(_MSC_VER)
@@ -199,7 +199,20 @@ class LoongCollectorMonitor {
     void SetAgentOpenFdTotal(uint64_t total) { mAgentOpenFdTotal->Set(total); }
     void SetAgentConfigTotal(uint64_t total) { mAgentConfigTotal->Set(total); }
 
+    static std::string mHostname;
+    static std::string mIpAddr;
+    static std::string mOsDetail;
+    static std::string mUsername;
+    static int32_t mSystemBootTime;
+    static std::string mECSInstanceID;
+    static std::string mECSUserID;
+    static std::string mECSRegionID;
+    static std::string mStartTime;
+
 private:
+    LoongCollectorMonitor();
+    ~LoongCollectorMonitor();
+
     // MetricRecord
     MetricsRecordRef mMetricsRecordRef;
 
diff --git a/core/monitor/metric_constants/AgentMetrics.cpp b/core/monitor/metric_constants/AgentMetrics.cpp
index ad17130e6c..47a6866292 100644
--- a/core/monitor/metric_constants/AgentMetrics.cpp
+++ b/core/monitor/metric_constants/AgentMetrics.cpp
@@ -22,6 +22,7 @@ namespace logtail {
 const string METRIC_LABEL_KEY_ALIUIDS = "aliuids";
 const string METRIC_LABEL_KEY_INSTANCE_ID = "instance_id";
 const string METRIC_LABEL_KEY_START_TIME = "start_time";
+const string METRIC_LABEL_KEY_HOSTNAME = "hostname";
 const string METRIC_LABEL_KEY_OS = "os";
 const string METRIC_LABEL_KEY_OS_DETAIL = "os_detail";
 const string METRIC_LABEL_KEY_PROJECT = "project";
diff --git a/core/monitor/metric_constants/MetricConstants.h b/core/monitor/metric_constants/MetricConstants.h
index 73a62d30bf..95a6eb1af2 100644
--- a/core/monitor/metric_constants/MetricConstants.h
+++ b/core/monitor/metric_constants/MetricConstants.h
@@ -26,6 +26,7 @@ namespace logtail {
 extern const std::string METRIC_LABEL_KEY_ALIUIDS;
 extern const std::string METRIC_LABEL_KEY_INSTANCE_ID;
 extern const std::string METRIC_LABEL_KEY_START_TIME;
+extern const std::string METRIC_LABEL_KEY_HOSTNAME;
 extern const std::string METRIC_LABEL_KEY_OS;
 extern const std::string METRIC_LABEL_KEY_OS_DETAIL;
 extern const std::string METRIC_LABEL_KEY_PROJECT;
diff --git a/core/pipeline/Pipeline.cpp b/core/pipeline/Pipeline.cpp
index 76e49682fc..1f8e4d5868 100644
--- a/core/pipeline/Pipeline.cpp
+++ b/core/pipeline/Pipeline.cpp
@@ -20,6 +20,7 @@
 #include <cstdint>
 #include <utility>
 
+#include "app_config/AppConfig.h"
 #include "common/Flags.h"
 #include "common/ParamExtractor.h"
 #include "go_pipeline/LogtailPlugin.h"
@@ -166,7 +167,7 @@ bool Pipeline::Init(PipelineConfig&& config) {
             = PluginRegistry::GetInstance()->CreateFlusher(pluginType, GenNextPluginMeta(false));
         if (flusher) {
             Json::Value optionalGoPipeline;
-            if (!flusher->Init(detail, mContext, optionalGoPipeline)) {
+            if (!flusher->Init(detail, mContext, i, optionalGoPipeline)) {
                 return false;
             }
             mFlushers.emplace_back(std::move(flusher));
@@ -334,8 +335,8 @@ bool Pipeline::Init(PipelineConfig&& config) {
 }
 
 void Pipeline::Start() {
-#ifndef APSARA_UNIT_TEST_MAIN
-    // TODO: 应该保证指定时间内返回,如果无法返回,将配置放入startDisabled里
+    // #ifndef APSARA_UNIT_TEST_MAIN
+    //  TODO: should guarantee return within the specified time; otherwise, put the config into startDisabled
     for (const auto& flusher : mFlushers) {
         flusher->Start();
     }
@@ -355,7 +356,7 @@ void Pipeline::Start() {
     }
 
     mStartTime->Set(chrono::duration_cast<chrono::seconds>(chrono::system_clock::now().time_since_epoch()).count());
-#endif
+    // #endif
     LOG_INFO(sLogger, ("pipeline start", "succeeded")("config", mName));
 }
 
@@ -415,7 +416,6 @@ bool Pipeline::FlushBatch() {
 }
 
 void Pipeline::Stop(bool isRemoving) {
-#ifndef APSARA_UNIT_TEST_MAIN
     // TODO: 应该保证指定时间内返回,如果无法返回,将配置放入stopDisabled里
     for (const auto& input : mInputs) {
         input->Stop(isRemoving);
@@ -439,7 +439,6 @@ void Pipeline::Stop(bool isRemoving) {
     for (const auto& flusher : mFlushers) {
         flusher->Stop(isRemoving);
     }
-#endif
     LOG_INFO(sLogger, ("pipeline stop", "succeeded")("config", mName));
 }
 
@@ -497,7 +496,7 @@ bool Pipeline::LoadGoPipelines() const {
                                                         mContext.GetRegion(),
                                                         mContext.GetLogstoreKey())) {
             LOG_ERROR(mContext.GetLogger(),
-                      ("failed to init pipeline", "Go pipeline is invalid, see go_plugin.LOG for detail")(
+                      ("failed to init pipeline", "Go pipeline is invalid, see " + GetPluginLogName() + " for detail")(
                           "Go pipeline num", "2")("Go pipeline content", content)("config", mName));
             AlarmManager::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM,
                                                    "Go pipeline is invalid, content: " + content + ", config: " + mName,
@@ -516,7 +515,7 @@ bool Pipeline::LoadGoPipelines() const {
                                                         mContext.GetRegion(),
                                                         mContext.GetLogstoreKey())) {
             LOG_ERROR(mContext.GetLogger(),
-                      ("failed to init pipeline", "Go pipeline is invalid, see go_plugin.LOG for detail")(
+                      ("failed to init pipeline", "Go pipeline is invalid, see " + GetPluginLogName() + " for detail")(
                           "Go pipeline num", "1")("Go pipeline content", content)("config", mName));
             AlarmManager::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM,
                                                    "Go pipeline is invalid, content: " + content + ", config: " + mName,
diff --git a/core/pipeline/PipelineContext.h b/core/pipeline/PipelineContext.h
index 7bbafa7b0a..2c4a7f580d 100644
--- a/core/pipeline/PipelineContext.h
+++ b/core/pipeline/PipelineContext.h
@@ -23,7 +23,6 @@
 
 #include "logger/Logger.h"
 #include "models/PipelineEventGroup.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/AlarmManager.h"
 #include "pipeline/GlobalConfig.h"
 #include "pipeline/queue/QueueKey.h"
@@ -83,7 +82,7 @@ class PipelineContext {
     void SetIsFirstProcessorApsaraFlag(bool flag) { mIsFirstProcessorApsara = flag; }
     bool IsFirstProcessorJson() const { return mIsFirstProcessorJson; }
     void SetIsFirstProcessorJsonFlag(bool flag) { mIsFirstProcessorJson = flag; }
-    bool IsExactlyOnceEnabled() const {return mEnableExactlyOnce; }
+    bool IsExactlyOnceEnabled() const { return mEnableExactlyOnce; }
     void SetExactlyOnceFlag(bool flag) { mEnableExactlyOnce = flag; }
     bool HasNativeProcessors() const { return mHasNativeProcessors; }
     void SetHasNativeProcessorsFlag(bool flag) { mHasNativeProcessors = flag; }
@@ -91,7 +90,6 @@ class PipelineContext {
     void SetIsFlushingThroughGoPipelineFlag(bool flag) { mIsFlushingThroughGoPipeline = flag; }
 
     ProcessProfile& GetProcessProfile() const { return mProcessProfile; }
-    // LogFileProfiler& GetProfiler() { return *mProfiler; }
     const Logger::logger& GetLogger() const { return mLogger; }
     AlarmManager& GetAlarm() const { return *mAlarm; };
 
@@ -114,7 +112,6 @@ class PipelineContext {
     bool mIsFlushingThroughGoPipeline = false;
 
     mutable ProcessProfile mProcessProfile;
-    // LogFileProfiler* mProfiler = LogFileProfiler::GetInstance();
     Logger::logger mLogger = sLogger;
     AlarmManager* mAlarm = AlarmManager::GetInstance();
 };
diff --git a/core/pipeline/PipelineManager.cpp b/core/pipeline/PipelineManager.cpp
index 1fe4709564..57abcde874 100644
--- a/core/pipeline/PipelineManager.cpp
+++ b/core/pipeline/PipelineManager.cpp
@@ -40,7 +40,7 @@ PipelineManager::PipelineManager()
     : mInputRunners({
           PrometheusInputRunner::GetInstance(),
 #if defined(__linux__) && !defined(__ANDROID__)
-          ebpf::eBPFServer::GetInstance(),
+              ebpf::eBPFServer::GetInstance(),
 #endif
       }) {
 }
@@ -78,7 +78,8 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) {
         DecreasePluginUsageCnt(iter->second->GetPluginStatistics());
         iter->second->RemoveProcessQueue();
         mPipelineNameEntityMap.erase(iter);
-        ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(name, ConfigFeedbackStatus::DELETED);
+        ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(name,
+                                                                                     ConfigFeedbackStatus::DELETED);
     }
     for (auto& config : diff.mModified) {
         auto p = BuildPipeline(std::move(config)); // auto reuse old pipeline's process queue and sender queue
@@ -92,8 +93,8 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) {
                 config.mProject,
                 config.mLogstore,
                 config.mRegion);
-            ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName,
-                                                                               ConfigFeedbackStatus::FAILED);
+            ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                         ConfigFeedbackStatus::FAILED);
             continue;
         }
         LOG_INFO(sLogger,
@@ -106,7 +107,8 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) {
         mPipelineNameEntityMap[config.mName] = p;
         IncreasePluginUsageCnt(p->GetPluginStatistics());
         p->Start();
-        ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED);
+        ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                     ConfigFeedbackStatus::APPLIED);
     }
     for (auto& config : diff.mAdded) {
         auto p = BuildPipeline(std::move(config));
@@ -119,8 +121,8 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) {
                 config.mProject,
                 config.mLogstore,
                 config.mRegion);
-            ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName,
-                                                                               ConfigFeedbackStatus::FAILED);
+            ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                         ConfigFeedbackStatus::FAILED);
             continue;
         }
         LOG_INFO(sLogger,
@@ -128,7 +130,8 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) {
         mPipelineNameEntityMap[config.mName] = p;
         IncreasePluginUsageCnt(p->GetPluginStatistics());
         p->Start();
-        ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED);
+        ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                     ConfigFeedbackStatus::APPLIED);
     }
 
 #ifndef APSARA_UNIT_TEST_MAIN
diff --git a/core/pipeline/batch/Batcher.h b/core/pipeline/batch/Batcher.h
index 0d47087ffe..a263d82228 100644
--- a/core/pipeline/batch/Batcher.h
+++ b/core/pipeline/batch/Batcher.h
@@ -176,7 +176,7 @@ class Batcher {
                         }
                         if (mGroupQueue->IsEmpty()) {
                             TimeoutFlushManager::GetInstance()->UpdateRecord(mFlusher->GetContext().GetConfigName(),
-                                                                             0,
+                                                                             mFlusher->GetFlusherIndex(),
                                                                              0,
                                                                              mGroupFlushStrategy->GetTimeoutSecs(),
                                                                              mFlusher);
@@ -193,8 +193,11 @@ class Batcher {
                                g.GetSourceBuffer(),
                                g.GetExactlyOnceCheckpoint(),
                                g.GetMetadata(EventGroupMetaKey::SOURCE_ID));
-                    TimeoutFlushManager::GetInstance()->UpdateRecord(
-                        mFlusher->GetContext().GetConfigName(), 0, key, mEventFlushStrategy.GetTimeoutSecs(), mFlusher);
+                    TimeoutFlushManager::GetInstance()->UpdateRecord(mFlusher->GetContext().GetConfigName(),
+                                                                     mFlusher->GetFlusherIndex(),
+                                                                     key,
+                                                                     mEventFlushStrategy.GetTimeoutSecs(),
+                                                                     mFlusher);
                     mBufferedGroupsTotal->Add(1);
                     mBufferedDataSizeByte->Add(item.DataSize());
                 } else if (i == 0) {
@@ -243,8 +246,11 @@ class Batcher {
             mGroupQueue->Flush(res);
         }
         if (mGroupQueue->IsEmpty()) {
-            TimeoutFlushManager::GetInstance()->UpdateRecord(
-                mFlusher->GetContext().GetConfigName(), 0, 0, mGroupFlushStrategy->GetTimeoutSecs(), mFlusher);
+            TimeoutFlushManager::GetInstance()->UpdateRecord(mFlusher->GetContext().GetConfigName(),
+                                                             mFlusher->GetFlusherIndex(),
+                                                             0,
+                                                             mGroupFlushStrategy->GetTimeoutSecs(),
+                                                             mFlusher);
         }
         iter->second.Flush(mGroupQueue.value());
         mEventQueueMap.erase(iter);
diff --git a/core/pipeline/plugin/instance/FlusherInstance.cpp b/core/pipeline/plugin/instance/FlusherInstance.cpp
index 38cb9dd3b8..181e5c21e8 100644
--- a/core/pipeline/plugin/instance/FlusherInstance.cpp
+++ b/core/pipeline/plugin/instance/FlusherInstance.cpp
@@ -20,9 +20,10 @@ using namespace std;
 
 namespace logtail {
 
-bool FlusherInstance::Init(const Json::Value& config, PipelineContext& context, Json::Value& optionalGoPipeline) {
+bool FlusherInstance::Init(const Json::Value& config, PipelineContext& context, size_t flusherIdx, Json::Value& optionalGoPipeline) {
     mPlugin->SetContext(context);
     mPlugin->SetPluginID(PluginID());
+    mPlugin->SetFlusherIndex(flusherIdx);
     mPlugin->SetMetricsRecordRef(Name(), PluginID());
     if (!mPlugin->Init(config, optionalGoPipeline)) {
         return false;
diff --git a/core/pipeline/plugin/instance/FlusherInstance.h b/core/pipeline/plugin/instance/FlusherInstance.h
index 69bbba3db2..68089f60b1 100644
--- a/core/pipeline/plugin/instance/FlusherInstance.h
+++ b/core/pipeline/plugin/instance/FlusherInstance.h
@@ -31,12 +31,13 @@ namespace logtail {
 
 class FlusherInstance : public PluginInstance {
 public:
-    FlusherInstance(Flusher* plugin, const PluginInstance::PluginMeta& pluginMeta) : PluginInstance(pluginMeta), mPlugin(plugin) {}
+    FlusherInstance(Flusher* plugin, const PluginInstance::PluginMeta& pluginMeta)
+        : PluginInstance(pluginMeta), mPlugin(plugin) {}
 
     const std::string& Name() const override { return mPlugin->Name(); };
     const Flusher* GetPlugin() const { return mPlugin.get(); }
 
-    bool Init(const Json::Value& config, PipelineContext& context, Json::Value& optionalGoPipeline);
+    bool Init(const Json::Value& config, PipelineContext& context, size_t flusherIdx, Json::Value& optionalGoPipeline);
     bool Start() { return mPlugin->Start(); }
     bool Stop(bool isPipelineRemoving) { return mPlugin->Stop(isPipelineRemoving); }
     bool Send(PipelineEventGroup&& g);
diff --git a/core/pipeline/plugin/interface/Flusher.h b/core/pipeline/plugin/interface/Flusher.h
index 6bf3301477..232020df34 100644
--- a/core/pipeline/plugin/interface/Flusher.h
+++ b/core/pipeline/plugin/interface/Flusher.h
@@ -44,6 +44,8 @@ class Flusher : public Plugin {
 
     QueueKey GetQueueKey() const { return mQueueKey; }
     void SetPluginID(const std::string& pluginID) { mPluginID = pluginID; }
+    size_t GetFlusherIndex() { return mIndex; }
+    void SetFlusherIndex(size_t idx) { mIndex = idx; }
     const std::string& GetPluginID() const { return mPluginID; }
 
 protected:
@@ -54,6 +56,7 @@ class Flusher : public Plugin {
 
     QueueKey mQueueKey;
     std::string mPluginID;
+    size_t mIndex = 0;
 
 #ifdef APSARA_UNIT_TEST_MAIN
     friend class FlusherInstanceUnittest;
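
// Editor's sketch (not part of the patch): why Batcher now passes GetFlusherIndex() instead of a
// hard-coded 0 into TimeoutFlushManager::UpdateRecord(). The real types are simplified stand-ins;
// only the (config name, flusher index, batch key) keying idea is taken from the diff above.
#include <cstdio>
#include <map>
#include <string>
#include <tuple>

int main() {
    using Key = std::tuple<std::string, size_t, int64_t>; // (config name, flusher index, batch key)
    std::map<Key, int> timeoutRecords;
    // With the index hard-coded to 0, two flushers of the same pipeline overwrote each other's
    // timeout records; the per-flusher index set in Pipeline::Init keeps them distinct.
    timeoutRecords[Key{"my_config", 0, 0}] = 3; // flusher #0
    timeoutRecords[Key{"my_config", 1, 0}] = 5; // flusher #1, no collision
    std::printf("records: %zu\n", timeoutRecords.size()); // prints 2
    return 0;
}
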
diff --git a/core/pipeline/serializer/SLSSerializer.cpp b/core/pipeline/serializer/SLSSerializer.cpp
index 9ffb6b1541..6b9ec3888d 100644
--- a/core/pipeline/serializer/SLSSerializer.cpp
+++ b/core/pipeline/serializer/SLSSerializer.cpp
@@ -15,9 +15,12 @@
 #include "pipeline/serializer/SLSSerializer.h"
 
 #include "common/Flags.h"
+#include "constants/SpanConstants.h"
 #include "common/compression/CompressType.h"
 #include "plugin/flusher/sls/FlusherSLS.h"
 #include "protobuf/sls/LogGroupSerializer.h"
+#include <json/json.h>
+#include <array>
 
 DECLARE_FLAG_INT32(max_send_log_group_size);
 
@@ -25,6 +28,29 @@ using namespace std;
 
 namespace logtail {
 
+std::string SerializeSpanLinksToString(const SpanEvent& event) {
+    if (event.GetLinks().empty()) {
+        return "";
+    }
+    Json::Value jsonLinks(Json::arrayValue);
+    for (const auto& link : event.GetLinks()) {
+        jsonLinks.append(link.ToJson());
+    }
+    Json::StreamWriterBuilder writer;
+    return Json::writeString(writer, jsonLinks);
+}
+std::string SerializeSpanEventsToString(const SpanEvent& event) {
+    if (event.GetEvents().empty()) {
+        return "";
+    }
+    Json::Value jsonEvents(Json::arrayValue);
+    for (const auto& innerEvent : event.GetEvents()) {
+        jsonEvents.append(innerEvent.ToJson());
+    }
+    Json::StreamWriterBuilder writer;
+    return Json::writeString(writer, jsonEvents);
+}
+
 template <>
 bool Serializer<vector<CompressedLogGroup>>::DoSerialize(vector<CompressedLogGroup>&& p,
                                                          std::string& output,
@@ -68,9 +94,10 @@ bool SLSEventGroupSerializer::Serialize(BatchedEvents&& group, string& res, stri
     // calculate serialized logGroup size first, where some critical results can be cached
     vector<size_t> logSZ(group.mEvents.size());
     vector<pair<string, size_t>> metricEventContentCache(group.mEvents.size());
+    vector<array<string, 6>> spanEventContentCache(group.mEvents.size());
     size_t logGroupSZ = 0;
     switch (eventType) {
-        case PipelineEvent::Type::LOG:
+        case PipelineEvent::Type::LOG: {
             for (size_t i = 0; i < group.mEvents.size(); ++i) {
                 const auto& e = group.mEvents[i].Cast<LogEvent>();
                 if (e.Empty()) {
@@ -83,7 +110,8 @@ bool SLSEventGroupSerializer::Serialize(BatchedEvents&& group, string& res, stri
                 logGroupSZ += GetLogSize(contentSZ, enableNs && e.GetTimestampNanosecond(), logSZ[i]);
             }
             break;
-        case PipelineEvent::Type::METRIC:
+        }
+        case PipelineEvent::Type::METRIC: {
             for (size_t i = 0; i < group.mEvents.size(); ++i) {
                 const auto& e = group.mEvents[i].Cast<MetricEvent>();
                 if (e.Is<UntypedSingleValue>()) {
@@ -107,7 +135,51 @@ bool SLSEventGroupSerializer::Serialize(BatchedEvents&& group, string& res, stri
                 logGroupSZ += GetLogSize(contentSZ, false, logSZ[i]);
             }
             break;
+        }
         case PipelineEvent::Type::SPAN:
+            for (size_t i = 0; i < group.mEvents.size(); ++i) {
+                const auto& e = group.mEvents[i].Cast<SpanEvent>();
+                size_t contentSZ = 0;
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_TRACE_ID.size(), e.GetTraceId().size());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_SPAN_ID.size(), e.GetSpanId().size());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_PARENT_ID.size(), e.GetParentSpanId().size());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_SPAN_NAME.size(), e.GetName().size());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_SPAN_KIND.size(), GetKindString(e.GetKind()).size());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_STATUS_CODE.size(), GetStatusString(e.GetStatus()).size());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_TRACE_STATE.size(), e.GetTraceState().size());
+
+                // set tags and scope tags
+                Json::Value jsonVal;
+                for (auto it = e.TagsBegin(); it != e.TagsEnd(); ++it) {
+                    jsonVal[it->first.to_string()] = it->second.to_string();
+                }
+                for (auto it = e.ScopeTagsBegin(); it != e.ScopeTagsEnd(); ++it) {
+                    jsonVal[it->first.to_string()] = it->second.to_string();
+                }
+                Json::StreamWriterBuilder writer;
+                std::string attrString = Json::writeString(writer, jsonVal);
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_ATTRIBUTES.size(), attrString.size());
+                spanEventContentCache[i][0] = std::move(attrString);
+
+                auto linkString = SerializeSpanLinksToString(e);
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_LINKS.size(), linkString.size());
+                spanEventContentCache[i][1] = std::move(linkString);
+                auto eventString = SerializeSpanEventsToString(e);
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_EVENTS.size(), eventString.size());
+                spanEventContentCache[i][2] = std::move(eventString);
+
+                // time related
+                auto startTsNs = std::to_string(e.GetStartTimeNs());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_START_TIME_NANO.size(), startTsNs.size());
+                spanEventContentCache[i][3] = std::move(startTsNs);
+                auto endTsNs = std::to_string(e.GetEndTimeNs());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_END_TIME_NANO.size(), endTsNs.size());
+                spanEventContentCache[i][4] = std::move(endTsNs);
+                auto durationNs = std::to_string(e.GetEndTimeNs() - e.GetStartTimeNs());
+                contentSZ += GetLogContentSize(DEFAULT_TRACE_TAG_DURATION.size(), durationNs.size());
+                spanEventContentCache[i][5] = std::move(durationNs);
+                logGroupSZ += GetLogSize(contentSZ, false, logSZ[i]);
+            }
             break;
         case PipelineEvent::Type::RAW:
             for (size_t i = 0; i < group.mEvents.size(); ++i) {
@@ -174,6 +246,37 @@ bool SLSEventGroupSerializer::Serialize(BatchedEvents&& group, string& res, stri
             }
             break;
         case PipelineEvent::Type::SPAN:
+            for (size_t i = 0; i < group.mEvents.size(); ++i) {
+                const auto& spanEvent = group.mEvents[i].Cast<SpanEvent>();
+
+                serializer.StartToAddLog(logSZ[i]);
+                serializer.AddLogTime(spanEvent.GetTimestamp());
+                // set trace_id span_id span_kind status etc
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_TRACE_ID, spanEvent.GetTraceId());
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_SPAN_ID, spanEvent.GetSpanId());
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_PARENT_ID, spanEvent.GetParentSpanId());
+                // span_name
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_SPAN_NAME, spanEvent.GetName());
+                // span_kind
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_SPAN_KIND, GetKindString(spanEvent.GetKind()));
+                // status_code
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_STATUS_CODE, GetStatusString(spanEvent.GetStatus()));
+                // trace state
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_TRACE_STATE, spanEvent.GetTraceState());
+
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_ATTRIBUTES, spanEventContentCache[i][0]);
+
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_LINKS, spanEventContentCache[i][1]);
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_EVENTS, spanEventContentCache[i][2]);
+
+                // start_time
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_START_TIME_NANO, spanEventContentCache[i][3]);
+                // end_time
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_END_TIME_NANO, spanEventContentCache[i][4]);
+                // duration
+                serializer.AddLogContent(DEFAULT_TRACE_TAG_DURATION, spanEventContentCache[i][5]);
+
+            }
             break;
         case PipelineEvent::Type::RAW:
             for (size_t i = 0; i < group.mEvents.size(); ++i) {
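
// Editor's sketch (not part of the patch): how the SPAN branch above flattens span tags and scope
// tags into the single JSON string stored under DEFAULT_TRACE_TAG_ATTRIBUTES. The maps below are
// hypothetical sample data; only the jsoncpp usage mirrors the serializer code.
#include <json/json.h>

#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> tags = {{"http.method", "GET"}};
    std::map<std::string, std::string> scopeTags = {{"otel.scope.name", "demo"}};
    Json::Value jsonVal;
    for (const auto& [k, v] : tags) {
        jsonVal[k] = v;
    }
    for (const auto& [k, v] : scopeTags) {
        jsonVal[k] = v; // scope tags are merged into the same JSON object as the span tags
    }
    Json::StreamWriterBuilder writer;
    std::cout << Json::writeString(writer, jsonVal) << std::endl; // prints the merged attributes object
    return 0;
}
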
diff --git a/core/plugin/flusher/sls/DiskBufferWriter.cpp b/core/plugin/flusher/sls/DiskBufferWriter.cpp
index 2285658a1d..dd60ba7211 100644
--- a/core/plugin/flusher/sls/DiskBufferWriter.cpp
+++ b/core/plugin/flusher/sls/DiskBufferWriter.cpp
@@ -67,7 +67,7 @@ void DiskBufferWriter::Stop() {
         mIsSendBufferThreadRunning = false;
     }
     mStopCV.notify_one();
-    {
+    if (mBufferWriterThreadRes.valid()) {
         future_status s = mBufferWriterThreadRes.wait_for(chrono::seconds(5));
         if (s == future_status::ready) {
             LOG_INFO(sLogger, ("disk buffer writer", "stopped successfully"));
@@ -75,7 +75,7 @@ void DiskBufferWriter::Stop() {
             LOG_WARNING(sLogger, ("disk buffer writer", "forced to stopped"));
         }
     }
-    {
+    if (mBufferSenderThreadRes.valid()) {
         // timeout should be larger than network timeout, which is 15 for now
         future_status s = mBufferSenderThreadRes.wait_for(chrono::seconds(20));
         if (s == future_status::ready) {
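
// Editor's sketch (not part of the patch): the guard added above. Calling wait_for() on a
// std::future that never received shared state (e.g. Stop() runs before Init() launched the
// thread) is undefined behavior, so the writer/sender futures are now checked with valid() first.
#include <chrono>
#include <cstdio>
#include <future>

int main() {
    std::future<void> threadRes; // no thread was ever launched, so the future holds no state
    if (threadRes.valid()) {
        threadRes.wait_for(std::chrono::seconds(5));
    } else {
        std::puts("thread never started, nothing to wait for");
    }
    return 0;
}
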
diff --git a/core/plugin/flusher/sls/SLSClientManager.cpp b/core/plugin/flusher/sls/SLSClientManager.cpp
index 33a0381bef..697dedb45b 100644
--- a/core/plugin/flusher/sls/SLSClientManager.cpp
+++ b/core/plugin/flusher/sls/SLSClientManager.cpp
@@ -21,7 +21,7 @@
 #include "common/StringTools.h"
 #include "common/TimeUtil.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
+#include "monitor/Monitor.h"
 #include "plugin/flusher/sls/FlusherSLS.h"
 #include "plugin/flusher/sls/SendResult.h"
 #include "sdk/Exception.h"
@@ -122,7 +122,7 @@ void SLSClientManager::Init() {
                                                   STRING_FLAG(default_access_key_id),
                                                   STRING_FLAG(default_access_key),
                                                   INT32_FLAG(sls_client_send_timeout),
-                                                  LogFileProfiler::mIpAddr,
+                                                  LoongCollectorMonitor::mIpAddr,
                                                   AppConfig::GetInstance()->GetBindInterface()));
         SLSControl::GetInstance()->SetSlsSendClientCommonParam(mProbeNetworkClient.get());
         mProbeNetworkThreadRes = async(launch::async, &SLSClientManager::ProbeNetworkThread, this);
@@ -132,7 +132,7 @@ void SLSClientManager::Init() {
                                                   STRING_FLAG(default_access_key_id),
                                                   STRING_FLAG(default_access_key),
                                                   INT32_FLAG(sls_client_send_timeout),
-                                                  LogFileProfiler::mIpAddr,
+                                                  LoongCollectorMonitor::mIpAddr,
                                                   AppConfig::GetInstance()->GetBindInterface()));
         SLSControl::GetInstance()->SetSlsSendClientCommonParam(mUpdateRealIpClient.get());
         mUpdateRealIpThreadRes = async(launch::async, &SLSClientManager::UpdateRealIpThread, this);
@@ -149,7 +149,7 @@ void SLSClientManager::Stop() {
         mIsUpdateRealIpThreadRunning = false;
     }
     mStopCV.notify_all();
-    if (mDataServerSwitchPolicy == EndpointSwitchPolicy::DESIGNATED_FIRST) {
+    if (mDataServerSwitchPolicy == EndpointSwitchPolicy::DESIGNATED_FIRST && mProbeNetworkThreadRes.valid()) {
         future_status s = mProbeNetworkThreadRes.wait_for(chrono::seconds(1));
         if (s == future_status::ready) {
             LOG_INFO(sLogger, ("sls endpoint probe", "stopped successfully"));
@@ -157,7 +157,7 @@ void SLSClientManager::Stop() {
             LOG_WARNING(sLogger, ("sls endpoint probe", "forced to stopped"));
         }
     }
-    if (BOOL_FLAG(send_prefer_real_ip)) {
+    if (BOOL_FLAG(send_prefer_real_ip) && mUpdateRealIpThreadRes.valid()) {
         future_status s = mUpdateRealIpThreadRes.wait_for(chrono::seconds(1));
         if (s == future_status::ready) {
             LOG_INFO(sLogger, ("sls real ip update", "stopped successfully"));
@@ -232,7 +232,7 @@ sdk::Client* SLSClientManager::GetClient(const string& region, const string& ali
                                                               "",
                                                               "",
                                                               INT32_FLAG(sls_client_send_timeout),
-                                                              LogFileProfiler::mIpAddr,
+                                                              LoongCollectorMonitor::mIpAddr,
                                                               AppConfig::GetInstance()->GetBindInterface());
     SLSControl::GetInstance()->SetSlsSendClientCommonParam(client.get());
     ResetClientPort(region, client.get());
diff --git a/core/plugin/flusher/sls/SLSResponse.cpp b/core/plugin/flusher/sls/SLSResponse.cpp
index b55896aa1a..d9fa405479 100644
--- a/core/plugin/flusher/sls/SLSResponse.cpp
+++ b/core/plugin/flusher/sls/SLSResponse.cpp
@@ -34,7 +34,7 @@ bool SLSResponse::Parse(const HttpResponse& response) {
     mStatusCode = response.GetStatusCode();
 
     if (mStatusCode == 0) {
-        mErrorCode = sdk::LOG_REQUEST_TIMEOUT;
+        mErrorCode = sdk::LOGE_REQUEST_TIMEOUT;
         mErrorMsg = "Request timeout";
     } else if (mStatusCode != 200) {
         try {
diff --git a/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp b/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp
index 06de253a4c..95b81a569a 100644
--- a/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp
+++ b/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp
@@ -3,10 +3,10 @@
 #include <json/json.h>
 
 #include "common/StringTools.h"
-#include "models/LogEvent.h"
 #include "models/MetricEvent.h"
 #include "models/PipelineEventGroup.h"
 #include "models/PipelineEventPtr.h"
+#include "models/RawEvent.h"
 #include "prometheus/Constants.h"
 
 using namespace std;
@@ -43,7 +43,7 @@ void ProcessorPromParseMetricNative::Process(PipelineEventGroup& eGroup) {
 }
 
 bool ProcessorPromParseMetricNative::IsSupportedEvent(const PipelineEventPtr& e) const {
-    return e.Is<LogEvent>();
+    return e.Is<RawEvent>();
 }
 
 bool ProcessorPromParseMetricNative::ProcessEvent(PipelineEventPtr& e,
@@ -53,9 +53,9 @@ bool ProcessorPromParseMetricNative::ProcessEvent(PipelineEventPtr& e,
     if (!IsSupportedEvent(e)) {
         return false;
     }
-    auto& sourceEvent = e.Cast<LogEvent>();
+    auto& sourceEvent = e.Cast<RawEvent>();
     std::unique_ptr<MetricEvent> metricEvent = eGroup.CreateMetricEvent(true);
-    if (parser.ParseLine(sourceEvent.GetContent(prometheus::PROMETHEUS), *metricEvent)) {
+    if (parser.ParseLine(sourceEvent.GetContent(), *metricEvent)) {
         metricEvent->SetTag(string(prometheus::NAME), metricEvent->GetName());
         newEvents.emplace_back(std::move(metricEvent), true, nullptr);
     }
diff --git a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp
index c5deeb149c..f6cbb0b79b 100644
--- a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp
+++ b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp
@@ -206,7 +206,6 @@ void ProcessorPromRelabelMetricNative::AddMetric(PipelineEventGroup& metricGroup
     metricEvent->SetValue<UntypedSingleValue>(value);
     metricEvent->SetTimestamp(timestamp, nanoSec);
     metricEvent->SetTag(prometheus::NAME, name);
-    metricEvent->SetTag(prometheus::LC_SCRAPER, mLoongCollectorScraper);
     for (const auto& [k, v] : targetTags) {
         metricEvent->SetTag(k, v);
     }
diff --git a/core/plugin/processor/inner/ProcessorTagNative.cpp b/core/plugin/processor/inner/ProcessorTagNative.cpp
index e9f9926339..dbabe3fc34 100644
--- a/core/plugin/processor/inner/ProcessorTagNative.cpp
+++ b/core/plugin/processor/inner/ProcessorTagNative.cpp
@@ -21,8 +21,9 @@
 #include "app_config/AppConfig.h"
 #include "application/Application.h"
 #include "common/Flags.h"
-#include "protobuf/sls/sls_logs.pb.h"
+#include "monitor/Monitor.h"
 #include "pipeline/Pipeline.h"
+#include "protobuf/sls/sls_logs.pb.h"
 #ifdef __ENTERPRISE__
 #include "config/provider/EnterpriseConfigProvider.h"
 #endif
@@ -69,8 +70,8 @@ void ProcessorTagNative::Process(PipelineEventGroup& logGroup) {
     }
 
     // process level
-    logGroup.SetTagNoCopy(LOG_RESERVED_KEY_HOSTNAME, LogFileProfiler::mHostname);
-    logGroup.SetTagNoCopy(LOG_RESERVED_KEY_SOURCE, LogFileProfiler::mIpAddr);
+    logGroup.SetTagNoCopy(LOG_RESERVED_KEY_HOSTNAME, LoongCollectorMonitor::mHostname);
+    logGroup.SetTagNoCopy(LOG_RESERVED_KEY_SOURCE, LoongCollectorMonitor::mIpAddr);
     auto sb = logGroup.GetSourceBuffer()->CopyString(Application::GetInstance()->GetUUID());
     logGroup.SetTagNoCopy(LOG_RESERVED_KEY_MACHINE_UUID, StringView(sb.data, sb.size));
     static const vector<sls_logs::LogTag>& sEnvTags = AppConfig::GetInstance()->GetEnvTags();
diff --git a/core/prometheus/Constants.h b/core/prometheus/Constants.h
index f4a823b4f4..55c6712e12 100644
--- a/core/prometheus/Constants.h
+++ b/core/prometheus/Constants.h
@@ -98,7 +98,6 @@ const char* const SCRAPE_SAMPLES_POST_METRIC_RELABELING = "scrape_samples_post_m
 const char* const SCRAPE_SAMPLES_SCRAPED = "scrape_samples_scraped";
 const char* const SCRAPE_TIMEOUT_SECONDS = "scrape_timeout_seconds";
 const char* const UP = "up";
-const char* const LC_SCRAPER = "lc_scraper";
 
 const char* const SCRAPE_TIMESTAMP_MILLISEC = "scrape_timestamp_millisec";
 
diff --git a/core/prometheus/PrometheusInputRunner.cpp b/core/prometheus/PrometheusInputRunner.cpp
index 59cc58d4d5..ca49e35091 100644
--- a/core/prometheus/PrometheusInputRunner.cpp
+++ b/core/prometheus/PrometheusInputRunner.cpp
@@ -47,6 +47,7 @@ PrometheusInputRunner::PrometheusInputRunner()
     : mServiceHost(STRING_FLAG(loong_collector_operator_service)),
       mServicePort(INT32_FLAG(loong_collector_operator_service_port)),
       mPodName(STRING_FLAG(_pod_name_)),
+      mEventPool(true),
       mUnRegisterMs(0) {
     mClient = std::make_unique<sdk::CurlClient>();
     mTimer = std::make_shared<Timer>();
@@ -83,7 +84,7 @@ void PrometheusInputRunner::UpdateScrapeInput(std::shared_ptr<TargetSubscriberSc
     targetSubscriber->InitSelfMonitor(defaultLabels);
 
     targetSubscriber->mUnRegisterMs = mUnRegisterMs.load();
-    targetSubscriber->SetTimer(mTimer);
+    targetSubscriber->SetComponent(mTimer, &mEventPool);
     auto randSleepMilliSec = GetRandSleepMilliSec(
         targetSubscriber->GetId(), prometheus::RefeshIntervalSeconds, GetCurrentTimeInMilliSeconds());
     auto firstExecTime = std::chrono::steady_clock::now() + std::chrono::milliseconds(randSleepMilliSec);
@@ -294,4 +295,8 @@ string PrometheusInputRunner::GetAllProjects() {
     }
     return result;
 }
+
+void PrometheusInputRunner::CheckGC() {
+    mEventPool.CheckGC();
+}
 }; // namespace logtail
\ No newline at end of file
diff --git a/core/prometheus/PrometheusInputRunner.h b/core/prometheus/PrometheusInputRunner.h
index 89ae961ce1..2823d562a4 100644
--- a/core/prometheus/PrometheusInputRunner.h
+++ b/core/prometheus/PrometheusInputRunner.h
@@ -22,7 +22,6 @@
 
 #include "common/Lock.h"
 #include "common/timer/Timer.h"
-#include "monitor/MetricManager.h"
 #include "monitor/MetricTypes.h"
 #include "prometheus/schedulers/TargetSubscriberScheduler.h"
 #include "runner/InputRunner.h"
@@ -42,6 +41,7 @@ class PrometheusInputRunner : public InputRunner {
         static PrometheusInputRunner sInstance;
         return &sInstance;
     }
+    void CheckGC();
 
     // input plugin update
     void UpdateScrapeInput(std::shared_ptr<TargetSubscriberScheduler> targetSubscriber,
@@ -70,13 +70,13 @@ class PrometheusInputRunner : public InputRunner {
     std::atomic<bool> mIsThreadRunning = true;
     std::future<void> mThreadRes;
 
-    std::unique_ptr<sdk::CurlClient> mClient;
-
     std::string mServiceHost;
     int32_t mServicePort;
     std::string mPodName;
 
+    std::unique_ptr<sdk::CurlClient> mClient;
     std::shared_ptr<Timer> mTimer;
+    EventPool mEventPool;
 
     mutable ReadWriteLock mSubscriberMapRWLock;
     std::map<std::string, std::shared_ptr<TargetSubscriberScheduler>> mTargetSubscriberSchedulerMap;
diff --git a/core/prometheus/schedulers/BaseScheduler.cpp b/core/prometheus/schedulers/BaseScheduler.cpp
index db7de4ae79..af564f1622 100644
--- a/core/prometheus/schedulers/BaseScheduler.cpp
+++ b/core/prometheus/schedulers/BaseScheduler.cpp
@@ -1,5 +1,10 @@
 #include "prometheus/schedulers/BaseScheduler.h"
 
+#include "common/timer/Timer.h"
+#include "models/EventPool.h"
+
+using namespace std;
+
 namespace logtail {
 void BaseScheduler::ExecDone() {
     mExecCount++;
@@ -28,4 +33,9 @@ bool BaseScheduler::IsCancelled() {
     ReadLock lock(mLock);
     return !mValidState;
 }
+
+void BaseScheduler::SetComponent(shared_ptr<Timer> timer, EventPool* eventPool) {
+    mTimer = std::move(timer);
+    mEventPool = eventPool;
+}
 } // namespace logtail
\ No newline at end of file
diff --git a/core/prometheus/schedulers/BaseScheduler.h b/core/prometheus/schedulers/BaseScheduler.h
index e66e612d78..26739cdcd0 100644
--- a/core/prometheus/schedulers/BaseScheduler.h
+++ b/core/prometheus/schedulers/BaseScheduler.h
@@ -4,6 +4,8 @@
 #include <memory>
 
 #include "common/http/HttpResponse.h"
+#include "common/timer/Timer.h"
+#include "models/EventPool.h"
 #include "prometheus/async/PromFuture.h"
 
 namespace logtail {
@@ -20,9 +22,10 @@ class BaseScheduler {
 
     void SetFirstExecTime(std::chrono::steady_clock::time_point firstExecTime);
     void DelayExecTime(uint64_t delaySeconds);
-
     virtual void Cancel();
 
+    void SetComponent(std::shared_ptr<Timer> timer, EventPool* eventPool);
+
 protected:
     bool IsCancelled();
 
@@ -35,5 +38,8 @@ class BaseScheduler {
     bool mValidState = true;
     std::shared_ptr<PromFuture<HttpResponse&, uint64_t>> mFuture;
     std::shared_ptr<PromFuture<>> mIsContextValidFuture;
+
+    std::shared_ptr<Timer> mTimer;
+    EventPool* mEventPool = nullptr;
 };
 } // namespace logtail
\ No newline at end of file
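
// Editor's sketch (not part of the patch): the SetComponent() pattern introduced above, with
// simplified stand-ins for Timer and EventPool. The point is that schedulers now receive both
// shared components in one injection call instead of the removed Timer-only SetTimer().
#include <memory>
#include <utility>

struct Timer {};     // stand-in for common/timer/Timer.h
struct EventPool {}; // stand-in for models/EventPool.h

class BaseSchedulerSketch {
public:
    void SetComponent(std::shared_ptr<Timer> timer, EventPool* eventPool) {
        mTimer = std::move(timer);
        mEventPool = eventPool; // not owned; PrometheusInputRunner keeps the pool alive
    }

protected:
    std::shared_ptr<Timer> mTimer;
    EventPool* mEventPool = nullptr;
};

int main() {
    auto timer = std::make_shared<Timer>();
    EventPool pool;
    BaseSchedulerSketch scheduler;
    scheduler.SetComponent(timer, &pool);
    return 0;
}
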
diff --git a/core/prometheus/schedulers/ScrapeScheduler.cpp b/core/prometheus/schedulers/ScrapeScheduler.cpp
index a830558227..02fca2caec 100644
--- a/core/prometheus/schedulers/ScrapeScheduler.cpp
+++ b/core/prometheus/schedulers/ScrapeScheduler.cpp
@@ -24,13 +24,11 @@
 #include "common/StringTools.h"
 #include "common/TimeUtil.h"
 #include "common/timer/HttpRequestTimerEvent.h"
-#include "common/timer/Timer.h"
 #include "logger/Logger.h"
 #include "pipeline/queue/ProcessQueueItem.h"
 #include "pipeline/queue/ProcessQueueManager.h"
 #include "pipeline/queue/QueueKey.h"
 #include "prometheus/Constants.h"
-#include "prometheus/Utils.h"
 #include "prometheus/async/PromFuture.h"
 #include "prometheus/async/PromHttpRequest.h"
 #include "sdk/Common.h"
@@ -86,8 +84,6 @@ ScrapeScheduler::ScrapeScheduler(std::shared_ptr<ScrapeConfig> scrapeConfigPtr,
     mHash = mScrapeConfigPtr->mJobName + tmpTargetURL + ToString(mTargetLabels.Hash());
     mInstance = mHost + ":" + ToString(mPort);
     mInterval = mScrapeConfigPtr->mScrapeIntervalSeconds;
-
-    mParser = make_unique<TextParser>();
 }
 
 void ScrapeScheduler::OnMetricResult(HttpResponse& response, uint64_t timestampMilliSec) {
@@ -214,7 +210,7 @@ std::unique_ptr<TimerEvent> ScrapeScheduler::BuildScrapeTimerEvent(std::chrono::
                                             mScrapeConfigPtr->mRequestHeaders,
                                             "",
                                             HttpResponse(
-                                                new PromMetricResponseBody(),
+                                                new PromMetricResponseBody(mEventPool),
                                                 [](void* ptr) { delete static_cast<PromMetricResponseBody*>(ptr); },
                                                 PromMetricWriteCallback),
                                             mScrapeConfigPtr->mScrapeTimeoutSeconds,
@@ -238,10 +234,6 @@ void ScrapeScheduler::Cancel() {
     }
 }
 
-void ScrapeScheduler::SetTimer(std::shared_ptr<Timer> timer) {
-    mTimer = std::move(timer);
-}
-
 void ScrapeScheduler::InitSelfMonitor(const MetricLabels& defaultLabels) {
     mSelfMonitor = std::make_shared<PromSelfMonitorUnsafe>();
     MetricLabels labels = defaultLabels;
diff --git a/core/prometheus/schedulers/ScrapeScheduler.h b/core/prometheus/schedulers/ScrapeScheduler.h
index 6a606627f2..00ac2d989a 100644
--- a/core/prometheus/schedulers/ScrapeScheduler.h
+++ b/core/prometheus/schedulers/ScrapeScheduler.h
@@ -21,14 +21,11 @@
 
 #include "BaseScheduler.h"
 #include "common/http/HttpResponse.h"
-#include "common/timer/Timer.h"
 #include "models/PipelineEventGroup.h"
 #include "monitor/MetricTypes.h"
 #include "pipeline/queue/QueueKey.h"
-#include "prometheus/Constants.h"
 #include "prometheus/PromSelfMonitor.h"
 #include "prometheus/Utils.h"
-#include "prometheus/labels/TextParser.h"
 #include "prometheus/schedulers/ScrapeConfig.h"
 
 #ifdef APSARA_UNIT_TEST_MAIN
@@ -43,13 +40,15 @@ struct PromMetricResponseBody {
     PipelineEventGroup mEventGroup;
     std::string mCache;
     size_t mRawSize = 0;
+    EventPool* mEventPool = nullptr;
 
-    PromMetricResponseBody() : mEventGroup(std::make_shared<SourceBuffer>()) {};
+    explicit PromMetricResponseBody(EventPool* eventPool)
+        : mEventGroup(std::make_shared<SourceBuffer>()), mEventPool(eventPool) {};
     void AddEvent(char* line, size_t len) {
         if (IsValidMetric(StringView(line, len))) {
-            auto* e = mEventGroup.AddLogEvent();
+            auto* e = mEventGroup.AddRawEvent(true, mEventPool);
             auto sb = mEventGroup.GetSourceBuffer()->CopyString(line, len);
-            e->SetContentNoCopy(prometheus::PROMETHEUS, StringView(sb.data, sb.size));
+            e->SetContentNoCopy(sb);
         }
     }
     void FlushCache() {
@@ -70,7 +69,6 @@ class ScrapeScheduler : public BaseScheduler {
     ~ScrapeScheduler() override = default;
 
     void OnMetricResult(HttpResponse&, uint64_t timestampMilliSec);
-    void SetTimer(std::shared_ptr<Timer> timer);
 
     std::string GetId() const;
 
@@ -94,11 +92,8 @@ class ScrapeScheduler : public BaseScheduler {
     std::string mInstance;
     Labels mTargetLabels;
 
-    std::unique_ptr<TextParser> mParser;
-
     QueueKey mQueueKey;
     size_t mInputIndex;
-    std::shared_ptr<Timer> mTimer;
 
     // auto metrics
     uint64_t mScrapeTimestampMilliSec = 0;
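
// Editor's sketch (not part of the patch): what the AddEvent() change above amounts to. Each valid
// scrape line is now stored as a keyless raw event that references a single copy of the line, and
// the event object can be recycled through the runner's EventPool. The types below are simplified
// stand-ins; only the idea is taken from the diff.
#include <iostream>
#include <string>
#include <string_view>
#include <vector>

struct RawEventSketch {
    std::string_view mContent; // view into the group's source buffer
    void SetContentNoCopy(std::string_view sb) { mContent = sb; }
    std::string_view GetContent() const { return mContent; }
};

int main() {
    std::vector<std::string> sourceBuffer; // stand-in for SourceBuffer::CopyString
    const std::string line = "up{job=\"node\"} 1";
    sourceBuffer.push_back(line); // the scraped line is copied once into the buffer

    RawEventSketch e;
    e.SetContentNoCopy(sourceBuffer.back()); // the event only references that copy

    // ProcessorPromParseMetricNative later reads the whole line back via GetContent().
    std::cout << e.GetContent() << std::endl;
    return 0;
}
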
diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp
index 178c952dba..42381f1229 100644
--- a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp
+++ b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp
@@ -223,7 +223,7 @@ TargetSubscriberScheduler::BuildScrapeSchedulerSet(std::vector<Labels>& targetGr
         auto scrapeScheduler
             = std::make_shared<ScrapeScheduler>(mScrapeConfigPtr, host, port, resultLabel, mQueueKey, mInputIndex);
 
-        scrapeScheduler->SetTimer(mTimer);
+        scrapeScheduler->SetComponent(mTimer, mEventPool);
 
         auto randSleepMilliSec = GetRandSleepMilliSec(
             scrapeScheduler->GetId(), mScrapeConfigPtr->mScrapeIntervalSeconds, GetCurrentTimeInMilliSeconds());
@@ -236,9 +236,6 @@ TargetSubscriberScheduler::BuildScrapeSchedulerSet(std::vector<Labels>& targetGr
     return scrapeSchedulerMap;
 }
 
-void TargetSubscriberScheduler::SetTimer(shared_ptr<Timer> timer) {
-    mTimer = std::move(timer);
-}
 
 string TargetSubscriberScheduler::GetId() const {
     return mJobName;
diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.h b/core/prometheus/schedulers/TargetSubscriberScheduler.h
index f6dc0af7aa..af5e1493fd 100644
--- a/core/prometheus/schedulers/TargetSubscriberScheduler.h
+++ b/core/prometheus/schedulers/TargetSubscriberScheduler.h
@@ -41,7 +41,6 @@ class TargetSubscriberScheduler : public BaseScheduler {
     bool operator<(const TargetSubscriberScheduler& other) const;
 
     void OnSubscription(HttpResponse&, uint64_t);
-    void SetTimer(std::shared_ptr<Timer> timer);
     void SubscribeOnce(std::chrono::steady_clock::time_point execTime);
 
     std::string GetId() const;
@@ -79,7 +78,6 @@ class TargetSubscriberScheduler : public BaseScheduler {
     std::unordered_map<std::string, std::shared_ptr<ScrapeScheduler>> mScrapeSchedulerMap;
 
     std::string mJobName;
-    std::shared_ptr<Timer> mTimer;
 
     std::string mETag;
 
diff --git a/core/runner/FlusherRunner.cpp b/core/runner/FlusherRunner.cpp
index 1f9f808371..303e183885 100644
--- a/core/runner/FlusherRunner.cpp
+++ b/core/runner/FlusherRunner.cpp
@@ -94,6 +94,9 @@ void FlusherRunner::UpdateSendFlowControl() {
 void FlusherRunner::Stop() {
     mIsFlush = true;
     SenderQueueManager::GetInstance()->Trigger();
+    if (!mThreadRes.valid()) {
+        return;
+    }
     future_status s = mThreadRes.wait_for(chrono::seconds(INT32_FLAG(flusher_runner_exit_timeout_secs)));
     if (s == future_status::ready) {
         LOG_INFO(sLogger, ("flusher runner", "stopped successfully"));
diff --git a/core/runner/ProcessorRunner.cpp b/core/runner/ProcessorRunner.cpp
index 37e41da27b..e7a42ae204 100644
--- a/core/runner/ProcessorRunner.cpp
+++ b/core/runner/ProcessorRunner.cpp
@@ -19,7 +19,6 @@
 #include "common/Flags.h"
 #include "go_pipeline/LogtailPlugin.h"
 #include "models/EventPool.h"
-#include "monitor/LogFileProfiler.h"
 #include "monitor/AlarmManager.h"
 #include "monitor/metric_constants/MetricConstants.h"
 #include "pipeline/PipelineManager.h"
@@ -56,6 +55,9 @@ void ProcessorRunner::Stop() {
     mIsFlush = true;
     ProcessQueueManager::GetInstance()->Trigger();
     for (uint32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) {
+        if (!mThreadRes[threadNo].valid()) {
+            continue;
+        }
         future_status s
             = mThreadRes[threadNo].wait_for(chrono::seconds(INT32_FLAG(processor_runner_exit_timeout_secs)));
         if (s == future_status::ready) {
diff --git a/core/runner/sink/http/HttpSink.cpp b/core/runner/sink/http/HttpSink.cpp
index 4429ce40ec..b7ef0beac7 100644
--- a/core/runner/sink/http/HttpSink.cpp
+++ b/core/runner/sink/http/HttpSink.cpp
@@ -62,6 +62,9 @@ bool HttpSink::Init() {
 
 void HttpSink::Stop() {
     mIsFlush = true;
+    if (!mThreadRes.valid()) {
+        return;
+    }
     future_status s = mThreadRes.wait_for(chrono::seconds(INT32_FLAG(http_sink_exit_timeout_secs)));
     if (s == future_status::ready) {
         LOG_INFO(sLogger, ("http sink", "stopped successfully"));
diff --git a/core/sls_control/SLSControl.cpp b/core/sls_control/SLSControl.cpp
index ff15b99fe8..edc43ee76c 100644
--- a/core/sls_control/SLSControl.cpp
+++ b/core/sls_control/SLSControl.cpp
@@ -20,13 +20,12 @@
 #include <sys/utsname.h>
 #endif
 
-#include "curl/curl.h"
-
 #include "app_config/AppConfig.h"
 #include "common/Flags.h"
 #include "common/version.h"
+#include "curl/curl.h"
 #include "logger/Logger.h"
-#include "monitor/LogFileProfiler.h"
+#include "monitor/Monitor.h"
 #ifdef __ENTERPRISE__
 #include "sls_control/EnterpriseSLSControl.h"
 #endif
@@ -78,8 +77,8 @@ void SLSControl::GenerateUserAgent() {
     if (-1 == uname(buf)) {
         LOG_WARNING(
             sLogger,
-            ("get os info part of user agent failed", errno)("use default os info", LogFileProfiler::mOsDetail));
-        os = LogFileProfiler::mOsDetail;
+            ("get os info part of user agent failed", errno)("use default os info", LoongCollectorMonitor::mOsDetail));
+        os = LoongCollectorMonitor::mOsDetail;
     } else {
         char* pch = strchr(buf->release, '-');
         if (pch) {
@@ -93,10 +92,10 @@ void SLSControl::GenerateUserAgent() {
     }
     delete buf;
 #elif defined(_MSC_VER)
-    os = LogFileProfiler::mOsDetail;
+    os = LoongCollectorMonitor::mOsDetail;
 #endif
 
-    mUserAgent = string("ilogtail/") + ILOGTAIL_VERSION + " (" + os + ") ip/" + LogFileProfiler::mIpAddr + " env/"
+    mUserAgent = string("ilogtail/") + ILOGTAIL_VERSION + " (" + os + ") ip/" + LoongCollectorMonitor::mIpAddr + " env/"
         + GetRunningEnvironment();
     if (!STRING_FLAG(custom_user_agent).empty()) {
         mUserAgent += " " + STRING_FLAG(custom_user_agent);
diff --git a/core/task_pipeline/Task.h b/core/task_pipeline/Task.h
new file mode 100644
index 0000000000..c3d8eedb5b
--- /dev/null
+++ b/core/task_pipeline/Task.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <json/json.h>
+
+#include <string>
+
+#include "logger/Logger.h"
+#include "monitor/AlarmManager.h"
+
+namespace logtail {
+
+class Task {
+public:
+    virtual ~Task() {}
+
+    virtual const std::string& Name() const = 0;
+    virtual bool Init(const Json::Value& config) = 0;
+    virtual void Start() = 0;
+    virtual void Stop(bool isRemoving) = 0;
+
+protected:
+    Logger::logger mLogger = sLogger;
+    AlarmManager* mAlarm = AlarmManager::GetInstance();
+};
+
+} // namespace logtail
diff --git a/core/task_pipeline/TaskPipeline.cpp b/core/task_pipeline/TaskPipeline.cpp
new file mode 100644
index 0000000000..515741e89d
--- /dev/null
+++ b/core/task_pipeline/TaskPipeline.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "task_pipeline/TaskPipeline.h"
+
+#include "task_pipeline/TaskRegistry.h"
+
+using namespace std;
+
+namespace logtail {
+
+bool TaskPipeline::Init(TaskConfig&& config) {
+    mConfigName = config.mName;
+    mCreateTime = config.mCreateTime;
+    mConfig = std::move(config.mDetail);
+
+    const auto& detail = (*mConfig)["task"];
+    mPlugin = TaskRegistry::GetInstance()->CreateTask(detail["Type"].asString());
+    return mPlugin->Init(detail);
+}
+
+} // namespace logtail
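
// Editor's sketch (not part of the patch): the config shape TaskPipeline::Init() above expects.
// It reads the "task" object and dispatches on its "Type" field through TaskRegistry. The JSON
// literal and the "task_mock"/"Interval" values are hypothetical sample data.
#include <json/json.h>

#include <iostream>
#include <sstream>
#include <string>

int main() {
    const std::string raw = R"({"task": {"Type": "task_mock", "Interval": 60}})";
    Json::Value config;
    Json::CharReaderBuilder builder;
    std::istringstream iss(raw);
    std::string errs;
    if (!Json::parseFromStream(builder, iss, &config, &errs)) {
        std::cerr << "parse failed: " << errs << std::endl;
        return 1;
    }
    const Json::Value& detail = config["task"];
    std::cout << "task type: " << detail["Type"].asString() << std::endl; // -> task_mock
    return 0;
}
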
diff --git a/core/task_pipeline/TaskPipeline.h b/core/task_pipeline/TaskPipeline.h
new file mode 100644
index 0000000000..fc5b95a1f3
--- /dev/null
+++ b/core/task_pipeline/TaskPipeline.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <json/json.h>
+
+#include <memory>
+#include <string>
+
+#include "config/TaskConfig.h"
+#include "task_pipeline/Task.h"
+
+namespace logtail {
+
+class TaskPipeline {
+public:
+    const std::string& Name() const { return mConfigName; }
+    bool Init(TaskConfig&& config);
+    void Start() { mPlugin->Start(); }
+    void Stop(bool isRemoving) { mPlugin->Stop(isRemoving); }
+    const Json::Value& GetConfig() const { return *mConfig; }
+
+#ifdef APSARA_UNIT_TEST_MAIN
+    Task* GetPlugin() const { return mPlugin.get(); }
+#endif
+
+private:
+    std::unique_ptr<Task> mPlugin;
+
+    std::string mConfigName;
+    std::unique_ptr<Json::Value> mConfig;
+    uint32_t mCreateTime = 0;
+
+#ifdef APSARA_UNIT_TEST_MAIN
+    friend class TaskPipelineUnittest;
+#endif
+};
+
+} // namespace logtail
diff --git a/core/task_pipeline/TaskPipelineManager.cpp b/core/task_pipeline/TaskPipelineManager.cpp
new file mode 100644
index 0000000000..44d9b50195
--- /dev/null
+++ b/core/task_pipeline/TaskPipelineManager.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "task_pipeline/TaskPipelineManager.h"
+
+#include "config/feedbacker/ConfigFeedbackReceiver.h"
+
+using namespace std;
+
+namespace logtail {
+
+static unique_ptr<TaskPipeline> sEmptyTask;
+
+void TaskPipelineManager::UpdatePipelines(TaskConfigDiff& diff) {
+    for (const auto& name : diff.mRemoved) {
+        auto iter = mPipelineNameEntityMap.find(name);
+        iter->second->Stop(true);
+        mPipelineNameEntityMap.erase(iter);
+        ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(name,
+                                                                                     ConfigFeedbackStatus::DELETED);
+    }
+    for (auto& config : diff.mModified) {
+        auto p = BuildPipeline(std::move(config));
+        if (!p) {
+            LOG_WARNING(
+                sLogger,
+                ("failed to build task for existing config", "keep current task running")("config", config.mName));
+            AlarmManager::GetInstance()->SendAlarm(
+                CATEGORY_CONFIG_ALARM,
+                "failed to build task for existing config: keep current task running, config: " + config.mName);
+            ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                         ConfigFeedbackStatus::FAILED);
+            continue;
+        }
+        LOG_INFO(sLogger,
+                 ("task building for existing config succeeded",
+                  "stop the old task and start the new one")("config", config.mName));
+        auto iter = mPipelineNameEntityMap.find(config.mName);
+        iter->second->Stop(false);
+        mPipelineNameEntityMap[config.mName] = std::move(p);
+        mPipelineNameEntityMap[config.mName]->Start();
+        ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                     ConfigFeedbackStatus::APPLIED);
+    }
+    for (auto& config : diff.mAdded) {
+        auto p = BuildPipeline(std::move(config));
+        if (!p) {
+            LOG_WARNING(sLogger,
+                        ("failed to build task for new config", "skip current object")("config", config.mName));
+            AlarmManager::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM,
+                                                   "failed to build task for new config: skip current object, config: "
+                                                       + config.mName);
+            ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                         ConfigFeedbackStatus::FAILED);
+            continue;
+        }
+        LOG_INFO(sLogger, ("task building for new config succeeded", "begin to start task")("config", config.mName));
+        mPipelineNameEntityMap[config.mName] = std::move(p);
+        mPipelineNameEntityMap[config.mName]->Start();
+        ConfigFeedbackReceiver::GetInstance().FeedbackContinuousPipelineConfigStatus(config.mName,
+                                                                                     ConfigFeedbackStatus::APPLIED);
+    }
+}
+
+void TaskPipelineManager::StopAllPipelines() {
+    for (auto& item : mPipelineNameEntityMap) {
+        item.second->Stop(true);
+    }
+    mPipelineNameEntityMap.clear();
+}
+
+const unique_ptr<TaskPipeline>& TaskPipelineManager::FindPipelineByName(const string& configName) const {
+    auto it = mPipelineNameEntityMap.find(configName);
+    if (it != mPipelineNameEntityMap.end()) {
+        return it->second;
+    }
+    return sEmptyTask;
+}
+
+vector<string> TaskPipelineManager::GetAllPipelineNames() const {
+    vector<string> res;
+    for (const auto& item : mPipelineNameEntityMap) {
+        res.push_back(item.first);
+    }
+    return res;
+}
+
+unique_ptr<TaskPipeline> TaskPipelineManager::BuildPipeline(TaskConfig&& config) {
+    auto p = make_unique<TaskPipeline>();
+    if (!p->Init(std::move(config))) {
+        return nullptr;
+    }
+    return p;
+}
+
+} // namespace logtail
diff --git a/core/task_pipeline/TaskPipelineManager.h b/core/task_pipeline/TaskPipelineManager.h
new file mode 100644
index 0000000000..60776f6fd4
--- /dev/null
+++ b/core/task_pipeline/TaskPipelineManager.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "config/ConfigDiff.h"
+#include "config/TaskConfig.h"
+#include "task_pipeline/TaskPipeline.h"
+
+namespace logtail {
+
+class TaskPipelineManager {
+public:
+    TaskPipelineManager(const TaskPipelineManager&) = delete;
+    TaskPipelineManager& operator=(const TaskPipelineManager&) = delete;
+
+    static TaskPipelineManager* GetInstance() {
+        static TaskPipelineManager instance;
+        return &instance;
+    }
+
+    void UpdatePipelines(TaskConfigDiff& diff);
+    void StopAllPipelines();
+    const std::unique_ptr<TaskPipeline>& FindPipelineByName(const std::string& configName) const;
+    std::vector<std::string> GetAllPipelineNames() const;
+
+#ifdef APSARA_UNIT_TEST_MAIN
+    void ClearEnvironment() { mPipelineNameEntityMap.clear(); }
+#endif
+
+private:
+    TaskPipelineManager() = default;
+    ~TaskPipelineManager() = default;
+
+    std::unique_ptr<TaskPipeline> BuildPipeline(TaskConfig&& config);
+
+    std::unordered_map<std::string, std::unique_ptr<TaskPipeline>> mPipelineNameEntityMap;
+
+#ifdef APSARA_UNIT_TEST_MAIN
+    friend class TaskPipelineManagerUnittest;
+#endif
+};
+
+} // namespace logtail
diff --git a/core/task_pipeline/TaskRegistry.cpp b/core/task_pipeline/TaskRegistry.cpp
new file mode 100644
index 0000000000..b55132e4d4
--- /dev/null
+++ b/core/task_pipeline/TaskRegistry.cpp
@@ -0,0 +1,45 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "task_pipeline/TaskRegistry.h"
+
+using namespace std;
+
+namespace logtail {
+
+void TaskRegistry::LoadPlugins() {
+    // Built-in tasks register here, e.g. RegisterCreator(TaskMock::sName, []() { return make_unique<TaskMock>(); });
+}
+
+void TaskRegistry::UnloadPlugins() {
+    mPluginDict.clear();
+}
+
+unique_ptr<Task> TaskRegistry::CreateTask(const std::string& name) {
+    auto it = mPluginDict.find(name);
+    if (it == mPluginDict.end()) {
+        return nullptr;
+    }
+    return it->second();
+}
+
+bool TaskRegistry::IsValidPlugin(const std::string& name) const {
+    return mPluginDict.find(name) != mPluginDict.end();
+}
+
+void TaskRegistry::RegisterCreator(const std::string& name, std::function<std::unique_ptr<Task>()>&& creator) {
+    mPluginDict[name] = std::move(creator);
+}
+
+} // namespace logtail
diff --git a/core/task_pipeline/TaskRegistry.h b/core/task_pipeline/TaskRegistry.h
new file mode 100644
index 0000000000..26c70a7a71
--- /dev/null
+++ b/core/task_pipeline/TaskRegistry.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2024 iLogtail Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+#include "task_pipeline/Task.h"
+
+namespace logtail {
+
+class TaskRegistry {
+public:
+    TaskRegistry(const TaskRegistry&) = delete;
+    TaskRegistry& operator=(const TaskRegistry&) = delete;
+
+    static TaskRegistry* GetInstance() {
+        static TaskRegistry instance;
+        return &instance;
+    }
+
+    void LoadPlugins();
+    void UnloadPlugins();
+    std::unique_ptr<Task> CreateTask(const std::string& name);
+    bool IsValidPlugin(const std::string& name) const;
+
+private:
+    TaskRegistry() = default;
+    ~TaskRegistry() = default;
+
+    void RegisterCreator(const std::string& name, std::function<std::unique_ptr<Task>()>&& creator);
+
+    std::unordered_map<std::string, std::function<std::unique_ptr<Task>()>> mPluginDict;
+
+#ifdef APSARA_UNIT_TEST_MAIN
+    friend class TaskConfigUnittest;
+    friend void LoadTaskMock();
+#endif
+};
+
+} // namespace logtail
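
As a hedged illustration of the registry contract (registration itself is private and happens inside LoadPlugins(), mirroring the commented-out TaskMock line above), a helper like the following could resolve a task plugin from the Type field of a task config. MakeTaskByType is an invented name used only for illustration:

    #include <memory>
    #include <string>

    #include "task_pipeline/TaskRegistry.h"

    // Resolve a task plugin by the "Type" value of a task config; nullptr means the type is unknown.
    std::unique_ptr<logtail::Task> MakeTaskByType(const std::string& type) {
        auto* registry = logtail::TaskRegistry::GetInstance();
        if (!registry->IsValidPlugin(type)) {
            return nullptr;
        }
        return registry->CreateTask(type);
    }
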
diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt
index 6b41223ab5..41f1601069 100644
--- a/core/unittest/CMakeLists.txt
+++ b/core/unittest/CMakeLists.txt
@@ -15,6 +15,9 @@
 cmake_minimum_required(VERSION 3.22)
 project(unittest_base)
 
+# Unittests should be able to access private members
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-access-control")
+
 add_definitions(-DAPSARA_UNIT_TEST_MAIN)
 set(NO_TCMALLOC TRUE)
 add_definitions(-DLOGTAIL_NO_TC_MALLOC)
@@ -54,6 +57,7 @@ macro(add_core_subdir)
     add_subdirectory(serializer)
     add_subdirectory(prometheus)
     add_subdirectory(route)
+    add_subdirectory(task_pipeline)
 endmacro()
 
 macro(add_spl_subdir)
diff --git a/core/unittest/common/FileSystemUtilUnittest.h b/core/unittest/common/FileSystemUtilUnittest.h
index b1427a4409..c507d8a647 100644
--- a/core/unittest/common/FileSystemUtilUnittest.h
+++ b/core/unittest/common/FileSystemUtilUnittest.h
@@ -14,13 +14,14 @@
  * limitations under the License.
  */
 
-#include "unittest/Unittest.h"
-#include <string>
-#include <fstream>
 #include <boost/format.hpp>
+#include <fstream>
+#include <string>
+
 #include "common/FileSystemUtil.h"
-#include "common/RuntimeUtil.h"
 #include "common/LogtailCommonFlags.h"
+#include "common/RuntimeUtil.h"
+#include "unittest/Unittest.h"
 
 namespace logtail {
 
@@ -129,7 +130,9 @@ TEST_F(FileSystemUtilUnittest, TestDirNormal) {
 
 #ifndef _MSC_VER
 TEST_F(FileSystemUtilUnittest, TestDirSymbolic) {
-    { std::ofstream((mTestRoot / "f1").string()); }
+    {
+        std::ofstream((mTestRoot / "f1").string());
+    }
     bfs::create_directory(mTestRoot / "d1");
     std::map<std::string, std::string> symbolics = {{"s1", "f1"}, {"s2", "d1"}};
     for (auto& s : symbolics) {
@@ -181,7 +184,9 @@ TEST_F(FileSystemUtilUnittest, TestPathStat_stat) {
 
     {
         auto filePath = ((mTestRoot / "file").string());
-        { std::ofstream(filePath).write("xxx", 3); }
+        {
+            std::ofstream(filePath).write("xxx", 3);
+        }
         fsutil::PathStat stat;
         EXPECT_TRUE(fsutil::PathStat::stat(filePath, stat));
         DevInode devInode = stat.GetDevInode();
@@ -221,7 +226,9 @@ TEST_F(FileSystemUtilUnittest, TestPathStat_stat) {
 TEST_F(FileSystemUtilUnittest, TestPathStat_fstat) {
     auto currentTime = time(NULL);
     auto filePath = ((mTestRoot / "file").string());
-    { std::ofstream(filePath).write("xxx", 3); }
+    {
+        std::ofstream(filePath).write("xxx", 3);
+    }
 
     FILE* file = fopen(filePath.c_str(), "r");
     EXPECT_TRUE(file != NULL);
@@ -262,7 +269,9 @@ TEST_F(FileSystemUtilUnittest, TestPathStat_fstat) {
 
 TEST_F(FileSystemUtilUnittest, TestPathStat_GetLastWriteTime) {
     auto filePath = ((mTestRoot / "file").string());
-    { std::ofstream(filePath).write("xxx", 3); }
+    {
+        std::ofstream(filePath).write("xxx", 3);
+    }
 
     {
         int64_t sec = -1, nsec = -1;
@@ -286,7 +295,9 @@ TEST_F(FileSystemUtilUnittest, TestPathStat_GetLastWriteTime) {
 TEST_F(FileSystemUtilUnittest, TestFileReadOnlyOpen) {
     auto filePath = ((mTestRoot / "file").string());
     const std::string fileContent{"xxx"};
-    { std::ofstream(filePath) << fileContent; }
+    {
+        std::ofstream(filePath) << fileContent;
+    }
 
     // Open the file and delete it before closing.
     // File can still be read after deleting.
@@ -319,7 +330,7 @@ TEST_F(FileSystemUtilUnittest, TestFileWriteOnlyOpen) {
         fflush(file);
         // Open with C++ fstream.
         std::ifstream in(filePath);
-        EXPECT_TRUE(in.good());
+        EXPECT_TRUE(in);
         {
             EXPECT_TRUE(in.read(buffer.data(), fileContentLen));
             EXPECT_EQ(std::string(buffer.data(), buffer.size()), fileContent);
@@ -353,7 +364,9 @@ TEST_F(FileSystemUtilUnittest, TestFileWriteOnlyOpen) {
 
     // Case #2: File is existing, open will truncate it.
     {
-        { std::ofstream(filePath) << fileContent; }
+        {
+            std::ofstream(filePath) << fileContent;
+        }
 
         auto file = FileWriteOnlyOpen(filePath.c_str(), "w");
         ASSERT_TRUE(file != NULL);
@@ -405,7 +418,9 @@ TEST_F(FileSystemUtilUnittest, TestFileAppendOpen) {
 
     // Case #3: Open existing file, check its cursor position.
     {
-        { std::ofstream(filePath) << fileContent; }
+        {
+            std::ofstream(filePath) << fileContent;
+        }
 
         auto file = FileAppendOpen(filePath.c_str(), "a");
         EXPECT_EQ(ftell(file), fileContentLen);
diff --git a/core/unittest/config/CMakeLists.txt b/core/unittest/config/CMakeLists.txt
index 823419036e..560d47d393 100644
--- a/core/unittest/config/CMakeLists.txt
+++ b/core/unittest/config/CMakeLists.txt
@@ -21,8 +21,11 @@ project(config_unittest)
 # add_executable(config_updator_unittest ConfigUpdatorUnittest.cpp)
 # target_link_libraries(config_updator_unittest ${UT_BASE_TARGET})
 
-add_executable(config_unittest PipelineConfigUnittest.cpp)
-target_link_libraries(config_unittest ${UT_BASE_TARGET})
+add_executable(pipeline_config_unittest PipelineConfigUnittest.cpp)
+target_link_libraries(pipeline_config_unittest ${UT_BASE_TARGET})
+
+add_executable(task_config_unittest TaskConfigUnittest.cpp)
+target_link_libraries(task_config_unittest ${UT_BASE_TARGET})
 
 add_executable(config_watcher_unittest ConfigWatcherUnittest.cpp)
 target_link_libraries(config_watcher_unittest ${UT_BASE_TARGET})
@@ -42,7 +45,8 @@ add_executable(common_config_provider_unittest CommonConfigProviderUnittest.cpp)
 target_link_libraries(common_config_provider_unittest ${UT_BASE_TARGET})
 
 include(GoogleTest)
-gtest_discover_tests(config_unittest)
+gtest_discover_tests(pipeline_config_unittest)
+gtest_discover_tests(task_config_unittest)
 gtest_discover_tests(config_watcher_unittest)
 gtest_discover_tests(config_update_unittest)
 if (ENABLE_ENTERPRISE)
diff --git a/core/unittest/config/CommonConfigProviderUnittest.cpp b/core/unittest/config/CommonConfigProviderUnittest.cpp
index fe41260944..0621cf0f26 100644
--- a/core/unittest/config/CommonConfigProviderUnittest.cpp
+++ b/core/unittest/config/CommonConfigProviderUnittest.cpp
@@ -21,9 +21,10 @@
 #include "config/ConfigDiff.h"
 #include "config/InstanceConfigManager.h"
 #include "config/common_provider/CommonConfigProvider.h"
-#include "config/watcher/ConfigWatcher.h"
 #include "config/watcher/InstanceConfigWatcher.h"
+#include "config/watcher/PipelineConfigWatcher.h"
 #include "gmock/gmock.h"
+#include "monitor/Monitor.h"
 #include "pipeline/PipelineManager.h"
 #include "unittest/Unittest.h"
 
@@ -79,7 +80,7 @@ class CommonConfigProviderUnittest : public ::testing::Test {
             MockCommonConfigProvider provider;
             provider.Init("common_v2");
             provider.Stop();
-            bfs::remove_all(provider.mPipelineSourceDir.string());
+            bfs::remove_all(provider.mContinuousPipelineConfigDir.string());
             bfs::remove_all(provider.mInstanceSourceDir.string());
         } else {
             CreateAgentDir();
@@ -90,7 +91,7 @@ class CommonConfigProviderUnittest : public ::testing::Test {
             MockCommonConfigProvider provider;
             provider.Init("common_v2");
             provider.Stop();
-            bfs::remove_all(provider.mPipelineSourceDir.string());
+            bfs::remove_all(provider.mContinuousPipelineConfigDir.string());
             bfs::remove_all(provider.mInstanceSourceDir.string());
         }
     }
@@ -100,7 +101,7 @@ class CommonConfigProviderUnittest : public ::testing::Test {
         MockCommonConfigProvider provider;
         provider.Init("common_v2");
         provider.Stop();
-        bfs::remove_all(provider.mPipelineSourceDir.string());
+        bfs::remove_all(provider.mContinuousPipelineConfigDir.string());
         bfs::remove_all(provider.mInstanceSourceDir.string());
     }
 
@@ -293,11 +294,12 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
                   APSARA_TEST_EQUAL(heartbeatReq.sequence_num(), sequence_num);
                   sequence_num++;
                   APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsInstanceConfig);
-                  APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsPipelineConfig);
+                  APSARA_TEST_TRUE(heartbeatReq.capabilities()
+                                   & configserver::proto::v2::AcceptsContinuousPipelineConfig);
                   APSARA_TEST_EQUAL(heartbeatReq.instance_id(), provider.GetInstanceId());
                   APSARA_TEST_EQUAL(heartbeatReq.agent_type(), "LoongCollector");
-                  APSARA_TEST_EQUAL(heartbeatReq.attributes().ip(), LogFileProfiler::mIpAddr);
-                  APSARA_TEST_EQUAL(heartbeatReq.attributes().hostname(), LogFileProfiler::mHostname);
+                  APSARA_TEST_EQUAL(heartbeatReq.attributes().ip(), LoongCollectorMonitor::mIpAddr);
+                  APSARA_TEST_EQUAL(heartbeatReq.attributes().hostname(), LoongCollectorMonitor::mHostname);
                   APSARA_TEST_EQUAL(heartbeatReq.attributes().version(), ILOGTAIL_VERSION);
                   auto it = heartbeatReq.tags().begin();
                   APSARA_TEST_EQUAL(it->name(), "key1");
@@ -313,7 +315,7 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
                   configserver::proto::v2::HeartbeatResponse heartbeatRespPb;
                   heartbeatRespPb.set_capabilities(configserver::proto::v2::ResponseFlags::ReportFullState);
                   {
-                      auto pipeline = heartbeatRespPb.mutable_pipeline_config_updates();
+                      auto pipeline = heartbeatRespPb.mutable_continuous_pipeline_config_updates();
                       auto configDetail = pipeline->Add();
                       configDetail->set_name("config1");
                       configDetail->set_version(1);
@@ -381,10 +383,9 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
                         )");
                   }
                   {
-                      auto commandconfig = heartbeatRespPb.mutable_custom_command_updates();
+                      auto commandconfig = heartbeatRespPb.mutable_onetime_pipeline_config_updates();
                       auto configDetail = commandconfig->Add();
                       configDetail->set_name("commandconfig1");
-                      configDetail->set_type("history");
                       configDetail->set_detail(R"(
                         {
                                 "enable": true,
@@ -426,23 +427,23 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
         configserver::proto::v2::HeartbeatResponse heartbeatResponse;
         provider.GetConfigUpdate();
 
-        APSARA_TEST_EQUAL(provider.mPipelineConfigInfoMap.size(), 2);
-        APSARA_TEST_EQUAL(provider.mPipelineConfigInfoMap["config1"].status, ConfigFeedbackStatus::APPLYING);
-        APSARA_TEST_EQUAL(provider.mPipelineConfigInfoMap["config2"].status, ConfigFeedbackStatus::FAILED);
+        APSARA_TEST_EQUAL(provider.mContinuousPipelineConfigInfoMap.size(), 2);
+        APSARA_TEST_EQUAL(provider.mContinuousPipelineConfigInfoMap["config1"].status, ConfigFeedbackStatus::APPLYING);
+        APSARA_TEST_EQUAL(provider.mContinuousPipelineConfigInfoMap["config2"].status, ConfigFeedbackStatus::FAILED);
 
         // Process the pipeline config
-        PipelineConfigDiff pipelineConfigDiff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff);
-        APSARA_TEST_TRUE(!pipelineConfigDiff.IsEmpty());
-        APSARA_TEST_EQUAL(1U, pipelineConfigDiff.mAdded.size());
-        APSARA_TEST_EQUAL(pipelineConfigDiff.mAdded[0].mName, "config1");
+        auto pipelineConfigDiff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff.first);
+        APSARA_TEST_TRUE(!pipelineConfigDiff.first.IsEmpty());
+        APSARA_TEST_EQUAL(1U, pipelineConfigDiff.first.mAdded.size());
+        APSARA_TEST_EQUAL(pipelineConfigDiff.first.mAdded[0].mName, "config1");
         APSARA_TEST_EQUAL(PipelineManager::GetInstance()->GetAllConfigNames().size(), 1);
         APSARA_TEST_EQUAL(PipelineManager::GetInstance()->GetAllConfigNames()[0], "config1");
         // Process the pipeline config again
-        pipelineConfigDiff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff);
-        APSARA_TEST_TRUE(pipelineConfigDiff.IsEmpty());
-        APSARA_TEST_TRUE(pipelineConfigDiff.mAdded.empty());
+        pipelineConfigDiff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff.first);
+        APSARA_TEST_TRUE(pipelineConfigDiff.first.IsEmpty());
+        APSARA_TEST_TRUE(pipelineConfigDiff.first.mAdded.empty());
         APSARA_TEST_EQUAL(PipelineManager::GetInstance()->GetAllConfigNames().size(), 1);
         APSARA_TEST_EQUAL(PipelineManager::GetInstance()->GetAllConfigNames()[0], "config1");
 
@@ -473,8 +474,8 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
     {
         MockCommonConfigProvider provider;
         provider.Init("common_v2");
-        APSARA_TEST_EQUAL(provider.mPipelineConfigInfoMap.size(), 1);
-        APSARA_TEST_EQUAL(provider.mPipelineConfigInfoMap["config1"].status, ConfigFeedbackStatus::APPLYING);
+        APSARA_TEST_EQUAL(provider.mContinuousPipelineConfigInfoMap.size(), 1);
+        APSARA_TEST_EQUAL(provider.mContinuousPipelineConfigInfoMap["config1"].status, ConfigFeedbackStatus::APPLYING);
         APSARA_TEST_EQUAL(provider.mInstanceConfigInfoMap.size(), 1);
         APSARA_TEST_EQUAL(provider.mInstanceConfigInfoMap["instanceconfig1"].status, ConfigFeedbackStatus::APPLYING);
         provider.Stop();
@@ -508,11 +509,12 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
                   APSARA_TEST_EQUAL(heartbeatReq.sequence_num(), sequence_num);
                   sequence_num++;
                   APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsInstanceConfig);
-                  APSARA_TEST_TRUE(heartbeatReq.capabilities() & configserver::proto::v2::AcceptsPipelineConfig);
+                  APSARA_TEST_TRUE(heartbeatReq.capabilities()
+                                   & configserver::proto::v2::AcceptsContinuousPipelineConfig);
                   APSARA_TEST_EQUAL(heartbeatReq.instance_id(), provider.GetInstanceId());
                   APSARA_TEST_EQUAL(heartbeatReq.agent_type(), "LoongCollector");
-                  APSARA_TEST_EQUAL(heartbeatReq.attributes().ip(), LogFileProfiler::mIpAddr);
-                  APSARA_TEST_EQUAL(heartbeatReq.attributes().hostname(), LogFileProfiler::mHostname);
+                  APSARA_TEST_EQUAL(heartbeatReq.attributes().ip(), LoongCollectorMonitor::mIpAddr);
+                  APSARA_TEST_EQUAL(heartbeatReq.attributes().hostname(), LoongCollectorMonitor::mHostname);
                   APSARA_TEST_EQUAL(heartbeatReq.attributes().version(), ILOGTAIL_VERSION);
                   auto it = heartbeatReq.tags().begin();
                   APSARA_TEST_EQUAL(it->name(), "key1");
@@ -529,7 +531,7 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
                   heartbeatRespPb.set_capabilities(configserver::proto::v2::ResponseFlags::ReportFullState);
                   // pipeline
                   {
-                      auto pipeline = heartbeatRespPb.mutable_pipeline_config_updates();
+                      auto pipeline = heartbeatRespPb.mutable_continuous_pipeline_config_updates();
                       auto configDetail = pipeline->Add();
                       configDetail->set_name("config1");
                       configDetail->set_version(-1);
@@ -599,10 +601,9 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
                   }
                   // commandconfig
                   {
-                      auto commandconfig = heartbeatRespPb.mutable_custom_command_updates();
+                      auto commandconfig = heartbeatRespPb.mutable_onetime_pipeline_config_updates();
                       auto configDetail = commandconfig->Add();
                       configDetail->set_name("commandconfig1");
-                      configDetail->set_type("history");
                       configDetail->set_detail(R"(
                         {
                                 "enable": true,
@@ -644,20 +645,20 @@ void CommonConfigProviderUnittest::TestGetConfigUpdateAndConfigWatcher() {
         configserver::proto::v2::HeartbeatResponse heartbeatResponse;
         provider.GetConfigUpdate();
 
-        APSARA_TEST_TRUE(provider.mPipelineConfigInfoMap.empty());
+        APSARA_TEST_TRUE(provider.mContinuousPipelineConfigInfoMap.empty());
 
         // Process pipelineConfigDiff
-        PipelineConfigDiff pipelineConfigDiff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff);
-        APSARA_TEST_TRUE(!pipelineConfigDiff.IsEmpty());
-        APSARA_TEST_EQUAL(1U, pipelineConfigDiff.mRemoved.size());
-        APSARA_TEST_EQUAL(pipelineConfigDiff.mRemoved[0], "config1");
+        auto pipelineConfigDiff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff.first);
+        APSARA_TEST_TRUE(!pipelineConfigDiff.first.IsEmpty());
+        APSARA_TEST_EQUAL(1U, pipelineConfigDiff.first.mRemoved.size());
+        APSARA_TEST_EQUAL(pipelineConfigDiff.first.mRemoved[0], "config1");
         APSARA_TEST_TRUE(PipelineManager::GetInstance()->GetAllConfigNames().empty());
         // Process pipelineConfigDiff again
-        pipelineConfigDiff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff);
-        APSARA_TEST_TRUE(pipelineConfigDiff.IsEmpty());
-        APSARA_TEST_TRUE(pipelineConfigDiff.mRemoved.empty());
+        pipelineConfigDiff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        PipelineManager::GetInstance()->UpdatePipelines(pipelineConfigDiff.first);
+        APSARA_TEST_TRUE(pipelineConfigDiff.first.IsEmpty());
+        APSARA_TEST_TRUE(pipelineConfigDiff.first.mRemoved.empty());
         APSARA_TEST_TRUE(PipelineManager::GetInstance()->GetAllConfigNames().empty());
 
         APSARA_TEST_TRUE(provider.mInstanceConfigInfoMap.empty());
diff --git a/core/unittest/config/ConfigUpdateUnittest.cpp b/core/unittest/config/ConfigUpdateUnittest.cpp
index 45b6fc4c7f..c8bab49d76 100644
--- a/core/unittest/config/ConfigUpdateUnittest.cpp
+++ b/core/unittest/config/ConfigUpdateUnittest.cpp
@@ -19,11 +19,13 @@
 #include <vector>
 
 #include "config/PipelineConfig.h"
-#include "config/watcher/ConfigWatcher.h"
+#include "config/watcher/PipelineConfigWatcher.h"
 #include "pipeline/Pipeline.h"
 #include "pipeline/PipelineManager.h"
 #include "pipeline/plugin/PluginRegistry.h"
+#include "task_pipeline/TaskPipelineManager.h"
 #include "unittest/Unittest.h"
+#include "unittest/plugin/PluginMock.h"
 
 using namespace std;
 
@@ -33,6 +35,11 @@ class PipelineMock : public Pipeline {
 public:
     bool Init(PipelineConfig&& config) {
         mConfig = std::move(config.mDetail);
+        WriteMetrics::GetInstance()->PrepareMetricsRecordRef(
+            mMetricsRecordRef,
+            MetricCategory::METRIC_CATEGORY_PIPELINE,
+            {{METRIC_LABEL_KEY_PROJECT, mContext.GetProjectName()}, {METRIC_LABEL_KEY_PIPELINE_NAME, mName}});
+        mStartTime = mMetricsRecordRef.CreateIntGauge(METRIC_PIPELINE_START_TIME);
         return (*mConfig)["valid"].asBool();
     }
 };
@@ -74,19 +81,24 @@ class ConfigUpdateUnittest : public testing::Test {
 protected:
     static void SetUpTestCase() {
         PluginRegistry::GetInstance()->LoadPlugins();
-        ConfigWatcher::GetInstance()->SetPipelineManager(PipelineManagerMock::GetInstance());
+        LoadTaskMock();
+        PipelineConfigWatcher::GetInstance()->SetPipelineManager(PipelineManagerMock::GetInstance());
     }
 
-    static void TearDownTestCase() { PluginRegistry::GetInstance()->UnloadPlugins(); }
+    static void TearDownTestCase() {
+        PluginRegistry::GetInstance()->UnloadPlugins();
+        TaskRegistry::GetInstance()->UnloadPlugins();
+    }
 
     void SetUp() override {
         filesystem::create_directories(configDir);
-        ConfigWatcher::GetInstance()->AddSource(configDir.string());
+        PipelineConfigWatcher::GetInstance()->AddSource(configDir.string());
     }
 
     void TearDown() override {
         PipelineManagerMock::GetInstance()->ClearEnvironment();
-        ConfigWatcher::GetInstance()->ClearEnvironment();
+        TaskPipelineManager::GetInstance()->ClearEnvironment();
+        PipelineConfigWatcher::GetInstance()->ClearEnvironment();
         filesystem::remove_all(configDir);
     }
 
@@ -94,13 +106,17 @@ class ConfigUpdateUnittest : public testing::Test {
     void PrepareInitialSettings() const;
     void GenerateInitialConfigs() const;
 
-    filesystem::path configDir = "./config";
-    vector<filesystem::path> configPaths = {configDir / "invalid_format.json",
-                                            configDir / "invalid_detail.json",
-                                            configDir / "enabled_valid.json",
-                                            configDir / "disabled_valid.json"};
-    const string invalidConfigWithInvalidFormat = R"({"inputs":{}})";
-    const string invalidConfigWithInvalidDetail = R"(
+    filesystem::path configDir = "./continuous_pipeline_config";
+    vector<filesystem::path> pipelineConfigPaths = {configDir / "pipeline_invalid_format.json",
+                                                    configDir / "pipeline_invalid_detail.json",
+                                                    configDir / "pipeline_enabled_valid.json",
+                                                    configDir / "pipeline_disabled_valid.json"};
+    vector<filesystem::path> taskConfigPaths = {configDir / "task_invalid_format.json",
+                                                configDir / "task_invalid_detail.json",
+                                                configDir / "task_enabled_valid.json",
+                                                configDir / "task_disabled_valid.json"};
+    const string invalidPipelineConfigWithInvalidFormat = R"({"inputs":{}})";
+    const string invalidPipelineConfigWithInvalidDetail = R"(
 {
     "valid": false,
     "inputs": [
@@ -115,7 +131,7 @@ class ConfigUpdateUnittest : public testing::Test {
     ]
 }
     )";
-    const string enabledValidConfig = R"(
+    const string enabledValidPipelineConfig = R"(
 {
     "valid": true,
     "inputs": [
@@ -130,7 +146,7 @@ class ConfigUpdateUnittest : public testing::Test {
     ]
 }
     )";
-    const string disabledValidConfig = R"(
+    const string disabledValidPipelineConfig = R"(
 {
     "valid": true,
     "enable": false,
@@ -147,8 +163,8 @@ class ConfigUpdateUnittest : public testing::Test {
 }
     )";
 
-    const string newInvalidConfigWithInvalidFormat = R"({"flushers":{}})";
-    const string newInvalidConfigWithInvalidDetail = R"(
+    const string newInvalidPipelineConfigWithInvalidFormat = R"({"flushers":{}})";
+    const string newInvalidPipelineConfigWithInvalidDetail = R"(
 {
     "valid": false,
     "inputs": [
@@ -163,7 +179,7 @@ class ConfigUpdateUnittest : public testing::Test {
     ]
 }
     )";
-    const string newEnabledValidConfig = R"(
+    const string newEnabledValidPipelineConfig = R"(
 {
     "valid": true,
     "inputs": [
@@ -178,7 +194,7 @@ class ConfigUpdateUnittest : public testing::Test {
     ]
 }
     )";
-    const string newDisabledValidConfig = R"(
+    const string newDisabledValidPipelineConfig = R"(
 {
     "valid": true,
     "enable": false,
@@ -192,182 +208,386 @@ class ConfigUpdateUnittest : public testing::Test {
             "Type": "flusher_sls"
         }
     ]
+}
+    )";
+
+    const string invalidTaskConfigWithInvalidFormat = R"({"task":[]})";
+    const string invalidTaskConfigWithInvalidDetail = R"(
+{
+    "task": {
+        "Type": "task_mock",
+        "Valid": false
+    }
+}
+    )";
+    const string enabledValidTaskConfig = R"(
+{
+    "task": {
+        "Type": "task_mock"
+    }
+}
+    )";
+    const string disabledValidTaskConfig = R"(
+{
+    "enable": false,
+    "task": {
+        "Type": "task_mock"
+    }
+}
+    )";
+
+    const string newInvalidTaskConfigWithInvalidFormat = R"({"task":""})";
+    const string newInvalidTaskConfigWithInvalidDetail = R"(
+{
+    "task": {
+        "Type": "task_mock",
+        "Valid": false,
+        "Other": true
+    }
+}
+    )";
+    const string newEnabledValidTaskConfig = R"(
+{
+    "task": {
+        "Type": "task_mock",
+        "Other": true
+    }
+}
+    )";
+    const string newDisabledValidTaskConfig = R"(
+{
+    "enable": false,
+    "task": {
+        "Type": "task_mock",
+        "Other": true
+    }
 }
     )";
 };
 
 void ConfigUpdateUnittest::OnStartUp() const {
-    PipelineConfigDiff diff;
-    diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_TRUE(diff.IsEmpty());
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_TRUE(diff.first.IsEmpty());
+    APSARA_TEST_TRUE(diff.second.IsEmpty());
 
     GenerateInitialConfigs();
-    diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_FALSE(diff.IsEmpty());
-    APSARA_TEST_EQUAL(2U, diff.mAdded.size());
-    APSARA_TEST_TRUE(diff.mModified.empty());
-    APSARA_TEST_TRUE(diff.mRemoved.empty());
-
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_FALSE(diff.first.IsEmpty());
+    APSARA_TEST_EQUAL(2U, diff.first.mAdded.size());
+    APSARA_TEST_TRUE(diff.first.mModified.empty());
+    APSARA_TEST_TRUE(diff.first.mRemoved.empty());
+    APSARA_TEST_FALSE(diff.second.IsEmpty());
+    APSARA_TEST_EQUAL(2U, diff.second.mAdded.size());
+    APSARA_TEST_TRUE(diff.second.mModified.empty());
+    APSARA_TEST_TRUE(diff.second.mRemoved.empty());
+
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
+    auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+    APSARA_TEST_NOT_EQUAL(nullptr, ptr);
+    APSARA_TEST_EQUAL(TaskMock::sName, ptr->GetPlugin()->Name());
+    APSARA_TEST_TRUE(static_cast<TaskMock*>(ptr->GetPlugin())->mIsRunning);
 }
 
 void ConfigUpdateUnittest::OnConfigDelete() const {
     PrepareInitialSettings();
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
 
     filesystem::remove_all(configDir);
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_FALSE(diff.IsEmpty());
-    APSARA_TEST_TRUE(diff.mAdded.empty());
-    APSARA_TEST_TRUE(diff.mModified.empty());
-    APSARA_TEST_EQUAL(1U, diff.mRemoved.size());
-
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_FALSE(diff.first.IsEmpty());
+    APSARA_TEST_TRUE(diff.first.mAdded.empty());
+    APSARA_TEST_TRUE(diff.first.mModified.empty());
+    APSARA_TEST_EQUAL(1U, diff.first.mRemoved.size());
+    APSARA_TEST_FALSE(diff.second.IsEmpty());
+    APSARA_TEST_TRUE(diff.second.mAdded.empty());
+    APSARA_TEST_TRUE(diff.second.mModified.empty());
+    APSARA_TEST_EQUAL(1U, diff.second.mRemoved.size());
+
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
     APSARA_TEST_TRUE(PipelineManagerMock::GetInstance()->GetAllConfigNames().empty());
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
+    APSARA_TEST_TRUE(TaskPipelineManager::GetInstance()->GetAllPipelineNames().empty());
 }
 
 void ConfigUpdateUnittest::OnConfigToInvalidFormat() const {
     PrepareInitialSettings();
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
 
-    for (const auto& path : configPaths) {
+    for (const auto& path : pipelineConfigPaths) {
+        ofstream fout(path, ios::trunc);
+        fout << newInvalidPipelineConfigWithInvalidFormat;
+    }
+    for (const auto& path : taskConfigPaths) {
         ofstream fout(path, ios::trunc);
-        fout << newInvalidConfigWithInvalidFormat;
+        fout << newInvalidTaskConfigWithInvalidFormat;
     }
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_TRUE(diff.IsEmpty());
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_TRUE(diff.first.IsEmpty());
+    APSARA_TEST_TRUE(diff.second.IsEmpty());
 }
 
 void ConfigUpdateUnittest::OnConfigToInvalidDetail() const {
     PrepareInitialSettings();
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
 
-    for (const auto& path : configPaths) {
+    for (const auto& path : pipelineConfigPaths) {
         ofstream fout(path, ios::trunc);
-        fout << newInvalidConfigWithInvalidDetail;
+        fout << newInvalidPipelineConfigWithInvalidDetail;
     }
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_FALSE(diff.IsEmpty());
-    APSARA_TEST_EQUAL(3U, diff.mAdded.size());
-    APSARA_TEST_EQUAL(1U, diff.mModified.size());
-    APSARA_TEST_TRUE(diff.mRemoved.empty());
-
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    for (const auto& path : taskConfigPaths) {
+        ofstream fout(path, ios::trunc);
+        fout << newInvalidTaskConfigWithInvalidDetail;
+    }
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_FALSE(diff.first.IsEmpty());
+    APSARA_TEST_EQUAL(3U, diff.first.mAdded.size());
+    APSARA_TEST_EQUAL(1U, diff.first.mModified.size());
+    APSARA_TEST_TRUE(diff.first.mRemoved.empty());
+    APSARA_TEST_FALSE(diff.second.IsEmpty());
+    APSARA_TEST_EQUAL(3U, diff.second.mAdded.size());
+    APSARA_TEST_EQUAL(1U, diff.second.mModified.size());
+    APSARA_TEST_TRUE(diff.second.mRemoved.empty());
+
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
+    auto& newPtr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+    APSARA_TEST_EQUAL(ptr, newPtr);
+    APSARA_TEST_EQUAL(TaskMock::sName, newPtr->GetPlugin()->Name());
+    APSARA_TEST_TRUE(static_cast<TaskMock*>(newPtr->GetPlugin())->mIsRunning);
 }
 
 void ConfigUpdateUnittest::OnConfigToEnabledValid() const {
     PrepareInitialSettings();
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
 
-    for (const auto& path : configPaths) {
+    for (const auto& path : pipelineConfigPaths) {
         ofstream fout(path, ios::trunc);
-        fout << newEnabledValidConfig;
+        fout << newEnabledValidPipelineConfig;
     }
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_FALSE(diff.IsEmpty());
-    APSARA_TEST_EQUAL(3U, diff.mAdded.size());
-    APSARA_TEST_EQUAL(1U, diff.mModified.size());
-    APSARA_TEST_TRUE(diff.mRemoved.empty());
-
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    for (const auto& path : taskConfigPaths) {
+        ofstream fout(path, ios::trunc);
+        fout << newEnabledValidTaskConfig;
+    }
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_FALSE(diff.first.IsEmpty());
+    APSARA_TEST_EQUAL(3U, diff.first.mAdded.size());
+    APSARA_TEST_EQUAL(1U, diff.first.mModified.size());
+    APSARA_TEST_TRUE(diff.first.mRemoved.empty());
+    APSARA_TEST_FALSE(diff.second.IsEmpty());
+    APSARA_TEST_EQUAL(3U, diff.second.mAdded.size());
+    APSARA_TEST_EQUAL(1U, diff.second.mModified.size());
+    APSARA_TEST_TRUE(diff.second.mRemoved.empty());
+
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
     APSARA_TEST_EQUAL(4U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
+    APSARA_TEST_EQUAL(4U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
+    {
+        auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_invalid_format");
+        APSARA_TEST_NOT_EQUAL(nullptr, ptr);
+        APSARA_TEST_EQUAL(TaskMock::sName, ptr->GetPlugin()->Name());
+        APSARA_TEST_TRUE(static_cast<TaskMock*>(ptr->GetPlugin())->mIsRunning);
+    }
+    {
+        auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_invalid_detail");
+        APSARA_TEST_NOT_EQUAL(nullptr, ptr);
+        APSARA_TEST_EQUAL(TaskMock::sName, ptr->GetPlugin()->Name());
+        APSARA_TEST_TRUE(static_cast<TaskMock*>(ptr->GetPlugin())->mIsRunning);
+    }
+    {
+        auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+        APSARA_TEST_NOT_EQUAL(nullptr, ptr);
+        APSARA_TEST_EQUAL(TaskMock::sName, ptr->GetPlugin()->Name());
+        APSARA_TEST_TRUE(static_cast<TaskMock*>(ptr->GetPlugin())->mIsRunning);
+    }
+    {
+        auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_disabled_valid");
+        APSARA_TEST_NOT_EQUAL(nullptr, ptr);
+        APSARA_TEST_EQUAL(TaskMock::sName, ptr->GetPlugin()->Name());
+        APSARA_TEST_TRUE(static_cast<TaskMock*>(ptr->GetPlugin())->mIsRunning);
+    }
 }
 
 void ConfigUpdateUnittest::OnConfigToDisabledValid() const {
     PrepareInitialSettings();
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
 
-    for (const auto& path : configPaths) {
+    for (const auto& path : pipelineConfigPaths) {
         ofstream fout(path, ios::trunc);
-        fout << newDisabledValidConfig;
+        fout << newDisabledValidPipelineConfig;
     }
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_FALSE(diff.IsEmpty());
-    APSARA_TEST_TRUE(diff.mAdded.empty());
-    APSARA_TEST_TRUE(diff.mModified.empty());
-    APSARA_TEST_EQUAL(1U, diff.mRemoved.size());
+    for (const auto& path : taskConfigPaths) {
+        ofstream fout(path, ios::trunc);
+        fout << newDisabledValidTaskConfig;
+    }
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_FALSE(diff.first.IsEmpty());
+    APSARA_TEST_TRUE(diff.first.mAdded.empty());
+    APSARA_TEST_TRUE(diff.first.mModified.empty());
+    APSARA_TEST_EQUAL(1U, diff.first.mRemoved.size());
 
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
     APSARA_TEST_TRUE(PipelineManagerMock::GetInstance()->GetAllConfigNames().empty());
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
+    APSARA_TEST_TRUE(TaskPipelineManager::GetInstance()->GetAllPipelineNames().empty());
 }
 
 void ConfigUpdateUnittest::OnConfigUnchanged() const {
     PrepareInitialSettings();
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
 
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_TRUE(diff.IsEmpty());
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_TRUE(diff.first.IsEmpty());
+    APSARA_TEST_TRUE(diff.second.IsEmpty());
 
     GenerateInitialConfigs();
     // forcibly overwrite the modification time, since no update would be detected if the file content stays the same.
-    for (const auto& path : configPaths) {
+    for (const auto& path : pipelineConfigPaths) {
         filesystem::file_time_type fTime = filesystem::last_write_time(path);
         filesystem::last_write_time(path, fTime + 1s);
     }
-    diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_FALSE(diff.IsEmpty());
-    APSARA_TEST_EQUAL(1U, diff.mAdded.size());
-    APSARA_TEST_TRUE(diff.mModified.empty());
-    APSARA_TEST_TRUE(diff.mRemoved.empty());
-
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    for (const auto& path : taskConfigPaths) {
+        filesystem::file_time_type fTime = filesystem::last_write_time(path);
+        filesystem::last_write_time(path, fTime + 1s);
+    }
+    diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_FALSE(diff.first.IsEmpty());
+    APSARA_TEST_EQUAL(1U, diff.first.mAdded.size());
+    APSARA_TEST_TRUE(diff.first.mModified.empty());
+    APSARA_TEST_TRUE(diff.first.mRemoved.empty());
+    APSARA_TEST_FALSE(diff.second.IsEmpty());
+    APSARA_TEST_EQUAL(1U, diff.second.mAdded.size());
+    APSARA_TEST_TRUE(diff.second.mModified.empty());
+    APSARA_TEST_TRUE(diff.second.mRemoved.empty());
+
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
+    auto& newPtr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+    APSARA_TEST_EQUAL(ptr, newPtr);
+    APSARA_TEST_EQUAL(TaskMock::sName, newPtr->GetPlugin()->Name());
+    APSARA_TEST_TRUE(static_cast<TaskMock*>(newPtr->GetPlugin())->mIsRunning);
 }
 
 void ConfigUpdateUnittest::OnConfigAdded() const {
     PrepareInitialSettings();
     APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    APSARA_TEST_EQUAL(1U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
 
     {
-        ofstream fout(configDir / "add_invalid_format.json", ios::trunc);
-        fout << invalidConfigWithInvalidFormat;
+        ofstream fout(configDir / "add_pipeline_invalid_format.json", ios::trunc);
+        fout << invalidPipelineConfigWithInvalidFormat;
     }
     {
-        ofstream fout(configDir / "add_invalid_detail.json", ios::trunc);
-        fout << invalidConfigWithInvalidDetail;
+        ofstream fout(configDir / "add_pipeline_invalid_detail.json", ios::trunc);
+        fout << invalidPipelineConfigWithInvalidDetail;
     }
     {
-        ofstream fout(configDir / "add_enabled_valid.json", ios::trunc);
-        fout << enabledValidConfig;
+        ofstream fout(configDir / "add_pipeline_enabled_valid.json", ios::trunc);
+        fout << enabledValidPipelineConfig;
     }
     {
-        ofstream fout(configDir / "add_disabled_valid.json", ios::trunc);
-        fout << disabledValidConfig;
+        ofstream fout(configDir / "add_pipeline_disabled_valid.json", ios::trunc);
+        fout << disabledValidPipelineConfig;
     }
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    APSARA_TEST_FALSE(diff.IsEmpty());
-    APSARA_TEST_EQUAL(2U, diff.mAdded.size());
-    APSARA_TEST_TRUE(diff.mModified.empty());
-    APSARA_TEST_TRUE(diff.mRemoved.empty());
-
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    {
+        ofstream fout(configDir / "add_task_invalid_format.json", ios::trunc);
+        fout << invalidTaskConfigWithInvalidFormat;
+    }
+    {
+        ofstream fout(configDir / "add_task_invalid_detail.json", ios::trunc);
+        fout << invalidTaskConfigWithInvalidDetail;
+    }
+    {
+        ofstream fout(configDir / "add_task_enabled_valid.json", ios::trunc);
+        fout << enabledValidTaskConfig;
+    }
+    {
+        ofstream fout(configDir / "add_task_disabled_valid.json", ios::trunc);
+        fout << disabledValidTaskConfig;
+    }
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    APSARA_TEST_FALSE(diff.first.IsEmpty());
+    APSARA_TEST_EQUAL(2U, diff.first.mAdded.size());
+    APSARA_TEST_TRUE(diff.first.mModified.empty());
+    APSARA_TEST_TRUE(diff.first.mRemoved.empty());
+    APSARA_TEST_FALSE(diff.second.IsEmpty());
+    APSARA_TEST_EQUAL(2U, diff.second.mAdded.size());
+    APSARA_TEST_TRUE(diff.second.mModified.empty());
+    APSARA_TEST_TRUE(diff.second.mRemoved.empty());
+
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
     APSARA_TEST_EQUAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size());
+    auto& ptr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
+    APSARA_TEST_EQUAL(2U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
+    {
+        auto& newPtr = TaskPipelineManager::GetInstance()->FindPipelineByName("task_enabled_valid");
+        APSARA_TEST_EQUAL(ptr, newPtr);
+        APSARA_TEST_EQUAL(TaskMock::sName, newPtr->GetPlugin()->Name());
+        APSARA_TEST_TRUE(static_cast<TaskMock*>(newPtr->GetPlugin())->mIsRunning);
+    }
+    {
+        auto& newPtr = TaskPipelineManager::GetInstance()->FindPipelineByName("add_task_enabled_valid");
+        APSARA_TEST_NOT_EQUAL(nullptr, newPtr);
+        APSARA_TEST_EQUAL(TaskMock::sName, newPtr->GetPlugin()->Name());
+        APSARA_TEST_TRUE(static_cast<TaskMock*>(newPtr->GetPlugin())->mIsRunning);
+    }
 }
 
 void ConfigUpdateUnittest::PrepareInitialSettings() const {
     GenerateInitialConfigs();
-    PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-    PipelineManagerMock::GetInstance()->UpdatePipelines(diff);
+    auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+    PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first);
+    TaskPipelineManager::GetInstance()->UpdatePipelines(diff.second);
 }
 
 void ConfigUpdateUnittest::GenerateInitialConfigs() const {
     {
-        ofstream fout(configPaths[0], ios::trunc);
-        fout << invalidConfigWithInvalidFormat;
+        ofstream fout(pipelineConfigPaths[0], ios::trunc);
+        fout << invalidPipelineConfigWithInvalidFormat;
+    }
+    {
+        ofstream fout(pipelineConfigPaths[1], ios::trunc);
+        fout << invalidPipelineConfigWithInvalidDetail;
+    }
+    {
+        ofstream fout(pipelineConfigPaths[2], ios::trunc);
+        fout << enabledValidPipelineConfig;
+    }
+    {
+        ofstream fout(pipelineConfigPaths[3], ios::trunc);
+        fout << disabledValidPipelineConfig;
+    }
+    {
+        ofstream fout(taskConfigPaths[0], ios::trunc);
+        fout << invalidTaskConfigWithInvalidFormat;
     }
     {
-        ofstream fout(configPaths[1], ios::trunc);
-        fout << invalidConfigWithInvalidDetail;
+        ofstream fout(taskConfigPaths[1], ios::trunc);
+        fout << invalidTaskConfigWithInvalidDetail;
     }
     {
-        ofstream fout(configPaths[2], ios::trunc);
-        fout << enabledValidConfig;
+        ofstream fout(taskConfigPaths[2], ios::trunc);
+        fout << enabledValidTaskConfig;
     }
     {
-        ofstream fout(configPaths[3], ios::trunc);
-        fout << disabledValidConfig;
+        ofstream fout(taskConfigPaths[3], ios::trunc);
+        fout << disabledValidTaskConfig;
     }
 }
 
diff --git a/core/unittest/config/ConfigUpdatorUnittest.cpp b/core/unittest/config/ConfigUpdatorUnittest.cpp
index 13e4846bd3..1ef9a6825f 100644
--- a/core/unittest/config/ConfigUpdatorUnittest.cpp
+++ b/core/unittest/config/ConfigUpdatorUnittest.cpp
@@ -17,29 +17,31 @@
 #include <unistd.h>
 #endif
 #include <fcntl.h>
-#include <cstdlib>
+#include <json/json.h>
 #include <string.h>
-#include <thread>
-#include <memory>
+
+#include <cstdlib>
 #include <fstream>
-#include <json/json.h>
-#include "file_server/event_handler/EventHandler.h"
-#include "file_server/ConfigManager.h"
-#include "file_server/reader/LogFileReader.h"
+#include <memory>
+#include <thread>
+
+#include "AlarmManager.h"
 #include "AppConfig.h"
-#include "Monitor.h"
-#include "EventDispatcher.h"
 #include "CheckPointManager.h"
-#include "file_server/event_handler/LogInput.h"
+#include "EventDispatcher.h"
+#include "Monitor.h"
 #include "Sender.h"
-#include "sls_logs.pb.h"
-#include "AlarmManager.h"
+#include "common/FileSystemUtil.h"
 #include "common/Flags.h"
 #include "common/Lock.h"
 #include "constants/Constants.h"
-#include "common/FileSystemUtil.h"
+#include "file_server/ConfigManager.h"
+#include "file_server/event_handler/EventHandler.h"
+#include "file_server/event_handler/LogInput.h"
+#include "file_server/reader/LogFileReader.h"
 #include "logger/Logger.h"
 #include "sdk/Common.h"
+#include "sls_logs.pb.h"
 
 using namespace std;
 using namespace logtail::sdk;
@@ -1704,7 +1706,7 @@ void ConfigUpdatorUnittest::GetLogContent(LogType logType, char* buffer) {
 
     if (logType == REGEX_LOG) {
         strftime(timeBuffer, sizeof(timeBuffer), " - - [%d/%b/%Y:%R:%S +0800] ", &timeInfo);
-        sprintf(buffer, "%s%s%s", LogFileProfiler::GetInstance()->mIpAddr.c_str(), timeBuffer, buffer2);
+        sprintf(buffer, "%s%s%s", LoongCollectorMonitor::GetInstance()->mIpAddr.c_str(), timeBuffer, buffer2);
         if ((rand()) % 2 == 0)
             strcat(buffer, buffer3);
         strcat(buffer, "\n");
@@ -2637,16 +2639,16 @@ void ConfigUpdatorUnittest::TestValidWildcardPath2() {
 
 void ConfigUpdatorUnittest::TestWithinMaxDepth() {
     // No wildcard.
-    PipelineConfig* cfg_1
-        = new PipelineConfig(PS + "abc" + PS + "de" + PS + "f", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 0, "cat");
+    PipelineConfig* cfg_1 = new PipelineConfig(
+        PS + "abc" + PS + "de" + PS + "f", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 0, "cat");
     EXPECT_EQ(cfg_1->WithinMaxDepth(PS + "abc"), false);
     EXPECT_EQ(cfg_1->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f"), true);
     EXPECT_EQ(cfg_1->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "fx"), false);
     EXPECT_EQ(cfg_1->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f" + PS + "ghi"), false);
     delete cfg_1;
     // To be compatible with old settings
-    PipelineConfig* cfg_2
-        = new PipelineConfig(PS + "abc" + PS + "de" + PS + "f", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, -1, "cat");
+    PipelineConfig* cfg_2 = new PipelineConfig(
+        PS + "abc" + PS + "de" + PS + "f", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, -1, "cat");
     EXPECT_EQ(cfg_2->WithinMaxDepth(PS + "abc"), true);
     EXPECT_EQ(cfg_2->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f"), true);
     EXPECT_EQ(cfg_2->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "fx"), true);
@@ -2654,8 +2656,8 @@ void ConfigUpdatorUnittest::TestWithinMaxDepth() {
     EXPECT_EQ(cfg_2->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f" + PS + "ghi" + PS + "agec" + PS + "egegt"), true);
     delete cfg_2;
 
-    PipelineConfig* cfg_3
-        = new PipelineConfig(PS + "abc" + PS + "de" + PS + "f", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 3, "cat");
+    PipelineConfig* cfg_3 = new PipelineConfig(
+        PS + "abc" + PS + "de" + PS + "f", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 3, "cat");
     EXPECT_EQ(cfg_3->WithinMaxDepth(PS + "abc"), false);
     EXPECT_EQ(cfg_3->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f"), true);
     EXPECT_EQ(cfg_3->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "fx"), false);
@@ -2671,16 +2673,27 @@ void ConfigUpdatorUnittest::TestWithinMaxDepth() {
     delete cfg_3;
 
     // Wildcard.
-    PipelineConfig* cfg_4
-        = new PipelineConfig(PS + "ab?" + PS + "de" + PS + "*", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 0, "cat");
+    PipelineConfig* cfg_4 = new PipelineConfig(
+        PS + "ab?" + PS + "de" + PS + "*", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 0, "cat");
     EXPECT_EQ(cfg_4->WithinMaxDepth(PS + "abc"), false);
     EXPECT_EQ(cfg_4->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f"), true);
     EXPECT_EQ(cfg_4->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "xyz"), true);
     EXPECT_EQ(cfg_4->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f" + PS + "ghi"), false);
     delete cfg_4;
     // To be compatible with old settings.
-    PipelineConfig* cfg_5 = new PipelineConfig(
-        PS + "abc" + PS + "de?" + PS + "f*" + PS + "xyz", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, -1, "cat", "");
+    PipelineConfig* cfg_5 = new PipelineConfig(PS + "abc" + PS + "de?" + PS + "f*" + PS + "xyz",
+                                               "x.log",
+                                               REGEX_LOG,
+                                               "a",
+                                               "",
+                                               "",
+                                               "",
+                                               "prj",
+                                               true,
+                                               0,
+                                               -1,
+                                               "cat",
+                                               "");
     EXPECT_EQ(cfg_5->WithinMaxDepth(PS + "abc"), true);
     EXPECT_EQ(cfg_5->WithinMaxDepth(PS + "abc" + PS + "def" + PS + "fgz"), true);
     EXPECT_EQ(cfg_5->WithinMaxDepth(PS + "abc" + PS + "def" + PS + "fgz" + PS + "xyz0"), true);
@@ -2691,8 +2704,8 @@ void ConfigUpdatorUnittest::TestWithinMaxDepth() {
               true);
     delete cfg_5;
 
-    PipelineConfig* cfg_6
-        = new PipelineConfig(PS + "abc" + PS + "d?" + PS + "f*", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 3, "cat");
+    PipelineConfig* cfg_6 = new PipelineConfig(
+        PS + "abc" + PS + "d?" + PS + "f*", "x.log", REGEX_LOG, "a", "", "", "", "prj", true, 0, 3, "cat");
     EXPECT_EQ(cfg_6->WithinMaxDepth(PS + "abc"), false);
     EXPECT_EQ(cfg_6->WithinMaxDepth(PS + "abc" + PS + "de"), false);
     EXPECT_EQ(cfg_6->WithinMaxDepth(PS + "abc" + PS + "de" + PS + "f"), true);
diff --git a/core/unittest/config/ConfigWatcherUnittest.cpp b/core/unittest/config/ConfigWatcherUnittest.cpp
index 20df15856f..7ad1cc1fe0 100644
--- a/core/unittest/config/ConfigWatcherUnittest.cpp
+++ b/core/unittest/config/ConfigWatcherUnittest.cpp
@@ -16,8 +16,8 @@
 #include <fstream>
 
 #include "config/ConfigDiff.h"
-#include "config/watcher/ConfigWatcher.h"
 #include "config/watcher/InstanceConfigWatcher.h"
+#include "config/watcher/PipelineConfigWatcher.h"
 #include "pipeline/plugin/PluginRegistry.h"
 #include "unittest/Unittest.h"
 
@@ -33,29 +33,31 @@ class ConfigWatcherUnittest : public testing::Test {
 
 protected:
     void SetUp() override {
-        ConfigWatcher::GetInstance()->AddSource(configDir.string());
+        PipelineConfigWatcher::GetInstance()->AddSource(configDir.string());
         InstanceConfigWatcher::GetInstance()->AddSource(instanceConfigDir.string());
     }
 
-    void TearDown() override { ConfigWatcher::GetInstance()->ClearEnvironment(); }
+    void TearDown() override { PipelineConfigWatcher::GetInstance()->ClearEnvironment(); }
 
 private:
     static const filesystem::path configDir;
     static const filesystem::path instanceConfigDir;
 };
 
-const filesystem::path ConfigWatcherUnittest::configDir = "./pipeline_config";
+const filesystem::path ConfigWatcherUnittest::configDir = "./continuous_pipeline_config";
 const filesystem::path ConfigWatcherUnittest::instanceConfigDir = "./instance_config";
 
 void ConfigWatcherUnittest::InvalidConfigDirFound() const {
     {
-        PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        APSARA_TEST_TRUE(diff.IsEmpty());
-
-        { ofstream fout("config"); }
-        diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        APSARA_TEST_TRUE(diff.IsEmpty());
-        filesystem::remove_all("config");
+        auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        APSARA_TEST_TRUE(diff.first.IsEmpty());
+        APSARA_TEST_TRUE(diff.second.IsEmpty());
+
+        { ofstream fout("continuous_pipeline_config"); }
+        diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        APSARA_TEST_TRUE(diff.first.IsEmpty());
+        APSARA_TEST_TRUE(diff.second.IsEmpty());
+        filesystem::remove_all("continuous_pipeline_config");
     }
     {
         InstanceConfigDiff diff = InstanceConfigWatcher::GetInstance()->CheckConfigDiff();
@@ -79,8 +81,9 @@ void ConfigWatcherUnittest::InvalidConfigFileFound() const {
             ofstream fout(configDir / "invalid_format.json");
             fout << "[}";
         }
-        PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        APSARA_TEST_TRUE(diff.IsEmpty());
+        auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        APSARA_TEST_TRUE(diff.first.IsEmpty());
+        APSARA_TEST_TRUE(diff.second.IsEmpty());
         filesystem::remove_all(configDir);
     }
     {
@@ -102,8 +105,8 @@ void ConfigWatcherUnittest::InvalidConfigFileFound() const {
 void ConfigWatcherUnittest::DuplicateConfigs() const {
     {
         PluginRegistry::GetInstance()->LoadPlugins();
-        ConfigWatcher::GetInstance()->AddSource("dir1");
-        ConfigWatcher::GetInstance()->AddSource("dir2");
+        PipelineConfigWatcher::GetInstance()->AddSource("dir1");
+        PipelineConfigWatcher::GetInstance()->AddSource("dir2");
 
         filesystem::create_directories("config");
         filesystem::create_directories("dir1");
@@ -127,9 +130,9 @@ void ConfigWatcherUnittest::DuplicateConfigs() const {
         )";
         }
         { ofstream fout("dir2/config.json"); }
-        PipelineConfigDiff diff = ConfigWatcher::GetInstance()->CheckConfigDiff();
-        APSARA_TEST_FALSE(diff.IsEmpty());
-        APSARA_TEST_EQUAL(1U, diff.mAdded.size());
+        auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff();
+        APSARA_TEST_FALSE(diff.first.IsEmpty());
+        APSARA_TEST_EQUAL(1U, diff.first.mAdded.size());
 
         filesystem::remove_all("dir1");
         filesystem::remove_all("dir2");
diff --git a/core/unittest/config/TaskConfigUnittest.cpp b/core/unittest/config/TaskConfigUnittest.cpp
new file mode 100644
index 0000000000..658b459914
--- /dev/null
+++ b/core/unittest/config/TaskConfigUnittest.cpp
@@ -0,0 +1,151 @@
+// Copyright 2023 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <json/json.h>
+
+#include <memory>
+#include <string>
+
+#include "common/JsonUtil.h"
+#include "config/TaskConfig.h"
+#include "task_pipeline/TaskRegistry.h"
+#include "unittest/Unittest.h"
+#include "unittest/plugin/PluginMock.h"
+
+using namespace std;
+
+namespace logtail {
+
+class TaskConfigUnittest : public testing::Test {
+public:
+    void HandleValidConfig() const;
+    void HandleInvalidCreateTime() const;
+    void HandleInvalidTask() const;
+
+protected:
+    static void SetUpTestCase() { LoadTaskMock(); }
+    static void TearDownTestCase() { TaskRegistry::GetInstance()->UnloadPlugins(); }
+
+private:
+    const string configName = "test";
+};
+
+void TaskConfigUnittest::HandleValidConfig() const {
+    unique_ptr<Json::Value> configJson;
+    string configStr, errorMsg;
+    unique_ptr<TaskConfig> config;
+
+    configStr = R"(
+        {
+            "createTime": 1234567890,
+            "task": {
+                "Type": "task_mock"
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_TRUE(config->Parse());
+    APSARA_TEST_EQUAL(configName, config->mName);
+    APSARA_TEST_EQUAL(1234567890U, config->mCreateTime);
+}
+
+void TaskConfigUnittest::HandleInvalidCreateTime() const {
+    unique_ptr<Json::Value> configJson;
+    string configStr, errorMsg;
+    unique_ptr<TaskConfig> config;
+
+    configStr = R"(
+        {
+            "createTime": "1234567890",
+            "task": {
+                "Type": "task_mock"
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_TRUE(config->Parse());
+    APSARA_TEST_EQUAL(0U, config->mCreateTime);
+}
+
+void TaskConfigUnittest::HandleInvalidTask() const {
+    unique_ptr<Json::Value> configJson;
+    string configStr, errorMsg;
+    unique_ptr<TaskConfig> config;
+
+    // task is not of type object
+    configStr = R"(
+        {
+            "createTime": "123456789",
+            "task": []
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_FALSE(config->Parse());
+
+    // no Type
+    configStr = R"(
+        {
+            "createTime": "123456789",
+            "task": {
+                "Name": "task_mock"
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_FALSE(config->Parse());
+
+    // Type is not of type string
+    configStr = R"(
+        {
+            "createTime": "123456789",
+            "task": {
+                "Type": true
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_FALSE(config->Parse());
+
+    // unsupported Task
+    configStr = R"(
+        {
+            "createTime": "123456789",
+            "task": {
+                "Type": "task_unknown"
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_FALSE(config->Parse());
+}
+
+UNIT_TEST_CASE(TaskConfigUnittest, HandleValidConfig)
+UNIT_TEST_CASE(TaskConfigUnittest, HandleInvalidCreateTime)
+UNIT_TEST_CASE(TaskConfigUnittest, HandleInvalidTask)
+
+} // namespace logtail
+
+UNIT_TEST_MAIN
diff --git a/core/unittest/event/BlockedEventManagerUnittest.cpp b/core/unittest/event/BlockedEventManagerUnittest.cpp
index ec5489e2d7..f30646f905 100644
--- a/core/unittest/event/BlockedEventManagerUnittest.cpp
+++ b/core/unittest/event/BlockedEventManagerUnittest.cpp
@@ -40,6 +40,9 @@ void BlockedEventManagerUnittest::OnFeedback() const {
     APSARA_TEST_EQUAL("dir", res[0]->GetSource());
     APSARA_TEST_EQUAL("file", res[0]->GetObject());
     APSARA_TEST_EQUAL(1U, BlockedEventManager::GetInstance()->mEventMap.size());
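+    // res holds raw Event pointers owned by the caller; free them so the test does not leak.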
+    for (auto* e : res) {
+        delete e;
+    }
 }
 
 UNIT_TEST_CASE(BlockedEventManagerUnittest, OnFeedback)
diff --git a/core/unittest/metadata/K8sMetadataUnittest.cpp b/core/unittest/metadata/K8sMetadataUnittest.cpp
index ba0fe76607..46b077e186 100644
--- a/core/unittest/metadata/K8sMetadataUnittest.cpp
+++ b/core/unittest/metadata/K8sMetadataUnittest.cpp
@@ -170,7 +170,7 @@ class k8sMetadataUnittest : public ::testing::Test {
     })";
         eventGroup.FromJsonString(eventStr);
         eventGroup.AddMetricEvent();
-        LabelingK8sMetadata& processor = *(new LabelingK8sMetadata);
+        LabelingK8sMetadata processor;
         processor.AddLabelToLogGroup(eventGroup);
         EventsContainer& eventsEnd = eventGroup.MutableEvents();
         auto& metricEvent = eventsEnd[0].Cast<MetricEvent>();
diff --git a/core/unittest/models/PipelineEventPtrUnittest.cpp b/core/unittest/models/PipelineEventPtrUnittest.cpp
index cfdd125c4b..1f5b40b05a 100644
--- a/core/unittest/models/PipelineEventPtrUnittest.cpp
+++ b/core/unittest/models/PipelineEventPtrUnittest.cpp
@@ -80,9 +80,10 @@ void PipelineEventPtrUnittest::TestCast() {
 
 void PipelineEventPtrUnittest::TestRelease() {
     auto logUPtr = mEventGroup->CreateLogEvent();
-    auto addr = logUPtr.get();
+    auto* addr = logUPtr.get();
     PipelineEventPtr logEventPtr(std::move(logUPtr), false, nullptr);
     APSARA_TEST_EQUAL_FATAL(addr, logEventPtr.Release());
+    delete addr;
 }
 
 void PipelineEventPtrUnittest::TestCopy() {
diff --git a/core/unittest/models/SpanEventUnittest.cpp b/core/unittest/models/SpanEventUnittest.cpp
index 768097a65b..693538324d 100644
--- a/core/unittest/models/SpanEventUnittest.cpp
+++ b/core/unittest/models/SpanEventUnittest.cpp
@@ -319,13 +319,13 @@ void SpanEventUnittest::TestToJson() {
         "kind": 3,
         "startTimeNs": 1715826723000000000,
         "endTimeNs": 1715826725000000000,
-        "tags": {
+        "attributes": {
             "key1": "value1"
         },
         "events": [
             {
                 "name": "test_event",
-                "timestampNs": 1715826724000000000
+                "timestamp": 1715826724000000000
             }
         ],
         "links": [
@@ -358,13 +358,13 @@ void SpanEventUnittest::TestFromJson() {
         "kind": 3,
         "startTimeNs": 1715826723000000000,
         "endTimeNs": 1715826725000000000,
-        "tags": {
+        "attributes": {
             "key1": "value1"
         },
         "events": [
             {
                 "name": "test_event",
-                "timestampNs": 1715826724000000000
+                "timestamp": 1715826724000000000
             }
         ],
         "links": [
@@ -513,8 +513,8 @@ void InnerEventUnittest::TestToJson() {
     Json::Value eventJson;
     string eventStr = R"({
         "name": "test",
-        "timestampNs": 1715826723000000000,
-        "tags": {
+        "timestamp": 1715826723000000000,
+        "attributes": {
             "key1": "value1"
         }
     })";
@@ -528,8 +528,8 @@ void InnerEventUnittest::TestFromJson() {
     Json::Value eventJson;
     string eventStr = R"({
         "name": "test",
-        "timestampNs": 1715826723000000000,
-        "tags": {
+        "timestamp": 1715826723000000000,
+        "attributes": {
             "key1": "value1"
         }
     })";
@@ -659,7 +659,7 @@ void SpanLinkUnittest::TestToJson() {
         "traceId": "test_trace_id",
         "spanId": "test_span_id",
         "traceState": "normal",
-        "tags": {
+        "attributes": {
             "key1": "value1"
         }
     })";
@@ -675,7 +675,7 @@ void SpanLinkUnittest::TestFromJson() {
         "traceId": "test_trace_id",
         "spanId": "test_span_id",
         "traceState": "normal",
-        "tags": {
+        "attributes": {
             "key1": "value1"
         }
     })";
diff --git a/core/unittest/pipeline/PipelineUnittest.cpp b/core/unittest/pipeline/PipelineUnittest.cpp
index 8134336fc5..0f9b273a37 100644
--- a/core/unittest/pipeline/PipelineUnittest.cpp
+++ b/core/unittest/pipeline/PipelineUnittest.cpp
@@ -2736,13 +2736,13 @@ void PipelineUnittest::TestSend() const {
         {
             auto flusher
                 = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, pipeline.GenNextPluginMeta(false));
-            flusher->Init(Json::Value(), ctx, tmp);
+            flusher->Init(Json::Value(), ctx, 0, tmp);
             pipeline.mFlushers.emplace_back(std::move(flusher));
         }
         {
             auto flusher
                 = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, pipeline.GenNextPluginMeta(false));
-            flusher->Init(Json::Value(), ctx, tmp);
+            flusher->Init(Json::Value(), ctx, 0, tmp);
             pipeline.mFlushers.emplace_back(std::move(flusher));
         }
         vector<pair<size_t, const Json::Value*>> configs;
@@ -2788,13 +2788,13 @@ void PipelineUnittest::TestSend() const {
         {
             auto flusher
                 = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, pipeline.GenNextPluginMeta(false));
-            flusher->Init(Json::Value(), ctx, tmp);
+            flusher->Init(Json::Value(), ctx, 0, tmp);
             pipeline.mFlushers.emplace_back(std::move(flusher));
         }
         {
             auto flusher
                 = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, pipeline.GenNextPluginMeta(false));
-            flusher->Init(Json::Value(), ctx, tmp);
+            flusher->Init(Json::Value(), ctx, 0, tmp);
             pipeline.mFlushers.emplace_back(std::move(flusher));
         }
 
@@ -2855,13 +2855,13 @@ void PipelineUnittest::TestFlushBatch() const {
     {
         auto flusher
             = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, pipeline.GenNextPluginMeta(false));
-        flusher->Init(Json::Value(), ctx, tmp);
+        flusher->Init(Json::Value(), ctx, 0, tmp);
         pipeline.mFlushers.emplace_back(std::move(flusher));
     }
     {
         auto flusher
             = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, pipeline.GenNextPluginMeta(false));
-        flusher->Init(Json::Value(), ctx, tmp);
+        flusher->Init(Json::Value(), ctx, 0, tmp);
         pipeline.mFlushers.emplace_back(std::move(flusher));
     }
     {
diff --git a/core/unittest/plugin/FlusherInstanceUnittest.cpp b/core/unittest/plugin/FlusherInstanceUnittest.cpp
index b6fcc33633..800529df70 100644
--- a/core/unittest/plugin/FlusherInstanceUnittest.cpp
+++ b/core/unittest/plugin/FlusherInstanceUnittest.cpp
@@ -46,7 +46,7 @@ void FlusherInstanceUnittest::TestInit() const {
         = make_unique<FlusherInstance>(new FlusherMock(), PluginInstance::PluginMeta("0"));
     Json::Value config, opt;
     PipelineContext context;
-    APSARA_TEST_TRUE(flusher->Init(config, context, opt));
+    APSARA_TEST_TRUE(flusher->Init(config, context, 0, opt));
     APSARA_TEST_EQUAL(&context, &flusher->GetPlugin()->GetContext());
 }
 
diff --git a/core/unittest/plugin/FlusherUnittest.cpp b/core/unittest/plugin/FlusherUnittest.cpp
index 199ebfe306..a3deca71a1 100644
--- a/core/unittest/plugin/FlusherUnittest.cpp
+++ b/core/unittest/plugin/FlusherUnittest.cpp
@@ -39,12 +39,12 @@ void FlusherUnittest::TestStop() const {
     auto ctx = PipelineContext();
     ctx.SetConfigName("test_config");
 
-    FlusherMock* mock = new FlusherMock();
+    auto mock = make_unique<FlusherMock>();
     mock->SetContext(ctx);
     Json::Value tmp;
     mock->Init(Json::Value(), tmp);
 
-    auto q = SenderQueueManager::GetInstance()->GetQueue(mock->GetQueueKey());
+    auto* q = SenderQueueManager::GetInstance()->GetQueue(mock->GetQueueKey());
     // push items to queue
     for (size_t i = 0; i < q->mCapacity; ++i) {
         auto item = make_unique<SenderQueueItem>("content", 0, nullptr, mock->GetQueueKey());
diff --git a/core/unittest/plugin/PluginMock.h b/core/unittest/plugin/PluginMock.h
index 0713b08bcf..dbc9072ed7 100644
--- a/core/unittest/plugin/PluginMock.h
+++ b/core/unittest/plugin/PluginMock.h
@@ -28,6 +28,8 @@
 #include "pipeline/plugin/interface/Input.h"
 #include "pipeline/plugin/interface/Processor.h"
 #include "pipeline/queue/SenderQueueManager.h"
+#include "task_pipeline/Task.h"
+#include "task_pipeline/TaskRegistry.h"
 
 namespace logtail {
 
@@ -138,6 +140,25 @@ class FlusherHttpMock : public HttpFlusher {
 
 const std::string FlusherHttpMock::sName = "flusher_http_mock";
 
+class TaskMock : public Task {
+public:
+    static const std::string sName;
+
+    const std::string& Name() const override { return sName; }
+    bool Init(const Json::Value& config) override {
+        if (config.isMember("Valid")) {
+            return config["Valid"].asBool();
+        }
+        return true;
+    }
+    void Start() override { mIsRunning = true; }
+    void Stop(bool isRemoving) override { mIsRunning = false; }
+
+    bool mIsRunning = false;
+};
+
+const std::string TaskMock::sName = "task_mock";
+
 void LoadPluginMock() {
     PluginRegistry::GetInstance()->RegisterInputCreator(new StaticInputCreator<InputMock>());
     PluginRegistry::GetInstance()->RegisterProcessorCreator(new StaticProcessorCreator<ProcessorInnerMock>());
@@ -146,4 +167,8 @@ void LoadPluginMock() {
     PluginRegistry::GetInstance()->RegisterFlusherCreator(new StaticFlusherCreator<FlusherHttpMock>());
 }
 
+void LoadTaskMock() {
+    TaskRegistry::GetInstance()->RegisterCreator(TaskMock::sName, []() { return std::make_unique<TaskMock>(); });
+}
+
 } // namespace logtail
diff --git a/core/unittest/polling/CMakeLists.txt b/core/unittest/polling/CMakeLists.txt
index 82b1b54a17..23843d1891 100644
--- a/core/unittest/polling/CMakeLists.txt
+++ b/core/unittest/polling/CMakeLists.txt
@@ -16,4 +16,10 @@ cmake_minimum_required(VERSION 3.22)
 project(polling_unittest)
 
 # add_executable(polling_unittest PollingUnittest.cpp)
-# target_link_libraries(polling_unittest ${UT_BASE_TARGET})
\ No newline at end of file
+# target_link_libraries(polling_unittest ${UT_BASE_TARGET})
+
+add_executable(polling_preserved_dir_depth_unittest PollingPreservedDirDepthUnittest.cpp)
+target_link_libraries(polling_preserved_dir_depth_unittest ${UT_BASE_TARGET})
+
+include(GoogleTest)
+gtest_discover_tests(polling_preserved_dir_depth_unittest)
\ No newline at end of file
diff --git a/core/unittest/polling/PollingPreservedDirDepthUnittest.cpp b/core/unittest/polling/PollingPreservedDirDepthUnittest.cpp
new file mode 100644
index 0000000000..1e860271bd
--- /dev/null
+++ b/core/unittest/polling/PollingPreservedDirDepthUnittest.cpp
@@ -0,0 +1,337 @@
+
+#include <json/json.h>
+
+#include <chrono> // Include the <chrono> header for sleep_for
+#include <thread> // Include the <thread> header for this_thread
+
+#include "application/Application.h"
+#include "common/Flags.h"
+#include "common/JsonUtil.h"
+#include "file_server/EventDispatcher.h"
+#include "file_server/event_handler/LogInput.h"
+#include "file_server/polling/PollingDirFile.h"
+#include "file_server/polling/PollingEventQueue.h"
+#include "file_server/polling/PollingModify.h"
+#include "pipeline/PipelineManager.h"
+#include "pipeline/plugin/PluginRegistry.h"
+#include "runner/FlusherRunner.h"
+#include "runner/ProcessorRunner.h"
+#include "unittest/Unittest.h"
+
+using namespace std;
+
+DECLARE_FLAG_INT32(default_max_inotify_watch_num);
+DECLARE_FLAG_BOOL(enable_polling_discovery);
+DECLARE_FLAG_INT32(check_timeout_interval);
+DECLARE_FLAG_INT32(log_input_thread_wait_interval);
+DECLARE_FLAG_INT32(check_not_exist_file_dir_round);
+DECLARE_FLAG_INT32(polling_check_timeout_interval);
+
+namespace logtail {
+
+struct TestVector {
+    string mConfigInputDir;
+    string mTestDir0;
+    string mTestDir1;
+    int mPreservedDirDepth;
+    bool mLetTimeoutBefore2ndWriteTestFile0;
+    // expected results
+    bool mCollectTestFile1stWrite;
+    bool mCollectTestFile2ndWrite;
+    bool mCollectTestFile3rdWrite;
+    bool mCollectTestFile2;
+};
+
+// clang-format off
+// |  Case  |  PreservedDirDepth  |  Path  |  1st file/dir change  |  Expected result  |  Time of 2nd change  |  2nd file/dir change  |  Expected result  |  Time of 3rd change  |  3rd file/dir change  |  Expected result for /var/log/0/0.log  |  Expected result for /var/log/1/0.log  |
+// | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+// |  0  |  0  |  /var/log  |  /var/log/app/0/0.log  |  collected  |  <timeout  |  append to existing file  |  collected  |  \>timeout  |  /var/log/app/1/0.log  |  not collected  |  collected  |
+// |  1  |  0  |  /var/\*/log  |  /var/app/log/0/0.log  |  collected  |  \>timeout  |  append to existing file  |  not collected  |  \>timeout  |  /var/app/log/1/0.log  |  not collected  |  collected  |
+// |  2  |  1  |  /var/log  |  /var/app/log/0/0.log  |  not collected  |  <timeout  |  append to existing file  |  not collected  |  \>timeout  |  /var/app/log/1/0.log  |  not collected  |  not collected  |
+// |  3  |  0  |  /var/log  |  /var/log/0/0.log  |  collected  |  <timeout  |  append to existing file  |  collected  |  \>timeout  |  /var/log/1/0.log  |  not collected  |  collected  |
+// |  4  |  1  |  /var/\*/log  |  /var/log/0/0.log  |  not collected  |  \>timeout  |  append to existing file  |  not collected  |  \>timeout  |  /var/log/1/0.log  |  not collected  |  not collected  |
+// |  5  |  1  |  /var/\*/log  |  /var/app/log/0/0.log  |  collected  |  \>timeout  |  append to existing file  |  collected  |  \>timeout  |  /var/app/log/1/0.log  |  collected  |  collected  |
+// |  6  |  0  |  /var/log  |  /var/log/app/0/0.log  |  collected  |  <timeout  |  append to existing file  |  collected  |  \>timeout  |  /var/log/app/0/1/0.log  |  not collected  |  not collected  |
+// clang-format on
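+// Each row above corresponds to one TestVector entry in gTestMatrix below:
+// {mConfigInputDir, mTestDir0, mTestDir1, mPreservedDirDepth, mLetTimeoutBefore2ndWriteTestFile0,
+//  mCollectTestFile1stWrite, mCollectTestFile2ndWrite, mCollectTestFile3rdWrite, mCollectTestFile2}.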
+
+class PollingPreservedDirDepthUnittest : public ::testing::Test {
+    static std::string gRootDir;
+    static std::string gCheckpoint;
+    static vector<TestVector> gTestMatrix;
+
+public:
+    static void SetUpTestCase() {
+        gRootDir = GetProcessExecutionDir() + "var" + PATH_SEPARATOR;
+        gTestMatrix = {
+            {"log", "log/app/0", "log/app/1", 0, false, true, true, false, true},
+            {"*/log", "app/log/0", "app/log/1", 0, true, true, false, false, true},
+            {"log", "app/log/0", "app/log/1", 1, false, false, false, false, false},
+            {"log", "log/0", "log/1", 0, false, true, true, false, true},
+            {"*/log", "log/0", "log/1", 1, true, false, false, false, false},
+            {"*/log", "app/log/0", "app/log/1", 1, true, true, true, true, true},
+            {"log", "log/app/0", "log/app/0/1", 0, false, true, true, false, false},
+        };
+
+        sLogger->set_level(spdlog::level::trace);
+        srand(time(nullptr));
+        INT32_FLAG(default_max_inotify_watch_num) = 0;
+        BOOL_FLAG(enable_polling_discovery) = false; // we will call poll manually
+        INT32_FLAG(timeout_interval) = 1;
+        INT32_FLAG(check_timeout_interval) = 0;
+        INT32_FLAG(check_not_exist_file_dir_round) = 1;
+        INT32_FLAG(polling_check_timeout_interval) = 0;
+        AppConfig::GetInstance()->mCheckPointFilePath = GetProcessExecutionDir() + gCheckpoint;
+        if (bfs::exists(AppConfig::GetInstance()->mCheckPointFilePath)) {
+            bfs::remove_all(AppConfig::GetInstance()->mCheckPointFilePath);
+        }
+        LoongCollectorMonitor::GetInstance()->Init();
+        FlusherRunner::GetInstance()->Init(); // reference: Application::Start
+        PluginRegistry::GetInstance()->LoadPlugins();
+        ProcessorRunner::GetInstance()->Init();
+        PipelineManager::GetInstance();
+        FileServer::GetInstance()->Start();
+        PollingDirFile::GetInstance()->Start();
+        PollingModify::GetInstance()->Start();
+        PollingModify::GetInstance()->Stop();
+        PollingDirFile::GetInstance()->Stop();
+        PollingDirFile::GetInstance()->mRuningFlag = true;
+        PollingModify::GetInstance()->mRuningFlag = true;
+    }
+
+    static void TearDownTestCase() {
+        PollingDirFile::GetInstance()->mRuningFlag = false;
+        PollingModify::GetInstance()->mRuningFlag = false;
+        Application::GetInstance()->Exit();
+    }
+
+    void SetUp() override {
+        if (bfs::exists(AppConfig::GetInstance()->mCheckPointFilePath)) {
+            bfs::remove_all(AppConfig::GetInstance()->mCheckPointFilePath);
+        }
+        if (bfs::exists(gRootDir)) {
+            bfs::remove_all(gRootDir);
+        }
+        bfs::create_directories(gRootDir);
+    }
+
+    void TearDown() override {
+        FileServer::GetInstance()->Pause();
+        for (auto& p : PipelineManager::GetInstance()->mPipelineNameEntityMap) {
+            p.second->Stop(true);
+        }
+        PipelineManager::GetInstance()->mPipelineNameEntityMap.clear();
+        // EventDispatcher::GetInstance()->CleanEnviroments();
+        // ConfigManager::GetInstance()->CleanEnviroments();
+        PollingDirFile::GetInstance()->ClearCache();
+        PollingModify::GetInstance()->ClearCache();
+        CheckPointManager::Instance()->RemoveAllCheckPoint();
+        // PollingEventQueue::GetInstance()->Clear();
+        bfs::remove_all(gRootDir);
+        if (bfs::exists(AppConfig::GetInstance()->mCheckPointFilePath)) {
+            bfs::remove_all(AppConfig::GetInstance()->mCheckPointFilePath);
+        }
+        FileServer::GetInstance()->Resume();
+    }
+
+private:
+    unique_ptr<Json::Value> createPipelineConfig(const string& filePath, int preservedDirDepth) {
+        const char* confCstr = R"({
+            "inputs": [
+                {
+                    "Type": "input_file",
+                    "FilePaths": ["/var/log/**/0.log"],
+                    "MaxDirSearchDepth": 2,
+                    "PreservedDirDepth": -1
+                }
+            ],
+            "flushers": [
+                {
+                    "Type": "flusher_blackhole"
+                }
+            ]
+        })";
+        unique_ptr<Json::Value> conf(new Json::Value(Json::objectValue));
+        string errorMsg;
+        ParseJsonTable(confCstr, *conf, errorMsg);
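+        // Overwrite the placeholder FilePaths / PreservedDirDepth from the JSON literal with the per-case values.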
+        auto& input = (*conf)["inputs"][0];
+        input["FilePaths"][0] = filePath;
+        input["PreservedDirDepth"] = preservedDirDepth;
+        return conf;
+    }
+
+    void generateLog(const string& testFile) {
+        LOG_DEBUG(sLogger, ("Generate log", testFile));
+        auto pos = testFile.rfind(PATH_SEPARATOR);
+        auto dir = testFile.substr(0, pos);
+        bfs::create_directories(dir);
+        ofstream ofs(testFile, std::ios::app);
+        ofs << "0\n";
+    }
+
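+    // A test file counts as "collected" once its parent directory is registered with the
+    // EventDispatcher (PATH_INODE_REGISTERED), which is what the expectations below check.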
+    bool isFileDirRegistered(const string& testFile) {
+        auto pos = testFile.rfind(PATH_SEPARATOR);
+        auto dir = testFile.substr(0, pos);
+        auto registerStatus = EventDispatcher::GetInstance()->IsDirRegistered(dir);
+        return registerStatus == DirRegisterStatus::PATH_INODE_REGISTERED;
+    }
+
+    void testPollingDirFile(const TestVector& testVector) {
+        auto configInputFilePath
+            = gRootDir + testVector.mConfigInputDir + PATH_SEPARATOR + "**" + PATH_SEPARATOR + "0.log";
+        auto testFile1 = gRootDir + testVector.mTestDir0 + PATH_SEPARATOR + "0.log";
+        auto testFile2 = gRootDir + testVector.mTestDir1 + PATH_SEPARATOR + "0.log";
+        FileServer::GetInstance()->Pause();
+        auto configJson = createPipelineConfig(configInputFilePath, testVector.mPreservedDirDepth);
+        PipelineConfig pipelineConfig("polling", std::move(configJson));
+        APSARA_TEST_TRUE_FATAL(pipelineConfig.Parse());
+        auto p = PipelineManager::GetInstance()->BuildPipeline(
+            std::move(pipelineConfig)); // reference: PipelineManager::UpdatePipelines
+        APSARA_TEST_FALSE_FATAL(p.get() == nullptr);
+        PipelineManager::GetInstance()->mPipelineNameEntityMap[pipelineConfig.mName] = p;
+        p->Start();
+        FileServer::GetInstance()->Resume();
+
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        std::this_thread::sleep_for(std::chrono::microseconds(
+            10 * INT32_FLAG(log_input_thread_wait_interval))); // give the input thread enough time to consume the events
+
+        // write testFile1 for the 1st time
+        generateLog(testFile1);
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        std::this_thread::sleep_for(std::chrono::microseconds(
+            10 * INT32_FLAG(log_input_thread_wait_interval))); // give the input thread enough time to consume the events
+        if (testVector.mCollectTestFile1stWrite) {
+            APSARA_TEST_TRUE_FATAL(isFileDirRegistered(testFile1));
+        } else {
+            APSARA_TEST_FALSE_FATAL(isFileDirRegistered(testFile1));
+        }
+
+        if (testVector.mLetTimeoutBefore2ndWriteTestFile0) {
+            std::this_thread::sleep_for(std::chrono::seconds(
+                2
+                * INT32_FLAG(
+                    timeout_interval))); // let the timeout fire; must *2 since a timeout only happens if the interval > 1s
+        }
+
+        // trigger clean timeout polling cache
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        // write testFile1 for the 2nd time
+        generateLog(testFile1);
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        std::this_thread::sleep_for(std::chrono::microseconds(
+            10 * INT32_FLAG(log_input_thread_wait_interval))); // give the input thread enough time to consume the events
+        if (testVector.mCollectTestFile2ndWrite) {
+            APSARA_TEST_TRUE_FATAL(isFileDirRegistered(testFile1));
+        } else {
+            APSARA_TEST_FALSE_FATAL(isFileDirRegistered(testFile1));
+        }
+
+        std::this_thread::sleep_for(std::chrono::seconds(
+            2
+            * INT32_FLAG(
+                timeout_interval))); // let the timeout fire; must *2 since a timeout only happens if the interval > 1s
+
+        // trigger clean timeout polling cache
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        generateLog(testFile1);
+        generateLog(testFile2);
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        std::this_thread::sleep_for(std::chrono::microseconds(
+            10 * INT32_FLAG(log_input_thread_wait_interval))); // give the input thread enough time to consume the events
+        if (testVector.mCollectTestFile3rdWrite) {
+            APSARA_TEST_TRUE_FATAL(isFileDirRegistered(testFile1));
+        } else {
+            APSARA_TEST_FALSE_FATAL(isFileDirRegistered(testFile1));
+        }
+        if (testVector.mCollectTestFile2) {
+            APSARA_TEST_TRUE_FATAL(isFileDirRegistered(testFile2));
+        } else {
+            APSARA_TEST_FALSE_FATAL(isFileDirRegistered(testFile2));
+        }
+    }
+
+public:
+    void TestPollingDirFile0() { testPollingDirFile(gTestMatrix[0]); }
+    void TestPollingDirFile1() { testPollingDirFile(gTestMatrix[1]); }
+    void TestPollingDirFile2() { testPollingDirFile(gTestMatrix[2]); }
+    void TestPollingDirFile3() { testPollingDirFile(gTestMatrix[3]); }
+    void TestPollingDirFile4() { testPollingDirFile(gTestMatrix[4]); }
+    void TestPollingDirFile5() { testPollingDirFile(gTestMatrix[5]); }
+
+    void TestPollingDirFile6() { testPollingDirFile(gTestMatrix[6]); }
+
+    void TestCheckpoint() {
+        auto configInputFilePath = gRootDir + "log/**/0.log";
+        auto testFile = gRootDir + "log/0/0.log";
+        FileServer::GetInstance()->Pause();
+        auto configJson = createPipelineConfig(configInputFilePath, 0);
+        PipelineConfig pipelineConfig("polling", std::move(configJson));
+        APSARA_TEST_TRUE_FATAL(pipelineConfig.Parse());
+        auto p = PipelineManager::GetInstance()->BuildPipeline(
+            std::move(pipelineConfig)); // reference: PipelineManager::UpdatePipelines
+        APSARA_TEST_FALSE_FATAL(p.get() == nullptr);
+        PipelineManager::GetInstance()->mPipelineNameEntityMap[pipelineConfig.mName] = p;
+        p->Start();
+        FileServer::GetInstance()->Resume();
+
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        std::this_thread::sleep_for(std::chrono::microseconds(
+            10 * INT32_FLAG(log_input_thread_wait_interval))); // give the input thread enough time to consume the events
+
+        // generate a log in testFile for the 1st time
+        generateLog(testFile);
+        PollingDirFile::GetInstance()->PollingIteration();
+        PollingModify::GetInstance()->PollingIteration();
+        std::this_thread::sleep_for(std::chrono::microseconds(
+            10 * INT32_FLAG(log_input_thread_wait_interval))); // give the input thread enough time to consume the events
+        APSARA_TEST_TRUE_FATAL(isFileDirRegistered(testFile));
+
+        // Dump and load checkpoint
+        FileServer::GetInstance()->Pause(true);
+        std::this_thread::sleep_for(std::chrono::seconds(
+            2
+            * INT32_FLAG(
+                timeout_interval))); // let the timeout fire; must *2 since a timeout only happens if the interval > 1s
+        FileServer::GetInstance()->Resume(true);
+        // Should remain registered after checkpoint
+        APSARA_TEST_TRUE_FATAL(isFileDirRegistered(testFile));
+
+        std::this_thread::sleep_for(std::chrono::seconds(
+            2
+            * INT32_FLAG(
+                timeout_interval))); // let the timeout fire; must *2 since a timeout only happens if the interval > 1s
+
+        APSARA_TEST_FALSE_FATAL(isFileDirRegistered(testFile));
+        // Dump and load checkpoint
+        FileServer::GetInstance()->Pause(true);
+        FileServer::GetInstance()->Resume(true);
+        // Should remain unregistered after checkpoint
+        APSARA_TEST_FALSE_FATAL(isFileDirRegistered(testFile));
+    }
+};
+
+UNIT_TEST_CASE(PollingPreservedDirDepthUnittest, TestPollingDirFile0);
+UNIT_TEST_CASE(PollingPreservedDirDepthUnittest, TestPollingDirFile1);
+UNIT_TEST_CASE(PollingPreservedDirDepthUnittest, TestPollingDirFile2);
+UNIT_TEST_CASE(PollingPreservedDirDepthUnittest, TestPollingDirFile3);
+UNIT_TEST_CASE(PollingPreservedDirDepthUnittest, TestPollingDirFile4);
+UNIT_TEST_CASE(PollingPreservedDirDepthUnittest, TestPollingDirFile5);
+UNIT_TEST_CASE(PollingPreservedDirDepthUnittest, TestCheckpoint);
+
+std::string PollingPreservedDirDepthUnittest::gRootDir;
+std::string PollingPreservedDirDepthUnittest::gCheckpoint = "checkpoint";
+vector<TestVector> PollingPreservedDirDepthUnittest::gTestMatrix;
+} // namespace logtail
+
+int main(int argc, char** argv) {
+    logtail::Logger::Instance().InitGlobalLoggers();
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/core/unittest/polling/PollingUnittest.cpp b/core/unittest/polling/PollingUnittest.cpp
index fe273688bd..9dd2ea3c1d 100644
--- a/core/unittest/polling/PollingUnittest.cpp
+++ b/core/unittest/polling/PollingUnittest.cpp
@@ -4,9 +4,9 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
diff --git a/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp b/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp
index 62bf5e3dc2..c0481a1b94 100644
--- a/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp
+++ b/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp
@@ -85,8 +85,8 @@ void ProcessorParsePrometheusMetricUnittest::TestProcess() {
             if (newLine.empty() || newLine[0] == '#') {
                 continue;
             }
-            auto* MetricEvent = eGroup.AddLogEvent();
-            MetricEvent->SetContent(prometheus::PROMETHEUS, newLine);
+            auto* metricEvent = eGroup.AddRawEvent();
+            metricEvent->SetContent(newLine);
         }
 
         return eGroup;
diff --git a/core/unittest/processor/ProcessorTagNativeUnittest.cpp b/core/unittest/processor/ProcessorTagNativeUnittest.cpp
index 3e784f935e..d634c215c8 100644
--- a/core/unittest/processor/ProcessorTagNativeUnittest.cpp
+++ b/core/unittest/processor/ProcessorTagNativeUnittest.cpp
@@ -14,9 +14,10 @@
 
 #include <cstdlib>
 
-#include "constants/Constants.h"
 #include "config/PipelineConfig.h"
+#include "constants/Constants.h"
 #include "file_server/ConfigManager.h"
+#include "monitor/Monitor.h"
 #include "pipeline/Pipeline.h"
 #include "plugin/processor/inner/ProcessorTagNative.h"
 #include "unittest/Unittest.h"
@@ -34,7 +35,7 @@ class ProcessorTagNativeUnittest : public ::testing::Test {
 protected:
     void SetUp() override {
         mContext.SetConfigName("project##config_0");
-        LogFileProfiler::GetInstance();
+        LoongCollectorMonitor::GetInstance();
 #ifdef __ENTERPRISE__
         EnterpriseConfigProvider::GetInstance()->SetUserDefinedIdSet(std::vector<std::string>{"machine_group"});
 #endif
@@ -102,7 +103,7 @@ void ProcessorTagNativeUnittest::TestProcess() {
         APSARA_TEST_EQUAL_FATAL(eventGroup.GetMetadata(EventGroupMetaKey::LOG_FILE_PATH),
                                 eventGroup.GetTag(LOG_RESERVED_KEY_PATH));
         APSARA_TEST_TRUE_FATAL(eventGroup.HasTag(LOG_RESERVED_KEY_HOSTNAME));
-        APSARA_TEST_EQUAL_FATAL(LogFileProfiler::mHostname, eventGroup.GetTag(LOG_RESERVED_KEY_HOSTNAME));
+        APSARA_TEST_EQUAL_FATAL(LoongCollectorMonitor::mHostname, eventGroup.GetTag(LOG_RESERVED_KEY_HOSTNAME));
 #ifdef __ENTERPRISE__
         APSARA_TEST_TRUE_FATAL(eventGroup.HasTag(LOG_RESERVED_KEY_USER_DEFINED_ID));
         APSARA_TEST_EQUAL_FATAL(EnterpriseConfigProvider::GetInstance()->GetUserDefinedIdSet(),
diff --git a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp
index d51cc4e57e..2c5138768b 100644
--- a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp
+++ b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp
@@ -21,6 +21,7 @@
 #include "common/StringTools.h"
 #include "common/http/HttpResponse.h"
 #include "common/timer/Timer.h"
+#include "models/RawEvent.h"
 #include "prometheus/Constants.h"
 #include "prometheus/async/PromFuture.h"
 #include "prometheus/labels/Labels.h"
@@ -65,8 +66,11 @@ void ScrapeSchedulerUnittest::TestInitscrapeScheduler() {
 }
 
 void ScrapeSchedulerUnittest::TestProcess() {
+    EventPool eventPool{true};
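+    // The response body now allocates parsed metric lines as RawEvents from this pool; the
+    // constructor flag is assumed to enable the pool's internal locking.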
     HttpResponse httpResponse = HttpResponse(
-        new PromMetricResponseBody(), [](void* ptr) { delete static_cast<PromMetricResponseBody*>(ptr); }, PromMetricWriteCallback);
+        new PromMetricResponseBody(&eventPool),
+        [](void* ptr) { delete static_cast<PromMetricResponseBody*>(ptr); },
+        PromMetricWriteCallback);
     Labels labels;
     labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080");
     labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080");
@@ -112,8 +116,11 @@ void ScrapeSchedulerUnittest::TestProcess() {
 }
 
 void ScrapeSchedulerUnittest::TestStreamMetricWriteCallback() {
+    EventPool eventPool{true};
     HttpResponse httpResponse = HttpResponse(
-        new PromMetricResponseBody(), [](void* ptr) { delete static_cast<PromMetricResponseBody*>(ptr); }, PromMetricWriteCallback);
+        new PromMetricResponseBody(&eventPool),
+        [](void* ptr) { delete static_cast<PromMetricResponseBody*>(ptr); },
+        PromMetricWriteCallback);
     Labels labels;
     labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080");
     labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080");
@@ -148,33 +155,27 @@ void ScrapeSchedulerUnittest::TestStreamMetricWriteCallback() {
     auto& res = httpResponse.GetBody<PromMetricResponseBody>()->mEventGroup;
     APSARA_TEST_EQUAL(7UL, res.GetEvents().size());
     APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05",
-                      res.GetEvents()[0].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
+                      res.GetEvents()[0].Cast<RawEvent>().GetContent());
     APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.25\"} 3.9357e-05",
-                      res.GetEvents()[1].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
+                      res.GetEvents()[1].Cast<RawEvent>().GetContent());
     APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.5\"} 4.1114e-05",
-                      res.GetEvents()[2].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
+                      res.GetEvents()[2].Cast<RawEvent>().GetContent());
     APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.75\"} 4.3372e-05",
-                      res.GetEvents()[3].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
+                      res.GetEvents()[3].Cast<RawEvent>().GetContent());
     APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"1\"} 0.000112326",
-                      res.GetEvents()[4].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
-    APSARA_TEST_EQUAL("go_gc_duration_seconds_sum 0.034885631",
-                      res.GetEvents()[5].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
-    APSARA_TEST_EQUAL("go_gc_duration_seconds_count 850",
-                      res.GetEvents()[6].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
+                      res.GetEvents()[4].Cast<RawEvent>().GetContent());
+    APSARA_TEST_EQUAL("go_gc_duration_seconds_sum 0.034885631", res.GetEvents()[5].Cast<RawEvent>().GetContent());
+    APSARA_TEST_EQUAL("go_gc_duration_seconds_count 850", res.GetEvents()[6].Cast<RawEvent>().GetContent());
     // httpResponse.GetBody<MetricResponseBody>()->mEventGroup = PipelineEventGroup(std::make_shared<SourceBuffer>());
     PromMetricWriteCallback(
         body2.data(), (size_t)1, (size_t)body2.length(), (void*)httpResponse.GetBody<PromMetricResponseBody>());
     httpResponse.GetBody<PromMetricResponseBody>()->FlushCache();
     APSARA_TEST_EQUAL(11UL, res.GetEvents().size());
 
-    APSARA_TEST_EQUAL("go_goroutines 7",
-                      res.GetEvents()[7].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
-    APSARA_TEST_EQUAL("go_info{version=\"go1.22.3\"} 1",
-                      res.GetEvents()[8].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
-    APSARA_TEST_EQUAL("go_memstats_alloc_bytes 6.742688e+06",
-                      res.GetEvents()[9].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
-    APSARA_TEST_EQUAL("go_memstats_alloc_bytes_total 1.5159292e+08",
-                      res.GetEvents()[10].Cast<LogEvent>().GetContent(prometheus::PROMETHEUS).to_string());
+    APSARA_TEST_EQUAL("go_goroutines 7", res.GetEvents()[7].Cast<RawEvent>().GetContent());
+    APSARA_TEST_EQUAL("go_info{version=\"go1.22.3\"} 1", res.GetEvents()[8].Cast<RawEvent>().GetContent());
+    APSARA_TEST_EQUAL("go_memstats_alloc_bytes 6.742688e+06", res.GetEvents()[9].Cast<RawEvent>().GetContent());
+    APSARA_TEST_EQUAL("go_memstats_alloc_bytes_total 1.5159292e+08", res.GetEvents()[10].Cast<RawEvent>().GetContent());
 }
 
 void ScrapeSchedulerUnittest::TestReceiveMessage() {
@@ -197,7 +198,8 @@ void ScrapeSchedulerUnittest::TestScheduler() {
     labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080");
     ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0);
     auto timer = make_shared<Timer>();
-    event.SetTimer(timer);
+    EventPool eventPool{true};
+    event.SetComponent(timer, &eventPool);
     event.ScheduleNext();
 
     APSARA_TEST_TRUE(timer->mQueue.size() == 1);
@@ -215,7 +217,8 @@ void ScrapeSchedulerUnittest::TestQueueIsFull() {
     auto defaultLabels = MetricLabels();
     event.InitSelfMonitor(defaultLabels);
     auto timer = make_shared<Timer>();
-    event.SetTimer(timer);
+    EventPool eventPool{true};
+    event.SetComponent(timer, &eventPool);
     auto now = std::chrono::steady_clock::now();
     event.SetFirstExecTime(now);
     event.ScheduleNext();
diff --git a/core/unittest/reader/ForceReadUnittest.cpp b/core/unittest/reader/ForceReadUnittest.cpp
index 6a40c5dc06..db306614ad 100644
--- a/core/unittest/reader/ForceReadUnittest.cpp
+++ b/core/unittest/reader/ForceReadUnittest.cpp
@@ -147,7 +147,7 @@ void ForceReadUnittest::TestTimeoutForceRead() {
         reader.InitReader(true, LogFileReader::BACKWARD_TO_BEGINNING);
         reader.CheckFileSignatureAndOffset(true);
 
-        ModifyHandler* pHanlder = new ModifyHandler(mConfigName, mConfig);
+        auto pHanlder = make_unique<ModifyHandler>(mConfigName, mConfig);
         pHanlder->mReadFileTimeSlice = 0; // force one read for one event
 
         Event e1 = Event(reader.mHostLogPathDir,
@@ -188,7 +188,7 @@ void ForceReadUnittest::TestTimeoutForceRead() {
         std::string expectedPart1(expectedContent.get());
         expectedPart1.resize(expectedPart1.find("\n"));
         LogFileReader::BUFFER_SIZE = expectedPart1.size() + 1;
-        ModifyHandler* pHanlder = new ModifyHandler(mConfigName, mConfig);
+        auto pHanlder = make_unique<ModifyHandler>(mConfigName, mConfig);
         pHanlder->mReadFileTimeSlice = 0; // force one read for one event
 
         Event e1 = Event(reader.mHostLogPathDir,
@@ -230,7 +230,7 @@ void ForceReadUnittest::TestTimeoutForceRead() {
         std::string expectedPart1(expectedContent.get());
         expectedPart1.resize(expectedPart1.find("\n"));
         LogFileReader::BUFFER_SIZE = expectedPart1.size() + 1;
-        ModifyHandler* pHanlder = new ModifyHandler(mConfigName, mConfig);
+        auto pHanlder = make_unique<ModifyHandler>(mConfigName, mConfig);
         pHanlder->mReadFileTimeSlice = 0; // force one read for one event
 
         Event e1 = Event(reader.mHostLogPathDir,
@@ -302,7 +302,7 @@ void ForceReadUnittest::TestFileCloseForceRead() {
         reader.CheckFileSignatureAndOffset(true);
         LogFileReader::BUFFER_SIZE = 1024 * 512;
 
-        ModifyHandler* pHanlder = new ModifyHandler(mConfigName, mConfig);
+        auto pHanlder = make_unique<ModifyHandler>(mConfigName, mConfig);
         pHanlder->mReadFileTimeSlice = 0; // force one read for one event
 
         Event e1 = Event(reader.mHostLogPathDir,
@@ -348,7 +348,7 @@ void ForceReadUnittest::TestAddTimeoutEvent() {
         BlockedEventManager::GetInstance()->mEventMap.clear();
         APSARA_TEST_EQUAL_FATAL(BlockedEventManager::GetInstance()->mEventMap.size(), 0U);
 
-        ModifyHandler* pHanlder = new ModifyHandler(mConfigName, mConfig);
+        auto pHanlder = make_unique<ModifyHandler>(mConfigName, mConfig);
         pHanlder->mReadFileTimeSlice = 0; // force one read for one event
 
         Event e1 = Event(reader.mHostLogPathDir,
@@ -373,7 +373,7 @@ void ForceReadUnittest::TestAddTimeoutEvent() {
         BlockedEventManager::GetInstance()->mEventMap.clear();
         APSARA_TEST_EQUAL_FATAL(BlockedEventManager::GetInstance()->mEventMap.size(), 0U);
 
-        ModifyHandler* pHanlder = new ModifyHandler(mConfigName, mConfig);
+        auto pHanlder = make_unique<ModifyHandler>(mConfigName, mConfig);
         pHanlder->mReadFileTimeSlice = 0; // force one read for one event
 
         Event e1 = Event(reader.mHostLogPathDir,
diff --git a/core/unittest/sender/SenderUnittest.cpp b/core/unittest/sender/SenderUnittest.cpp
index e2ac680333..0f01300bb2 100644
--- a/core/unittest/sender/SenderUnittest.cpp
+++ b/core/unittest/sender/SenderUnittest.cpp
@@ -46,7 +46,6 @@
 #include <vector>
 
 #include "checkpoint/CheckpointManagerV2.h"
-#include "constants/Constants.h"
 #include "common/FileEncryption.h"
 #include "common/FileSystemUtil.h"
 #include "common/Lock.h"
@@ -55,10 +54,11 @@
 #include "common/StringTools.h"
 #include "common/Thread.h"
 #include "common/WaitObject.h"
+#include "constants/Constants.h"
 #include "file_server/event_handler/LogInput.h"
 #include "logger/Logger.h"
-#include "monitor/LogIntegrity.h"
 #include "monitor/AlarmManager.h"
+#include "monitor/LogIntegrity.h"
 #include "protobuf/sls/metric.pb.h"
 #include "protobuf/sls/sls_logs.pb.h"
 #include "runner/ProcessorRunner.h"
@@ -850,7 +850,9 @@ class SenderUnittest : public ::testing::Test {
         sLogger->set_level(spdlog::level::trace);
         printf("Test case setup.\n");
         srand(time(NULL));
-        Sender::Instance()->AddEndpointEntry(STRING_FLAG(default_region_name), STRING_FLAG(logtail_send_address), SLSClientManager::EndpointSourceType::LOCAL);
+        Sender::Instance()->AddEndpointEntry(STRING_FLAG(default_region_name),
+                                             STRING_FLAG(logtail_send_address),
+                                             SLSClientManager::EndpointSourceType::LOCAL);
         STRING_FLAG(profile_project_name) = "sls-admin";
         INT32_FLAG(sls_host_update_interval) = 1;
         INT32_FLAG(logtail_alarm_interval) = 600;
@@ -2246,59 +2248,6 @@ class SenderUnittest : public ::testing::Test {
         LOG_INFO(sLogger, ("TestFlushOut() end", time(NULL)));
     }
 
-    void TestDumpSnapshot() {
-        LOG_INFO(sLogger, ("TestDumpSnapshot() begin", time(NULL)));
-        CaseSetUp();
-        EnableNetWork();
-        LogFileProfiler::GetInstance()->mSendInterval = 3600;
-        LogFileProfiler::GetInstance()->SendProfileData(true); // Update mLastSendTime.
-
-        OneJob(10, gRootDir, "Job", true, time(NULL));
-        WaitForFileBeenRead();
-        sleep(5);
-        LogFileProfiler::GetInstance()->SendProfileData(true);
-        Json::Value root;
-        ParseConfig(STRING_FLAG(logtail_profile_snapshot), root);
-        LOG_INFO(sLogger, ("snapshot", root.toStyledString()));
-        APSARA_TEST_TRUE(root["version"].asString().size() > 1);
-        APSARA_TEST_EQUAL(root["ip"].asString(), LogFileProfiler::mIpAddr);
-        int32_t timeInterval = root["end_time"].asInt64() - root["begin_time"].asInt64();
-#if defined(__linux__)
-        APSARA_TEST_TRUE_DESC(timeInterval >= 5 && timeInterval <= 7, timeInterval);
-#elif defined(_MSC_VER)
-        EXPECT_TRUE(timeInterval >= 5 && timeInterval <= 20);
-#endif
-        APSARA_TEST_EQUAL(root["detail"].size(), 1);
-        Json::Value categoryDetail = root["detail"][0];
-        APSARA_TEST_EQUAL(categoryDetail["project"].asString(), "1000000_proj");
-        APSARA_TEST_EQUAL(categoryDetail["logstore"].asString(), "app_log");
-        APSARA_TEST_EQUAL(StringTo<int64_t>(categoryDetail["split_lines"].asString()), 10);
-        APSARA_TEST_EQUAL(StringTo<int64_t>(categoryDetail["parse_fail_lines"].asString()), 0);
-        APSARA_TEST_TRUE(StringTo<int64_t>(categoryDetail["read_bytes"].asString()) > 0);
-
-        OneJob(100, gRootDir, "Job", true, time(NULL));
-        WaitForFileBeenRead();
-        sleep(5);
-        LogFileProfiler::GetInstance()->SendProfileData(true);
-
-        ParseConfig(STRING_FLAG(logtail_profile_snapshot), root);
-        LOG_INFO(sLogger, ("snapshot", root.toStyledString()));
-        APSARA_TEST_TRUE(root["version"].asString().size() > 1);
-        APSARA_TEST_EQUAL(root["ip"].asString(), LogFileProfiler::mIpAddr);
-        timeInterval = root["end_time"].asInt64() - root["begin_time"].asInt64();
-        APSARA_TEST_TRUE_DESC(timeInterval >= 5 && timeInterval <= 20, timeInterval);
-        APSARA_TEST_EQUAL(root["detail"].size(), 1);
-        categoryDetail = root["detail"][0];
-        APSARA_TEST_EQUAL(categoryDetail["project"].asString(), "1000000_proj");
-        APSARA_TEST_EQUAL(categoryDetail["logstore"].asString(), "app_log");
-        APSARA_TEST_EQUAL(StringTo<int64_t>(categoryDetail["split_lines"].asString()), 100);
-        APSARA_TEST_EQUAL(StringTo<int64_t>(categoryDetail["parse_fail_lines"].asString()), 0);
-        APSARA_TEST_TRUE(StringTo<int64_t>(categoryDetail["read_bytes"].asString()) > 0);
-
-        CaseCleanUp();
-        LOG_INFO(sLogger, ("TestDumpSnapshot() end", time(NULL)));
-    }
-
     void TestMergeByMinute() {
         LOG_INFO(sLogger, ("TestMergeByMinute() begin", time(NULL)));
         CaseSetUp();
diff --git a/core/unittest/serializer/SLSSerializerUnittest.cpp b/core/unittest/serializer/SLSSerializerUnittest.cpp
index 603f95f953..75a36a307d 100644
--- a/core/unittest/serializer/SLSSerializerUnittest.cpp
+++ b/core/unittest/serializer/SLSSerializerUnittest.cpp
@@ -41,6 +41,7 @@ class SLSSerializerUnittest : public ::testing::Test {
     BatchedEvents
     CreateBatchedMetricEvents(bool enableNanosecond, uint32_t nanoTimestamp, bool emptyValue, bool onlyOneTag);
     BatchedEvents CreateBatchedRawEvents(bool enableNanosecond, bool emptyContent);
+    BatchedEvents CreateBatchedSpanEvents();
 
     static unique_ptr<FlusherSLS> sFlusher;
 
@@ -215,6 +216,86 @@ void SLSSerializerUnittest::TestSerializeEventGroup() {
     }
     {
         // span
+        string res, errorMsg;
+        auto events = CreateBatchedSpanEvents();
+        APSARA_TEST_EQUAL(events.mEvents.size(), 1);
+        APSARA_TEST_TRUE(events.mEvents[0]->GetType() == PipelineEvent::Type::SPAN);
+        APSARA_TEST_TRUE(serializer.DoSerialize(std::move(events), res, errorMsg));
+        sls_logs::LogGroup logGroup;
+        APSARA_TEST_TRUE(logGroup.ParseFromString(res));
+        APSARA_TEST_EQUAL(1, logGroup.logs_size());
+        APSARA_TEST_EQUAL(13, logGroup.logs(0).contents_size());
+        // traceid
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(0).key(), "traceId");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(0).value(), "trace-1-2-3-4-5");
+        // span id
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(1).key(), "spanId");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(1).value(), "span-1-2-3-4-5");
+        // parent span id
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(2).key(), "parentSpanId");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(2).value(), "parent-1-2-3-4-5");
+        // spanName
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(3).key(), "spanName");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(3).value(), "/oneagent/qianlu/local/1");
+        // kind
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(4).key(), "kind");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(4).value(), "client");
+        // code
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(5).key(), "statusCode");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(5).value(), "OK");
+        // traceState
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(6).key(), "traceState");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(6).value(), "test-state");
+        // attributes
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(7).key(), "attributes");
+        auto attrs = logGroup.logs(0).contents(7).value();
+        Json::Value jsonVal;
+        Json::CharReaderBuilder readerBuilder;
+        std::string errs;
+
+        std::istringstream s(attrs);
+        bool ret = Json::parseFromStream(readerBuilder, s, &jsonVal, &errs);
+        APSARA_TEST_TRUE(ret);
+        APSARA_TEST_EQUAL(jsonVal.size(), 10);
+        APSARA_TEST_EQUAL(jsonVal["rpcType"].asString(), "25");
+        APSARA_TEST_EQUAL(jsonVal["scope-tag-0"].asString(), "scope-value-0");
+        // APSARA_TEST_EQUAL(logGroup.logs(0).contents(7).value(), "");
+        // links
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(8).key(), "links");
+        
+        auto linksStr = logGroup.logs(0).contents(8).value();
+
+        std::istringstream ss(linksStr);
+        ret = Json::parseFromStream(readerBuilder, ss, &jsonVal, &errs);
+        APSARA_TEST_TRUE(ret);
+        APSARA_TEST_EQUAL(jsonVal.size(), 1);
+        for (auto& link : jsonVal) {
+            APSARA_TEST_EQUAL(link["spanId"].asString(), "inner-link-spanid");
+            APSARA_TEST_EQUAL(link["traceId"].asString(), "inner-link-traceid");
+            APSARA_TEST_EQUAL(link["traceState"].asString(), "inner-link-trace-state");
+        }
+        // events
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(9).key(), "events");
+        auto eventsStr = logGroup.logs(0).contents(9).value();
+        std::istringstream sss(eventsStr);
+        ret = Json::parseFromStream(readerBuilder, sss, &jsonVal, &errs);
+        APSARA_TEST_TRUE(ret);
+        APSARA_TEST_EQUAL(jsonVal.size(), 1);
+        for (auto& event : jsonVal) {
+            APSARA_TEST_EQUAL(event["name"].asString(), "inner-event");
+            APSARA_TEST_EQUAL(event["timestamp"].asString(), "1000");
+        }
+        // start
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(10).key(), "startTime");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(10).value(), "1000");
+
+        // end
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(11).key(), "endTime");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(11).value(), "2000");
+
+        // duration
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(12).key(), "duration");
+        APSARA_TEST_EQUAL(logGroup.logs(0).contents(12).value(), "1000");
     }
     {
         // raw
@@ -391,6 +472,59 @@ BatchedEvents SLSSerializerUnittest::CreateBatchedRawEvents(bool enableNanosecon
     return batch;
 }
 
+BatchedEvents SLSSerializerUnittest::CreateBatchedSpanEvents() {
+    PipelineEventGroup group(make_shared<SourceBuffer>());
+    group.SetTag(LOG_RESERVED_KEY_TOPIC, "topic");
+    group.SetTag(LOG_RESERVED_KEY_SOURCE, "source");
+    group.SetTag(LOG_RESERVED_KEY_MACHINE_UUID, "aaa");
+    group.SetTag(LOG_RESERVED_KEY_PACKAGE_ID, "bbb");   
+    auto now = std::chrono::system_clock::now();
+    auto duration = now.time_since_epoch();
+    auto seconds = std::chrono::duration_cast<std::chrono::seconds>(duration).count();
+    StringBuffer b = group.GetSourceBuffer()->CopyString(string("pack_id"));
+    group.SetMetadataNoCopy(EventGroupMetaKey::SOURCE_ID, StringView(b.data, b.size));
+    group.SetExactlyOnceCheckpoint(RangeCheckpointPtr(new RangeCheckpoint));
+    SpanEvent* spanEvent = group.AddSpanEvent();
+    spanEvent->SetScopeTag(std::string("scope-tag-0"), std::string("scope-value-0"));
+    spanEvent->SetTag(std::string("workloadName"), std::string("arms-oneagent-test-ql"));
+    spanEvent->SetTag(std::string("workloadKind"), std::string("faceless"));
+    spanEvent->SetTag(std::string("source_ip"), std::string("10.54.0.33"));
+    spanEvent->SetTag(std::string("host"), std::string("10.54.0.33"));
+    spanEvent->SetTag(std::string("rpc"), std::string("/oneagent/qianlu/local/1"));
+    spanEvent->SetTag(std::string("rpcType"), std::string("25"));
+    spanEvent->SetTag(std::string("callType"), std::string("http-client"));
+    spanEvent->SetTag(std::string("statusCode"), std::string("200"));
+    spanEvent->SetTag(std::string("version"), std::string("HTTP1.1"));
+    auto innerEvent = spanEvent->AddEvent();
+    innerEvent->SetTag(std::string("innner-event-key-0"), std::string("inner-event-value-0"));
+    innerEvent->SetTag(std::string("innner-event-key-1"), std::string("inner-event-value-1"));
+    innerEvent->SetName("inner-event");
+    innerEvent->SetTimestampNs(1000);
+    auto innerLink = spanEvent->AddLink();
+    innerLink->SetTag(std::string("innner-link-key-0"), std::string("inner-link-value-0"));
+    innerLink->SetTag(std::string("innner-link-key-1"), std::string("inner-link-value-1"));
+    innerLink->SetTraceId("inner-link-traceid");
+    innerLink->SetSpanId("inner-link-spanid");
+    innerLink->SetTraceState("inner-link-trace-state");
+    spanEvent->SetName("/oneagent/qianlu/local/1");
+    spanEvent->SetKind(SpanEvent::Kind::Client);
+    spanEvent->SetStatus(SpanEvent::StatusCode::Ok);
+    spanEvent->SetSpanId("span-1-2-3-4-5");
+    spanEvent->SetTraceId("trace-1-2-3-4-5");
+    spanEvent->SetParentSpanId("parent-1-2-3-4-5");
+    spanEvent->SetTraceState("test-state");
+    spanEvent->SetStartTimeNs(1000);
+    spanEvent->SetEndTimeNs(2000);
+    spanEvent->SetTimestamp(seconds);
+    BatchedEvents batch(std::move(group.MutableEvents()),
+                        std::move(group.GetSizedTags()),
+                        std::move(group.GetSourceBuffer()),
+                        group.GetMetadata(EventGroupMetaKey::SOURCE_ID),
+                        std::move(group.GetExactlyOnceCheckpoint()));
+    return batch;
+}
+
 UNIT_TEST_CASE(SLSSerializerUnittest, TestSerializeEventGroup)
 UNIT_TEST_CASE(SLSSerializerUnittest, TestSerializeEventGroupList)
 
diff --git a/core/unittest/task_pipeline/CMakeLists.txt b/core/unittest/task_pipeline/CMakeLists.txt
new file mode 100644
index 0000000000..b2113a6786
--- /dev/null
+++ b/core/unittest/task_pipeline/CMakeLists.txt
@@ -0,0 +1,30 @@
+# Copyright 2023 iLogtail Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 3.22)
+project(task_pipeline_unittest)
+
+add_executable(task_registry_unittest TaskRegistryUnittest.cpp)
+target_link_libraries(task_registry_unittest ${UT_BASE_TARGET})
+
+add_executable(task_pipeline_unittest TaskPipelineUnittest.cpp)
+target_link_libraries(task_pipeline_unittest ${UT_BASE_TARGET})
+
+add_executable(task_pipeline_manager_unittest TaskPipelineManagerUnittest.cpp)
+target_link_libraries(task_pipeline_manager_unittest ${UT_BASE_TARGET})
+
+include(GoogleTest)
+gtest_discover_tests(task_registry_unittest)
+gtest_discover_tests(task_pipeline_unittest)
+gtest_discover_tests(task_pipeline_manager_unittest)
diff --git a/core/unittest/task_pipeline/TaskPipelineManagerUnittest.cpp b/core/unittest/task_pipeline/TaskPipelineManagerUnittest.cpp
new file mode 100644
index 0000000000..3519c250c0
--- /dev/null
+++ b/core/unittest/task_pipeline/TaskPipelineManagerUnittest.cpp
@@ -0,0 +1,40 @@
+// Copyright 2023 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "task_pipeline/TaskPipelineManager.h"
+#include "unittest/Unittest.h"
+
+using namespace std;
+
+namespace logtail {
+
+class TaskPipelineManagerUnittest : public testing::Test {
+public:
+    void TestPipelineManagement() const;
+};
+
+void TaskPipelineManagerUnittest::TestPipelineManagement() const {
+    TaskPipelineManager::GetInstance()->mPipelineNameEntityMap["test1"] = make_unique<TaskPipeline>();
+    TaskPipelineManager::GetInstance()->mPipelineNameEntityMap["test2"] = make_unique<TaskPipeline>();
+
+    APSARA_TEST_EQUAL(2U, TaskPipelineManager::GetInstance()->GetAllPipelineNames().size());
+    APSARA_TEST_NOT_EQUAL(nullptr, TaskPipelineManager::GetInstance()->FindPipelineByName("test1"));
+    APSARA_TEST_EQUAL(nullptr, TaskPipelineManager::GetInstance()->FindPipelineByName("test3"));
+}
+
+UNIT_TEST_CASE(TaskPipelineManagerUnittest, TestPipelineManagement)
+
+} // namespace logtail
+
+UNIT_TEST_MAIN
diff --git a/core/unittest/task_pipeline/TaskPipelineUnittest.cpp b/core/unittest/task_pipeline/TaskPipelineUnittest.cpp
new file mode 100644
index 0000000000..9332784277
--- /dev/null
+++ b/core/unittest/task_pipeline/TaskPipelineUnittest.cpp
@@ -0,0 +1,127 @@
+// Copyright 2023 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <json/json.h>
+
+#include "common/JsonUtil.h"
+#include "task_pipeline/TaskPipeline.h"
+#include "task_pipeline/TaskRegistry.h"
+#include "unittest/Unittest.h"
+#include "unittest/plugin/PluginMock.h"
+
+using namespace std;
+
+namespace logtail {
+
+class TaskPipelineUnittest : public ::testing::Test {
+public:
+    void OnSuccessfulInit() const;
+    void OnFailedInit() const;
+    void OnUpdate() const;
+
+protected:
+    static void SetUpTestCase() { LoadTaskMock(); }
+
+    static void TearDownTestCase() { TaskRegistry::GetInstance()->UnloadPlugins(); }
+
+private:
+    const string configName = "test_config";
+};
+
+void TaskPipelineUnittest::OnSuccessfulInit() const {
+    unique_ptr<Json::Value> configJson;
+    string configStr, errorMsg;
+    unique_ptr<TaskConfig> config;
+    unique_ptr<TaskPipeline> task;
+
+    configStr = R"(
+        {
+            "createTime": 1234567890,
+            "task": {
+                "Type": "task_mock"
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    auto configPtr = configJson.get();
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_TRUE(config->Parse());
+    task.reset(new TaskPipeline());
+    APSARA_TEST_TRUE(task->Init(std::move(*config)));
+    APSARA_TEST_EQUAL(configName, task->Name());
+    APSARA_TEST_EQUAL(configPtr, &task->GetConfig());
+    APSARA_TEST_EQUAL(1234567890U, task->mCreateTime);
+    APSARA_TEST_NOT_EQUAL(nullptr, task->mPlugin);
+    APSARA_TEST_EQUAL(TaskMock::sName, task->mPlugin->Name());
+}
+
+void TaskPipelineUnittest::OnFailedInit() const {
+    unique_ptr<Json::Value> configJson;
+    string configStr, errorMsg;
+    unique_ptr<TaskConfig> config;
+    unique_ptr<TaskPipeline> task;
+
+    configStr = R"(
+        {
+            "createTime": 1234567890,
+            "task": {
+                "Type": "task_mock",
+                "Valid": false
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_TRUE(config->Parse());
+    task.reset(new TaskPipeline());
+    APSARA_TEST_FALSE(task->Init(std::move(*config)));
+}
+
+void TaskPipelineUnittest::OnUpdate() const {
+    unique_ptr<Json::Value> configJson;
+    string configStr, errorMsg;
+    unique_ptr<TaskConfig> config;
+    unique_ptr<TaskPipeline> task;
+
+    configStr = R"(
+        {
+            "createTime": 1234567890,
+            "task": {
+                "Type": "task_mock"
+            }
+        }
+    )";
+    configJson.reset(new Json::Value());
+    APSARA_TEST_TRUE(ParseJsonTable(configStr, *configJson, errorMsg));
+    config.reset(new TaskConfig(configName, std::move(configJson)));
+    APSARA_TEST_TRUE(config->Parse());
+    task.reset(new TaskPipeline());
+    APSARA_TEST_TRUE(task->Init(std::move(*config)));
+
+    auto ptr = static_cast<TaskMock*>(task->mPlugin.get());
+    task->Start();
+    APSARA_TEST_TRUE(ptr->mIsRunning);
+    task->Stop(true);
+    APSARA_TEST_FALSE(ptr->mIsRunning);
+}
+
+UNIT_TEST_CASE(TaskPipelineUnittest, OnSuccessfulInit)
+UNIT_TEST_CASE(TaskPipelineUnittest, OnFailedInit)
+UNIT_TEST_CASE(TaskPipelineUnittest, OnUpdate)
+
+} // namespace logtail
+
+UNIT_TEST_MAIN
diff --git a/core/unittest/task_pipeline/TaskRegistryUnittest.cpp b/core/unittest/task_pipeline/TaskRegistryUnittest.cpp
new file mode 100644
index 0000000000..05e84a1598
--- /dev/null
+++ b/core/unittest/task_pipeline/TaskRegistryUnittest.cpp
@@ -0,0 +1,50 @@
+// Copyright 2024 iLogtail Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <memory>
+
+#include "task_pipeline/TaskRegistry.h"
+#include "unittest/Unittest.h"
+#include "unittest/plugin/PluginMock.h"
+
+using namespace std;
+
+namespace logtail {
+
+class TaskRegistryUnittest : public testing::Test {
+public:
+    void TestCreateTask() const;
+    void TestValidPlugin() const;
+
+protected:
+    void SetUp() override { LoadTaskMock(); }
+    void TearDown() override { TaskRegistry::GetInstance()->UnloadPlugins(); }
+};
+
+void TaskRegistryUnittest::TestCreateTask() const {
+    auto input = TaskRegistry::GetInstance()->CreateTask(TaskMock::sName);
+    APSARA_TEST_NOT_EQUAL(nullptr, input);
+}
+
+void TaskRegistryUnittest::TestValidPlugin() const {
+    APSARA_TEST_TRUE(TaskRegistry::GetInstance()->IsValidPlugin("task_mock"));
+    APSARA_TEST_FALSE(TaskRegistry::GetInstance()->IsValidPlugin("task_unknown"));
+}
+
+UNIT_TEST_CASE(TaskRegistryUnittest, TestCreateTask)
+UNIT_TEST_CASE(TaskRegistryUnittest, TestValidPlugin)
+
+} // namespace logtail
+
+UNIT_TEST_MAIN
diff --git a/docs/cn/SUMMARY.md b/docs/cn/SUMMARY.md
index 037863731a..9a99de227d 100644
--- a/docs/cn/SUMMARY.md
+++ b/docs/cn/SUMMARY.md
@@ -30,6 +30,8 @@
   * [Docker镜像](installation/sources/docker-image.md)
   * [编译依赖](installation/sources/dependencies.md)
 * [镜像站](installation/mirrors.md)
+* [LoongCollector 的目录结构说明](installation/loongcollector-dir.md)
+* [LoongCollector 的 Logtail 兼容模式使用指南](installation/logtail-mode.md)
 
 ## 概念 <a href="#concepts" id="concepts"></a>
 
@@ -148,10 +150,12 @@
   * [Checkpoint接口](developer-guide/plugin-development/checkpoint-api.md)
   * [Logger接口](developer-guide/plugin-development/logger-api.md)
   * [自监控指标接口](developer-guide/plugin-development/plugin-self-monitor-guide.md)
-  * [如何开发Input插件](developer-guide/plugin-development/how-to-write-input-plugins.md)
-  * [如何开发Processor插件](developer-guide/plugin-development/how-to-write-processor-plugins.md)
-  * [如何开发Aggregator插件](developer-guide/plugin-development/how-to-write-aggregator-plugins.md)
-  * [如何开发Flusher插件](developer-guide/plugin-development/how-to-write-flusher-plugins.md)
+  * [如何开发原生Input插件](developer-guide/plugin-development/how-to-write-native-input-plugins.md)
+  * [如何开发原生Flusher插件](developer-guide/plugin-development/how-to-write-native-flusher-plugins.md)
+  * [如何开发扩展Input插件](developer-guide/plugin-development/how-to-write-input-plugins.md)
+  * [如何开发扩展Processor插件](developer-guide/plugin-development/how-to-write-processor-plugins.md)
+  * [如何开发扩展Aggregator插件](developer-guide/plugin-development/how-to-write-aggregator-plugins.md)
+  * [如何开发扩展Flusher插件](developer-guide/plugin-development/how-to-write-flusher-plugins.md)
   * [如何生成插件文档](developer-guide/plugin-development/how-to-genernate-plugin-docs.md)
   * [插件文档规范](docs/cn/developer-guide/plugin-development/plugin-doc-templete.md)
   * [纯插件模式启动](developer-guide/plugin-development/pure-plugin-start.md)
diff --git a/docs/cn/developer-guide/plugin-development/how-to-write-native-flusher-plugins.md b/docs/cn/developer-guide/plugin-development/how-to-write-native-flusher-plugins.md
new file mode 100644
index 0000000000..031b1ed76c
--- /dev/null
+++ b/docs/cn/developer-guide/plugin-development/how-to-write-native-flusher-plugins.md
@@ -0,0 +1,209 @@
+# 如何开发原生Flusher插件
+
+## 接口定义
+
+```c++
+class Flusher : public Plugin {
+public:
+    // 用于初始化插件参数,同时根据参数初始化Flusher级的组件
+    virtual bool Init(const Json::Value& config, Json::Value& optionalGoPipeline) = 0;
+    virtual bool Start();
+    virtual bool Stop(bool isPipelineRemoving);
+    // 用于将处理插件的输出经过聚合、序列化和压缩处理后,放入发送队列
+    virtual void Send(PipelineEventGroup&& g) = 0;
+    // 用于将聚合组件内指定聚合队列内的数据进行强制发送
+    virtual void Flush(size_t key) = 0;
+    // 用于将聚合组件内的所有数据进行强制发送
+    virtual void FlushAll() = 0;
+};
+```
+
+对于使用Http协议发送数据的Flusher,进一步定义了HttpFlusher,接口如下:
+
+```c++
+class HttpFlusher : public Flusher {
+public:
+    // 用于将待发送数据打包成http请求
+    virtual bool BuildRequest(SenderQueueItem* item, std::unique_ptr<HttpSinkRequest>& req, bool* keepItem) const = 0;
+    // 用于发送完成后进行记录和处理
+    virtual void OnSendDone(const HttpResponse& response, SenderQueueItem* item) = 0;
+};
+```
+
+## Flusher级组件
+
+### 聚合(必选)
+
+* 作用:将多个小的PipelineEventGroup根据tag异同合并成一个大的group,提升发送效率
+
+* 参数:
+
+可以在flusher的参数中配置Batch字段,该字段的类型为map,其中允许包含的字段如下:
+
+|  **名称**  |  **类型**  |  **默认值**  |  **说明**  |
+| --- | --- | --- | --- |
+|  MinCnt  |  uint  |  每个Flusher自定义  |  每个聚合队列最少包含的event数量  |
+|  MinSizeBytes  |  uint  |  每个Flusher自定义  |  每个聚合队列的最小尺寸(字节)  |
+|  TimeoutSecs  |  uint  |  每个Flusher自定义  |  每个聚合队列在第一个event加入后,在被输出前最多等待的时间(秒)  |
+
+* 类接口:
+
+```c++
+template <typename T = EventBatchStatus>
+class Batcher {
+public:
+    bool Init(const Json::Value& config,
+              Flusher* flusher,
+              const DefaultFlushStrategyOptions& strategy,
+              bool enableGroupBatch = false);
+    void Add(PipelineEventGroup&& g, std::vector<BatchedEventsList>& res);
+    void FlushQueue(size_t key, BatchedEventsList& res);
+    void FlushAll(std::vector<BatchedEventsList>& res);
+};
+```
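+
+在插件的`Init`函数中初始化聚合组件的一个简化示意如下(仅供参考:`DefaultFlushStrategyOptions`的具体字段、`Batch`字段的解析细节以实际代码为准,`Flusherxxx`为示例类名):
+
+```c++
+bool Flusherxxx::Init(const Json::Value& config, Json::Value& optionalGoPipeline) {
+    // strategy中填入该Flusher自定义的默认聚合参数(字段此处从略)
+    DefaultFlushStrategyOptions strategy;
+    // 取出配置中的Batch字段(未配置时为null值),交给Batcher完成初始化
+    const Json::Value& batchConfig = config["Batch"];
+    if (!mBatcher.Init(batchConfig, this, strategy)) {
+        return false;
+    }
+    // ... 序列化、压缩等其余组件的初始化
+    return true;
+}
+```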
+
+### 序列化(必选)
+
+* 作用:对聚合模块的输出进行序列化,分为2个层级:
+
+  * event级:对每一个event单独进行序列化
+
+  * event group级:对多个event进行批量的序列化
+
+* 类接口:
+
+```c++
+// T: PipelineEventPtr, BatchedEvents, BatchedEventsList
+template <typename T>
+class Serializer {
+private:
+    virtual bool Serialize(T&& p, std::string& res, std::string& errorMsg) = 0;
+};
+
+using EventSerializer = Serializer<PipelineEventPtr>;
+using EventGroupSerializer = Serializer<BatchedEvents>;
+```
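+
+以派生`EventGroupSerializer`为例,一个最小的实现示意如下(类名与注释均为示例;若基类构造函数需要额外参数,请按实际定义补充):
+
+```c++
+class xxxEventGroupSerializer : public EventGroupSerializer {
+private:
+    bool Serialize(BatchedEvents&& group, std::string& res, std::string& errorMsg) override {
+        // 遍历group中的event,转换为目标格式并写入res;
+        // 转换失败时填充errorMsg并返回false
+        return true;
+    }
+};
+```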
+
+### 压缩(可选)
+
+* 作用:对序列化后的结果进行压缩
+
+* 类接口:
+
+```c++
+class Compressor {
+private:
+    virtual bool Compress(const std::string& input, std::string& output, std::string& errorMsg) = 0;
+};
+```
+
+## 开发步骤
+
+下面以开发一个HttpFlusher为例,说明整个开发步骤:
+
+1. 在plugin/flusher目录下新建一个Flusherxxx.h和Flusherxxx.cpp文件,用于派生HttpFlusher接口生成具体的插件类;
+
+2. 在Flusherxxx.h文件中定义新的输出插件类Flusherxxx,满足以下要求:
+
+   a. 所有的可配置参数的权限为public,其余参数的权限均为private
+
+   b. 新增一个聚合组件:`Batcher<> mBatcher;`
+
+   c. 新增一个序列化组件:`std::unique_ptr<T> mSerializer;`,其中T为`EventSerializer`和`EventGroupSerializer`中的一种
+
+   d. 如果需要压缩,则新增一个压缩组件:`std::unique_ptr<Compressor> mCompressor;`
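+
+   一个满足上述要求的类声明示意如下(省略头文件包含;类名与`mEndpoint`等成员仅为示例,基类中其余需要实现的接口从略):
+
+    ```c++
+    class Flusherxxx : public HttpFlusher {
+    public:
+        // 可配置参数,权限为public
+        std::string mEndpoint;
+
+        bool Init(const Json::Value& config, Json::Value& optionalGoPipeline) override;
+        void Send(PipelineEventGroup&& g) override;
+        void Flush(size_t key) override;
+        void FlushAll() override;
+        bool BuildRequest(SenderQueueItem* item, std::unique_ptr<HttpSinkRequest>& req, bool* keepItem) const override;
+        void OnSendDone(const HttpResponse& response, SenderQueueItem* item) override;
+
+    private:
+        void SerializeAndPush(BatchedEventsList&& groupList);
+        void SerializeAndPush(std::vector<BatchedEventsList>&& groupLists);
+
+        Batcher<> mBatcher;                                 // 聚合组件
+        std::unique_ptr<EventGroupSerializer> mSerializer;  // 序列化组件
+        std::unique_ptr<Compressor> mCompressor;            // 压缩组件(可选)
+    };
+    ```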
+
+3. 在pipeline/serializer目录下新建一个xxxSerializer.h和xxxSerializer.cpp文件,用于派生`Serializer` 接口生成具体类;
+
+4. (可选)如果需要压缩组件,且现有压缩组件库中没有所需算法,则新增一个压缩组件:
+
+    a. 在common/compression/CompressType.h文件中,扩展CompressType类用以标识新的压缩算法;
+
+    b. 在common/compression目录下新建一个xxxCompressor.h和xxxCompressor.cpp文件,用于派生`Compressor`接口生成具体类;
+
+    c. 在common/compression/CompressorFactory.cpp文件的各个函数中注册该压缩组件;
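+
+    压缩组件本身的一个最小示意如下(类名仅为示例;若基类构造函数需要传入压缩类型等参数,请按实际定义补充):
+
+    ```c++
+    class xxxCompressor : public Compressor {
+    private:
+        bool Compress(const std::string& input, std::string& output, std::string& errorMsg) override {
+            // 调用相应压缩库将input压缩后写入output;失败时填充errorMsg并返回false
+            return true;
+        }
+    };
+    ```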
+
+5. 在Flusherxxx.cpp文件中实现插件类
+
+    a. `Init`函数:
+
+       i. 根据入参初始化插件,针对非法参数,根据非法程度和影响决定是跳过该参数、使用默认值或直接拒绝加载插件。
+
+       ii. 调用相关函数完成聚合、序列化和压缩组件的初始化
+
+    b. `SerializeAndPush(BatchedEventsList&&)`函数:
+
+    ```c++
+    void Flusherxxx::SerializeAndPush(BatchedEventsList&& groupLists) {
+        // 1. 使用mSerializer->Serialize函数对入参序列化
+        // 2. 如果需要压缩,则使用mCompressor->Compress函数对序列化结果进行压缩
+        // 3. 构建发送队列元素,其中,
+        //   a. data为待发送内容
+        //   b. 如果未使用压缩组件,则rawSize=data.size();否则,rawSize为压缩前(序列化后)数据的长度
+        //   c. mQueueKey为发送队列的key
+        auto item = make_unique<SenderQueueItem>(std::move(data),
+                                                rawSize,
+                                                this,
+                                                mQueueKey);
+        Flusher::PushToQueue(std::move(item));
+    }
+    ```
+
+    c. `SerializeAndPush(vector<BatchedEventsList>&&)`函数:
+
+    ```c++
+    void Flusherxxx::SerializeAndPush(vector<BatchedEventsList>&& groupLists) {
+        for (auto& groupList : groupLists) {
+            SerializeAndPush(std::move(groupList));
+        }
+    }
+    ```
+
+    d. `Send`函数:
+
+    ```c++
+    void Flusherxxx::Send(PipelineEventGroup&& g) {
+        vector<BatchedEventsList> res;
+        mBatcher.Add(std::move(g), res);
+        SerializeAndPush(std::move(res));
+    }
+    ```
+
+    e. `Flush`函数:
+
+    ```c++
+    void Flusherxxx::Flush(size_t key) {
+        BatchedEventsList res;
+        mBatcher.FlushQueue(key, res);
+        SerializeAndPush(std::move(res));
+    }
+    ```
+
+    f. `FlushAll`函数:
+
+    ```c++
+    void Flusherxxx::FlushAll() {
+        vector<BatchedEventsList> res;
+        mBatcher.FlushAll(res);
+        SerializeAndPush(std::move(res));
+    }
+    ```
+
+    g. `BuildRequest`函数:将待发送数据包装成一个Http请求,如果请求构建失败,使用`keepItem`参数记录是否要保留数据供以后重试。
+
+    h. `OnSendDone`函数:根据返回的http response进行相应的记录和操作。
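+
+    `BuildRequest`与`OnSendDone`的一个简化骨架如下(仅示意流程;`HttpSinkRequest`的构造方式与响应字段以实际代码为准):
+
+    ```c++
+    bool Flusherxxx::BuildRequest(SenderQueueItem* item, std::unique_ptr<HttpSinkRequest>& req, bool* keepItem) const {
+        // 1. 根据item中的待发送数据组装HTTP方法、URL、header与body,构造req
+        // 2. 构造失败时返回false,并通过*keepItem指明是否保留数据供后续重试
+        return true;
+    }
+
+    void Flusherxxx::OnSendDone(const HttpResponse& response, SenderQueueItem* item) {
+        // 根据response的状态码决定成功记录、重试或丢弃,并更新相关的自监控信息
+    }
+    ```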
+
+6. 在`PluginRegistry`类中注册该插件:
+
+   a. 在pipeline/plugin/PluginRegistry.cpp文件的头文件包含区新增如下行:
+
+    ```c++
+    #include "plugin/flusher/Flusherxxx.h"
+    ```
+
+   b. 在`PluginRegistry`类的`LoadStaticPlugins()`函数中新增如下行:
+
+    ```c++
+    RegisterFlusherCreator(new StaticFlusherCreator<Flusherxxx>());
+    ```
diff --git a/docs/cn/developer-guide/plugin-development/how-to-write-native-input-plugins.md b/docs/cn/developer-guide/plugin-development/how-to-write-native-input-plugins.md
new file mode 100644
index 0000000000..505cb40509
--- /dev/null
+++ b/docs/cn/developer-guide/plugin-development/how-to-write-native-input-plugins.md
@@ -0,0 +1,98 @@
+# 如何开发原生Input插件
+
+## 工作模式
+
+同一输入类型的所有插件实例共享同一个线程来获取数据,插件实例只负责保存插件配置。
+
+## 接口定义
+
+```c++
+class Input : public Plugin {
+public:
+    // 初始化插件,入参为插件参数
+    virtual bool Init(const Json::Value& config) = 0;
+    // 负责向管理类注册配置
+    virtual bool Start() = 0;
+    // 负责向管理类注销配置
+    virtual bool Stop(bool isPipelineRemoving) = 0;
+};
+```
+
+## 开发步骤
+
+1. 在plugin/input目录下新建一个Inputxxx.h和Inputxxx.cpp文件,用于派生Input接口生成具体的插件类;
+
+2. 在Inputxxx.h文件中定义新的输入插件类Inputxxx,满足以下规范:
+
+   a. 所有的可配置参数的权限为public,其余参数的权限均为private。
+
+3. 在Inputxxx.cpp文件中实现`Init`函数,即根据入参初始化插件,针对非法参数,根据非法程度和影响决定是跳过该参数、使用默认值或直接拒绝加载插件。
+
+4. 在根目录下新增一个目录,用于创建当前输入插件的管理类及其他辅助类,该管理类需要继承InputRunner接口:
+
+```c++
+class InputRunner {
+public:
+    // 调用点:由插件的Start函数调用
+    // 作用:初始化管理类,并至少启动一个线程用于采集数据
+    // 注意:该函数必须是可重入的,因此需要在函数开头判断是否已经启动线程,如果是则直接退出
+    virtual void Init() = 0;
+    // 调用点:进程退出时,或配置热加载结束后无注册插件时由框架调用
+    // 作用:停止管理类,并进行扫尾工作,如资源回收、checkpoint记录等
+    virtual void Stop() = 0;
+    // 调用点:每次配置热加载结束后由框架调用
+    // 作用:判断是否有插件注册,若无,则框架将调用Stop函数对线程资源进行回收
+    virtual bool HasRegisteredPlugin() const = 0;
+};
+```
+
+管理类是输入插件线程资源的实际拥有者,其最基本的运行流程如下:
+
+- 依次访问每个注册的配置,根据配置情况抓取数据;
+
+- 根据数据类型将源数据转换为PipelineEvent子类中的一种,并将一批数据组装成PipelineEventGroup;
+
+- 将PipelineEventGroup发送到相应配置的处理队列中:
+
+```c++
+ProcessorRunner::GetInstance()->PushQueue(queueKey, inputIdx, std::move(group));
+```
+
+其中,
+
+- queueKey是处理队列的key,可以通过相应流水线的PipelineContext类的`GetProcessQueueKey()`方法获取。
+
+- inputIdx是当前数据所属输入插件在该流水线所有输入插件的位置(即配置中第几个,从0开始计数)
+
+- group是待发送的数据包
+
+最后,为了支持插件向管理类注册,管理类还需要提供注册和注销函数供插件使用。从性能的角度考虑,**该注册和注销过程应当是独立的,即某个插件的注册和注销不应当影响整个线程的运转**。
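+
+一个管理类的简化骨架如下(省略头文件包含;类名、成员与注册接口均为示例,线程启动与并发控制的细节从略):
+
+```c++
+class Inputxxx; // 对应的输入插件类
+
+class InputxxxManager : public InputRunner {
+public:
+    static InputxxxManager* GetInstance() {
+        static InputxxxManager instance;
+        return &instance;
+    }
+
+    // 可重入:已启动采集线程则直接返回
+    void Init() override;
+    // 停止采集线程,并完成资源回收、checkpoint记录等扫尾工作
+    void Stop() override;
+    bool HasRegisteredPlugin() const override { return !mRegisteredConfigs.empty(); }
+
+    // 供Inputxxx插件在Start/Stop中调用;注册与注销不应阻塞采集线程
+    void AddConfig(const std::string& configName, Inputxxx* plugin);
+    void RemoveConfig(const std::string& configName);
+
+private:
+    InputxxxManager() = default;
+
+    std::map<std::string, Inputxxx*> mRegisteredConfigs;
+};
+```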
+
+5. 在Inputxxx.cpp文件中实现其余接口函数:
+
+    ```c++
+    bool Inputxxx::Start() {
+        // 1. 调用管理类的Start函数
+        // 2. 将当前插件注册到管理类中
+    }
+
+    bool Inputxxx::Stop(bool isPipelineRemoving) {
+        // 将当前插件从管理类中注销
+    }
+    ```
+
+6. 在`PluginRegistry`类中注册该插件:
+
+   a. 在pipeline/plugin/PluginRegistry.cpp文件的头文件包含区新增如下行:
+
+    ```c++
+    #include "plugin/input/Inputxxx.h"
+    ```
+
+   b. 在`PluginRegistry`类的`LoadStaticPlugins()`函数中新增如下行:
+
+    ```c++
+    RegisterInputCreator(new StaticInputCreator<Inputxxx>());
+    ```
+
+   c. 在`PipelineManager`类的构造函数中注册该插件的管理类
diff --git a/docs/cn/installation/logtail-mode.md b/docs/cn/installation/logtail-mode.md
new file mode 100644
index 0000000000..bbc6fa7683
--- /dev/null
+++ b/docs/cn/installation/logtail-mode.md
@@ -0,0 +1,79 @@
+# LoongCollector 的 Logtail 兼容模式使用指南
+
+LoongCollector 提供了 Logtail 兼容模式,可以让您在升级到 LoongCollector 后继续使用原有的 Logtail 配置和数据,实现平滑迁移。本文将详细介绍如何配置和使用这个兼容模式。
+
+> 在开始之前,请先了解 [LoongCollector 的目录结构说明](loongcollector-dir.md)。
+
+## 为什么需要兼容模式?
+
+由于 LoongCollector 采用了新的目录结构和配置体系,与原有 Logtail 存在差异。如果相关目录和文件难以直接升级迁移,您可以选择使用 Logtail 兼容模式。启用兼容模式后,LoongCollector 将:
+
+- 保持与 Logtail 相同的目录结构
+
+- 继续使用 Logtail 的自定义目录配置方式
+
+- 继续使用 Logtail 的文件命名格式
+
+## 配置方法
+
+### 1. 主机环境配置
+
+您可以通过以下两种方式之一启用兼容模式:
+
+**方式一:命令行参数**
+
+```bash
+./loongcollector --logtail_mode=true
+```
+
+**方式二:环境变量**
+
+```bash
+export logtail_mode=true
+./loongcollector
+```
+
+### 2. 容器环境配置
+
+此前的 Logtail 容器镜像中,Logtail 运行时目录为 `/usr/local/ilogtail`,而 LoongCollector 运行时目录为 `/usr/local/loongcollector`。
+
+因此,在容器环境中,除了启用兼容模式外,还需要调整目录映射。请按照以下步骤操作:
+
+1. 为 LoongCollector 容器添加环境变量:
+
+```bash
+logtail_mode=true
+```
+
+2. 调整 LoongCollector 的挂载路径映射:
+
+将所有 `/usr/local/ilogtail` 路径替换为 `/usr/local/loongcollector`:
+
+```plaintext
+# 常用目录映射示例
+数据检查点:
+/usr/local/ilogtail/checkpoint → /usr/local/loongcollector/checkpoint
+
+采集配置目录:
+/usr/local/ilogtail/config → /usr/local/loongcollector/config
+```
+
+3. 将容器镜像地址修改为 LoongCollector 镜像地址:
+
+`sls-opensource-registry-vpc.cn-shanghai.cr.aliyuncs.com/loongcollector-community-edition/loongcollector:版本号`
+
+## 迁移建议
+
+为确保平稳迁移,我们建议您:
+
+1. 先在测试环境进行充分验证
+
+2. 选择业务低峰期进行升级
+
+3. 做好配置和数据的备份
+
+4. 逐步迁移,避免一次性升级所有实例
+
+5. 密切监控日志采集状态
+
+> **注意**: 迁移过程中请确保数据完整性,建议先在测试环境中充分验证,并在业务非高峰期进行升级操作。
\ No newline at end of file
diff --git a/docs/cn/installation/loongcollector-dir.md b/docs/cn/installation/loongcollector-dir.md
new file mode 100644
index 0000000000..36d274d626
--- /dev/null
+++ b/docs/cn/installation/loongcollector-dir.md
@@ -0,0 +1,225 @@
+# LoongCollector 的目录结构说明
+
+## 产品更名说明
+
+作为 2024 年产品规划的重要组成部分,iLogtail 产品将正式更名为 LoongCollector。此次更新主要包含以下变更:
+
+- 程序二进制文件由 iLogtail 更名为 LoongCollector
+
+- 全面优化目录结构和配置文件布局,提供更清晰的组织方式
+
+## 新版目录结构
+
+LoongCollector 采用模块化的分层目录设计,以下展示了安装在 /opt/loongcollector 下的标准目录结构:
+
+库文件:
+
+- `/opt/loongcollector/libPluginAdapter.so`
+
+- `/opt/loongcollector/libPluginBase.so`
+
+自带证书:`/opt/loongcollector/ca-bundle.crt`
+
+**配置文件目录:**`/opt/loongcollector/conf`
+
+日志配置文件:
+
+- `/opt/loongcollector/conf/apsara_log_conf.json`
+
+- `/opt/loongcollector/conf/plugin_logger.xml`
+
+标识配置文件:
+
+- `/opt/loongcollector/conf/user_defined_id`
+
+采集配置目录:`/opt/loongcollector/conf/continuous_pipeline_config`
+
+进程级配置目录:`/opt/loongcollector/conf/instance_config`
+
+**数据目录:**`/opt/loongcollector/data`
+
+检查点:
+
+- `/opt/loongcollector/data/go_plugin_checkpoint`
+
+- `/opt/loongcollector/data/exactly_once_checkpoint`
+
+- `/opt/loongcollector/data/file_check_point`
+
+容器路径映射:`/opt/loongcollector/data/docker_path_config.json`
+
+未发送数据:`/opt/loongcollector/data/send_buffer_file_xxxxxxxxxxxx`
+
+Crash临时文件:`/opt/loongcollector/data/backtrace.dat`
+
+**日志目录:**`/opt/loongcollector/log`
+
+主要日志:`/opt/loongcollector/log/loongcollector.log`
+
+Go插件日志:`/opt/loongcollector/log/go_plugin.log`
+
+日志库初始化日志:`/opt/loongcollector/log/logger_initialization.log`
+
+Profile日志:`/opt/loongcollector/log/snapshot`
+
+**run目录:**`/opt/loongcollector/run`
+
+Pid文件:`/opt/loongcollector/run/loongcollector.pid`
+
+inotify日志:`/opt/loongcollector/run/inotify_watcher_dirs`
+
+进程信息日志:`/opt/loongcollector/run/app_info.json`
+
+```plaintext
+/
+└── /opt/loongcollector/
+                       ├── loongcollector                 # 主程序
+                       ├── libPluginAdapter.so
+                       ├── libPluginBase.so
+                       ├── ca-bundle.crt
+                       ├── plugins/                       # 插件目录
+                       │      └── custom plugins          # 自定义插件
+                       ├── dump                           # 仅由 service_http_server 输入插件使用
+                       ├── thirdparty/                    # 第三方依赖
+                       │      ├── jvm
+                       │      └── telegraf
+                       ├── conf/                          # 配置目录
+                       │      ├── scripts
+                       │      ├── apsara_log_conf.json
+                       │      ├── plugin_logger.xml
+                       │      ├── user_defined_id
+                       │      ├── authorization.json
+                       │      ├── continuous_pipeline_config/
+                       │      │                 ├── local/
+                       │      │                 │         └── collect_stdout.json
+                       │      │                 └── remote/
+                       │      │                           └── collect_file.json
+                       │      └── instance_config/
+                       │                        ├── local/
+                       │                        │         ├── loongcollector_config.json(loongcollector配置)
+                       │                        │         └── ebpf.json
+                       │                        └── remote/
+                       │                                  ├── region.json
+                       │                                  └── resource.json
+                       ├── data/                                    # 数据目录
+                       │       ├── file_check_point                 # 文件采集的checkpoint
+                       │       ├── exactly_once_checkpoint/
+                       │       ├── go_plugin_checkpoint/            # go插件采集的checkpoint
+                       │       ├── docker_path_config.json
+                       │       ├── send_buffer_file_xxxxxxxxxxxx
+                       │       └── backtrace.dat
+                       ├── log/                                     # 日志目录
+                       │       ├── loongcollector.log
+                       │       ├── loongcollector.log.1
+                       │       ├── go_plugin.log
+                       │       ├── go_plugin.log.1
+                       │       ├── logger_initialization.log
+                       │       └── snapshot/
+                       └── run/
+                               ├── loongcollector.pid
+                               ├── inotify_watcher_dirs
+                               └── app_info.json
+```
+
+## 目录自定义配置
+
+### 支持的自定义目录参数
+
+LoongCollector 提供以下参数用于自定义各类目录位置:
+
+- `loongcollector_conf_dir`: 配置目录
+
+- `loongcollector_log_dir`: 日志目录
+
+- `loongcollector_data_dir`: 数据目录
+
+- `loongcollector_run_dir`: 运行时目录
+
+- `loongcollector_third_party_dir`: 第三方依赖目录
+
+### 配置方式
+
+1. 命令行参数:
+
+```bash
+./loongcollector --loongcollector_conf_dir=/custom/path/conf
+```
+
+2. 环境变量:
+
+```bash
+export loongcollector_conf_dir=/custom/path/conf
+./loongcollector
+```
+
+## 命名变更对照表
+
+为确保命名一致性,我们对以下文件和目录进行了规范化命名:
+
+| 文件/目录作用            | 原命名                  | 新命名                      |
+| ------------------------ | ----------------------- | --------------------------- |
+| agent可观测文件          | logtail_monitor_info    | loongcollector_monitor_info |
+| go插件采集的checkpoint   | checkpoint              | go_plugin_checkpoint        |
+| go插件运行日志           | logtail_plugin.LOG      | go_plugin.LOG               |
+| 采集配置目录名           | config                  | continuous_pipeline_config  |
+| exactly_once的checkpoint | checkpoint_v2           | exactly_once_checkpoint     |
+| agent的发送缓冲文件          | logtail_buffer_file_xxx | send_buffer_file_xxx        |
+| agent可观测文件          | ilogtail_status.LOG     | loongcollector_status.LOG   |
+| agent运行日志            | ilogtail.LOG            | loongcollector.LOG          |
+
+## 配置兼容性说明
+
+为简化配置体系,以下原 Logtail 配置项将不再默认支持:
+
+- sls_observer_ebpf_host_path
+
+- logtail_snapshot_dir
+
+- inotify_watcher_dirs_dump_filename
+
+- local_event_data_file_name
+
+- crash_stack_file_name
+
+- check_point_filename
+
+- adhoc_check_point_file_dir
+
+- app_info_file
+
+- ilogtail_config
+
+- ilogtail_config_env_name
+
+- logtail_sys_conf_dir
+
+- ALIYUN_LOGTAIL_SYS_CONF_DIR
+
+- ilogtail_docker_file_path_config
+
+## 升级建议
+
+1. **兼容模式**: 如需保持与 Logtail 的兼容性,请参考 [LoongCollector 的 Logtail 兼容模式使用指南](logtail-mode.md)
+
+2. **新版迁移**: 如果选择使用新版目录结构:
+   - 建议先备份原有配置和数据
+
+   - 按新版目录结构迁移文件
+
+   - 更新相关配置引用
+
+   - 验证服务正常运行
+
+为确保平稳迁移,我们建议您:
+
+1. 先在测试环境进行充分验证
+
+2. 选择业务低峰期进行升级
+
+3. 做好配置和数据的备份
+
+4. 逐步迁移,避免一次性升级所有实例
+
+5. 密切监控日志采集状态
+
+> **注意**: 迁移过程中请确保数据完整性,建议先在测试环境中充分验证,并在业务非高峰期进行升级操作。
\ No newline at end of file
diff --git a/docs/cn/plugins/input/service-otlp.md b/docs/cn/plugins/input/service-otlp.md
index a47b7b6aad..8ff61e6d85 100644
--- a/docs/cn/plugins/input/service-otlp.md
+++ b/docs/cn/plugins/input/service-otlp.md
@@ -2,7 +2,7 @@
 
 ## 简介
 
-`service_otlp` `input`插件实现了`ServiceInputV1`和`ServiceInputV2`接口,可以接受`Opentelemetry log/metric/trace protocol`的http/gRPC请求,并且转换输出SLSProto或PipelineGroupEvents。目前尚不支持otlp trace转换到SLSProto。
+`service_otlp` `input`插件实现了`ServiceInputV1`和`ServiceInputV2`接口,可以接受`Opentelemetry log/metric/trace protocol`的http/gRPC请求,并且转换输出SLSProto或PipelineGroupEvents。
 
 ## 版本
 
diff --git a/pkg/config/global_config.go b/pkg/config/global_config.go
index 487428fa8f..dbcee71a4b 100644
--- a/pkg/config/global_config.go
+++ b/pkg/config/global_config.go
@@ -38,6 +38,12 @@ type GlobalConfig struct {
 	LoongcollectorDebugDir string
 	// Directory to store loongcollector third party data.
 	LoongcollectorThirdPartyDir string
+	// Log name of loongcollector plugin.
+	LoongcollectorPluginLogName string
+	// Tag of loongcollector version.
+	LoongcollectorVersionTag string
+	// Checkpoint file name of loongcollector plugin.
+	LoongcollectorCheckPointFile string
 	// Network identification from loongcollector.
 	HostIP       string
 	Hostname     string
@@ -59,18 +65,21 @@ var UserAgent = fmt.Sprintf("ilogtail/%v (%v)", BaseVersion, runtime.GOOS) // se
 
 func newGlobalConfig() (cfg GlobalConfig) {
 	cfg = GlobalConfig{
-		InputMaxFirstCollectDelayMs: 10000, // 10s
-		InputIntervalMs:             1000,  // 1s
-		AggregatIntervalMs:          3000,
-		FlushIntervalMs:             3000,
-		DefaultLogQueueSize:         1000,
-		DefaultLogGroupQueueSize:    4,
-		LoongcollectorConfDir:       "./conf/",
-		LoongcollectorLogDir:        "./log/",
-		LoongcollectorDataDir:       "./data/",
-		LoongcollectorDebugDir:      "./debug/",
-		LoongcollectorThirdPartyDir: "./thirdparty/",
-		DelayStopSec:                300,
+		InputMaxFirstCollectDelayMs:  10000, // 10s
+		InputIntervalMs:              1000,  // 1s
+		AggregatIntervalMs:           3000,
+		FlushIntervalMs:              3000,
+		DefaultLogQueueSize:          1000,
+		DefaultLogGroupQueueSize:     4,
+		LoongcollectorConfDir:        "./conf/",
+		LoongcollectorLogDir:         "./log/",
+		LoongcollectorPluginLogName:  "go_plugin.LOG",
+		LoongcollectorVersionTag:     "loongcollector_version",
+		LoongcollectorCheckPointFile: "go_plugin_checkpoint",
+		LoongcollectorDataDir:        "./data/",
+		LoongcollectorDebugDir:       "./debug/",
+		LoongcollectorThirdPartyDir:  "./thirdparty/",
+		DelayStopSec:                 300,
 	}
 	return
 }
diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go
index 802502f41b..6b5e6f6e61 100644
--- a/pkg/logger/logger.go
+++ b/pkg/logger/logger.go
@@ -42,7 +42,7 @@ const (
 	asyncPattern = `
 <seelog type="asynctimer" asyncinterval="500000" minlevel="%s" >
  <outputs formatid="common">
-	 <rollingfile type="size" filename="%sgo_plugin.LOG" maxsize="20000000" maxrolls="10"/>
+	 <rollingfile type="size" filename="%s%s" maxsize="20000000" maxrolls="10"/>
 	 %s
      %s
  </outputs>
@@ -54,7 +54,7 @@ const (
 	syncPattern = `
 <seelog type="sync" minlevel="%s" >
  <outputs formatid="common">
-	 <rollingfile type="size" filename="%sgo_plugin.LOG" maxsize="20000000" maxrolls="10"/>
+	 <rollingfile type="size" filename="%s%s" maxsize="20000000" maxrolls="10"/>
 	 %s
 	 %s
  </outputs>
@@ -325,7 +325,7 @@ func generateDefaultConfig() string {
 	if memoryReceiverFlag {
 		memoryReceiverFlagStr = "<custom name=\"memory\" />"
 	}
-	return fmt.Sprintf(template, levelFlag, config.LoongcollectorGlobalConfig.LoongcollectorLogDir, consoleStr, memoryReceiverFlagStr)
+	return fmt.Sprintf(template, levelFlag, config.LoongcollectorGlobalConfig.LoongcollectorLogDir, config.LoongcollectorGlobalConfig.LoongcollectorPluginLogName, consoleStr, memoryReceiverFlagStr)
 }
 
 // Close the logger and recover the stdout and stderr
diff --git a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go
index f3bcc9a0d9..7fc6cb0bd2 100644
--- a/pkg/logger/logger_test.go
+++ b/pkg/logger/logger_test.go
@@ -49,11 +49,11 @@ func init() {
 
 func clean() {
 	_ = os.Remove(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorConfDir, "plugin_logger.xml"))
-	_ = os.Remove(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "go_plugin.LOG"))
+	_ = os.Remove(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorLogDir, config.LoongcollectorGlobalConfig.LoongcollectorPluginLogName))
 }
 
 func readLog(index int) string {
-	bytes, _ := os.ReadFile(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "go_plugin.LOG"))
+	bytes, _ := os.ReadFile(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorLogDir, config.LoongcollectorGlobalConfig.LoongcollectorPluginLogName))
 	logs := strings.Split(string(bytes), "\n")
 	if index > len(logs)-1 {
 		return ""
diff --git a/pkg/logtail/libGoPluginAdapter.so b/pkg/logtail/libGoPluginAdapter.so
index 5c90a1d9aa..7c2d78f120 100755
Binary files a/pkg/logtail/libGoPluginAdapter.so and b/pkg/logtail/libGoPluginAdapter.so differ
diff --git a/pkg/protocol/decoder/opentelemetry/otlpDataToSLSProto.go b/pkg/protocol/decoder/opentelemetry/otlpDataToSLSProto.go
index cde29c88ad..a5adc16357 100644
--- a/pkg/protocol/decoder/opentelemetry/otlpDataToSLSProto.go
+++ b/pkg/protocol/decoder/opentelemetry/otlpDataToSLSProto.go
@@ -427,5 +427,6 @@ func ConvertOtlpTraceRequestV1(otlpTraceReq ptraceotlp.ExportRequest) (logs []*p
 }
 
 func ConvertOtlpTraceV1(otlpTrace ptrace.Traces) (logs []*protocol.Log, err error) {
-	return logs, fmt.Errorf("does_not_support_otlptraces")
+	return ConvertTrace(otlpTrace)
 }
diff --git a/pluginmanager/checkpoint_manager.go b/pluginmanager/checkpoint_manager.go
index b2d1019e49..889fac05b3 100644
--- a/pluginmanager/checkpoint_manager.go
+++ b/pluginmanager/checkpoint_manager.go
@@ -30,7 +30,7 @@ import (
 	"github.com/alibaba/ilogtail/pkg/util"
 )
 
-var CheckPointFile = flag.String("CheckPointFile", "go_plugin_checkpoint", "checkpoint file name, base dir(binary dir)")
+var CheckPointFile = flag.String("CheckPointFile", "", "checkpoint file name, base dir(binary dir)")
 var CheckPointCleanInterval = flag.Int("CheckPointCleanInterval", 600, "checkpoint clean interval, second")
 var MaxCleanItemPerInterval = flag.Int("MaxCleanItemPerInterval", 1000, "max clean items per interval")
 
@@ -89,7 +89,11 @@ func (p *checkPointManager) Init() error {
 	pathExist, err := util.PathExists(logtailDataDir)
 	var dbPath string
 	if err == nil && pathExist {
-		dbPath = filepath.Join(logtailDataDir, *CheckPointFile)
+		if *CheckPointFile != "" {
+			dbPath = filepath.Join(logtailDataDir, *CheckPointFile)
+		} else {
+			dbPath = filepath.Join(logtailDataDir, config.LoongcollectorGlobalConfig.LoongcollectorCheckPointFile)
+		}
 	} else {
 		// c++程序如果这个目录创建失败会直接exit,所以这里一般应该不会走进来
 		logger.Error(context.Background(), "CHECKPOINT_ALARM", "logtailDataDir not exist", logtailDataDir, "err", err)
diff --git a/pluginmanager/plugin_manager.go b/pluginmanager/plugin_manager.go
index 552f8d90ec..5e652e6db0 100644
--- a/pluginmanager/plugin_manager.go
+++ b/pluginmanager/plugin_manager.go
@@ -57,7 +57,7 @@ var alarmConfigJSON = `{
 		"DefaultLogGroupQueueSize": 4,
 		"Tags" : {
 			"base_version" : "` + config.BaseVersion + `",
-			"loongcollector_version" : "` + config.BaseVersion + `"
+			"` + config.LoongcollectorGlobalConfig.LoongcollectorVersionTag + `" : "` + config.BaseVersion + `"
 		}
     },
 	"inputs" : [
@@ -77,7 +77,7 @@ var containerConfigJSON = `{
 		"DefaultLogGroupQueueSize": 4,
 		"Tags" : {
 			"base_version" : "` + config.BaseVersion + `",
-			"loongcollector_version" : "` + config.BaseVersion + `"
+			"` + config.LoongcollectorGlobalConfig.LoongcollectorVersionTag + `" : "` + config.BaseVersion + `"
 		}
     },
 	"inputs" : [
diff --git a/plugins/input/opentelemetry/service_otlp_v1_test.go b/plugins/input/opentelemetry/service_otlp_v1_test.go
index 0f05a6ad67..d08033201e 100644
--- a/plugins/input/opentelemetry/service_otlp_v1_test.go
+++ b/plugins/input/opentelemetry/service_otlp_v1_test.go
@@ -17,7 +17,9 @@ package opentelemetry
 import (
 	"fmt"
 	"net/http"
+	"strconv"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -203,7 +205,55 @@ func TestOtlpGRPC_Trace_V1(t *testing.T) {
 
 	for i := 0; i < queueSize; i++ {
 		err = exportTraces(cc, GenerateTraces(i+1))
-		assert.Error(t, err, "does_not_support_otlptraces")
+		assert.NoError(t, err)
+	}
+
+	starttm := time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)
+	endtm := time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)
+	traceID := "0102030405060708090a0b0c0d0e0f10"
+	spanID := "1112131415161718"
+	count := 0
+	pos := 0
+	for count < queueSize {
+		count++
+		for i := pos; i < pos+count; i++ {
+			log := collector.Logs[i]
+			assert.Equal(t, int64(log.Time), endtm.Unix())
+			assert.Equal(t, "host", log.Contents[0].Key)
+			assert.Equal(t, "service", log.Contents[1].Key)
+			assert.Equal(t, "resource", log.Contents[2].Key)
+			assert.Equal(t, "{\"resource-attr\":\"resource-attr-val-1\"}", log.Contents[2].Value)
+			assert.Equal(t, "otlp.name", log.Contents[3].Key)
+			assert.Equal(t, "otlp.version", log.Contents[4].Key)
+			assert.Equal(t, "traceID", log.Contents[5].Key)
+			assert.Equal(t, "spanID", log.Contents[6].Key)
+			assert.Equal(t, "parentSpanID", log.Contents[7].Key)
+			assert.Equal(t, "kind", log.Contents[8].Key)
+			assert.Equal(t, "name", log.Contents[9].Key)
+			assert.Equal(t, "links", log.Contents[10].Key)
+			assert.Equal(t, "logs", log.Contents[11].Key)
+			if (i-pos)%2 == 0 {
+				assert.Equal(t, traceID, log.Contents[5].Value)
+				assert.Equal(t, spanID, log.Contents[6].Value)
+				assert.Equal(t, "operationA", log.Contents[9].Value)
+				assert.Equal(t, "[{\"attribute\":{\"span-event-attr\":\"span-event-attr-val\"},\"name\":\"event-with-attr\",\"time\":1581452773000000123},{\"attribute\":{},\"name\":\"event\",\"time\":1581452773000000123}]", log.Contents[11].Value)
+			} else {
+				assert.Equal(t, []byte{}, []byte(log.Contents[5].Value))
+				assert.Equal(t, []byte{}, []byte(log.Contents[6].Value))
+				assert.Equal(t, "operationB", log.Contents[9].Value)
+				assert.Equal(t, "[{\"attribute\":{\"span-link-attr\":\"span-link-attr-val\"},\"spanID\":\"\",\"traceID\":\"\"},{\"attribute\":{},\"spanID\":\"\",\"traceID\":\"\"}]", log.Contents[10].Value)
+			}
+			assert.Equal(t, "traceState", log.Contents[12].Key)
+			assert.Equal(t, "start", log.Contents[13].Key)
+			assert.Equal(t, strconv.FormatInt(starttm.UnixMicro(), 10), log.Contents[13].Value)
+			assert.Equal(t, "end", log.Contents[14].Key)
+			assert.Equal(t, strconv.FormatInt(endtm.UnixMicro(), 10), log.Contents[14].Value)
+			assert.Equal(t, "duration", log.Contents[15].Key)
+			assert.Equal(t, "attribute", log.Contents[16].Key)
+			assert.Equal(t, "statusCode", log.Contents[17].Key)
+			assert.Equal(t, "statusMessage", log.Contents[18].Key)
+		}
+		pos += count
 	}
 }
 
@@ -312,6 +362,54 @@ func TestOtlpHTTP_Trace_V1(t *testing.T) {
 	for i := 0; i < queueSize; i++ {
 		req := ptraceotlp.NewExportRequestFromTraces(GenerateTraces(i + 1))
 		err = httpExport(client, req, url, i%2 == 0)
-		assert.Error(t, err, "does_not_support_otlptraces")
+		assert.NoError(t, err)
+	}
+
+	starttm := time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)
+	endtm := time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)
+	traceID := "0102030405060708090a0b0c0d0e0f10"
+	spanID := "1112131415161718"
+	count := 0
+	pos := 0
+	for count < queueSize {
+		count++
+		for i := pos; i < pos+count; i++ {
+			log := collector.Logs[i]
+			assert.Equal(t, int64(log.Time), endtm.Unix())
+			assert.Equal(t, "host", log.Contents[0].Key)
+			assert.Equal(t, "service", log.Contents[1].Key)
+			assert.Equal(t, "resource", log.Contents[2].Key)
+			assert.Equal(t, "{\"resource-attr\":\"resource-attr-val-1\"}", log.Contents[2].Value)
+			assert.Equal(t, "otlp.name", log.Contents[3].Key)
+			assert.Equal(t, "otlp.version", log.Contents[4].Key)
+			assert.Equal(t, "traceID", log.Contents[5].Key)
+			assert.Equal(t, "spanID", log.Contents[6].Key)
+			assert.Equal(t, "parentSpanID", log.Contents[7].Key)
+			assert.Equal(t, "kind", log.Contents[8].Key)
+			assert.Equal(t, "name", log.Contents[9].Key)
+			assert.Equal(t, "links", log.Contents[10].Key)
+			assert.Equal(t, "logs", log.Contents[11].Key)
+			if (i-pos)%2 == 0 {
+				assert.Equal(t, traceID, log.Contents[5].Value)
+				assert.Equal(t, spanID, log.Contents[6].Value)
+				assert.Equal(t, "operationA", log.Contents[9].Value)
+				assert.Equal(t, "[{\"attribute\":{\"span-event-attr\":\"span-event-attr-val\"},\"name\":\"event-with-attr\",\"time\":1581452773000000123},{\"attribute\":{},\"name\":\"event\",\"time\":1581452773000000123}]", log.Contents[11].Value)
+			} else {
+				assert.Equal(t, []byte{}, []byte(log.Contents[5].Value))
+				assert.Equal(t, []byte{}, []byte(log.Contents[6].Value))
+				assert.Equal(t, "operationB", log.Contents[9].Value)
+				assert.Equal(t, "[{\"attribute\":{\"span-link-attr\":\"span-link-attr-val\"},\"spanID\":\"\",\"traceID\":\"\"},{\"attribute\":{},\"spanID\":\"\",\"traceID\":\"\"}]", log.Contents[10].Value)
+			}
+			assert.Equal(t, "traceState", log.Contents[12].Key)
+			assert.Equal(t, "start", log.Contents[13].Key)
+			assert.Equal(t, strconv.FormatInt(starttm.UnixMicro(), 10), log.Contents[13].Value)
+			assert.Equal(t, "end", log.Contents[14].Key)
+			assert.Equal(t, strconv.FormatInt(endtm.UnixMicro(), 10), log.Contents[14].Value)
+			assert.Equal(t, "duration", log.Contents[15].Key)
+			assert.Equal(t, "attribute", log.Contents[16].Key)
+			assert.Equal(t, "statusCode", log.Contents[17].Key)
+			assert.Equal(t, "statusMessage", log.Contents[18].Key)
+		}
+		pos += count
 	}
 }
diff --git a/scripts/dist.sh b/scripts/dist.sh
index 74fbc777f3..946d6c328f 100755
--- a/scripts/dist.sh
+++ b/scripts/dist.sh
@@ -42,7 +42,7 @@ cp "${ROOTDIR}/${OUT_DIR}/libGoPluginAdapter.so" "${ROOTDIR}/${DIST_DIR}/${PACKA
 cp "${ROOTDIR}/${OUT_DIR}/libGoPluginBase.so" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}"
 mkdir -p "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/conf/instance_config/local/"
 cp "${ROOTDIR}/${OUT_DIR}/conf/instance_config/local/loongcollector_config.json" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/conf/instance_config/local/"
-cp -a "${ROOTDIR}/${OUT_DIR}/conf/pipeline_config/local" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/conf"
+cp -a "${ROOTDIR}/${OUT_DIR}/conf/continuous_pipeline_config/local" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/conf"
 if file "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/loongcollector" | grep x86-64; then ./scripts/download_ebpflib.sh "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}"; fi
 
 # Splitting debug info at build time with -gsplit-dwarf does not work with current gcc version
diff --git a/scripts/gen_build_scripts.sh b/scripts/gen_build_scripts.sh
index f1116b894b..9ad32874d0 100755
--- a/scripts/gen_build_scripts.sh
+++ b/scripts/gen_build_scripts.sh
@@ -122,7 +122,7 @@ function generateCopyScript() {
   fi
   echo 'mkdir -p $BINDIR/conf/instance_config/local/' >>$COPY_SCRIPT_FILE
   echo 'echo -e "{\n}" > $BINDIR/conf/instance_config/local/loongcollector_config.json' >>$COPY_SCRIPT_FILE
-  echo 'mkdir -p $BINDIR/conf/pipeline_config/local' >>$COPY_SCRIPT_FILE
+  echo 'mkdir -p $BINDIR/conf/continuous_pipeline_config/local' >>$COPY_SCRIPT_FILE
   echo 'docker rm -v "$id"' >>$COPY_SCRIPT_FILE
 }
 
diff --git a/test/benchmark/test_cases/performance_file_to_blackhole_ilogtail/docker-compose.yaml b/test/benchmark/test_cases/performance_file_to_blackhole_ilogtail/docker-compose.yaml
index f5f31279dc..47545cfb41 100644
--- a/test/benchmark/test_cases/performance_file_to_blackhole_ilogtail/docker-compose.yaml
+++ b/test/benchmark/test_cases/performance_file_to_blackhole_ilogtail/docker-compose.yaml
@@ -18,7 +18,7 @@ services:
   ilogtailC:
     image: aliyun/loongcollector:0.0.1
     volumes:
-      - ./ilogtail.yaml:/loongcollector/conf/pipeline_config/local/ilogtail.yaml
+      - ./ilogtail.yaml:/loongcollector/conf/continuous_pipeline_config/local/ilogtail.yaml
       - .:/home/ilogtail
     healthcheck:
       test: "cat /loongcollector/log/loongcollector.LOG"
diff --git a/test/benchmark/test_cases/performance_file_to_blackhole_ilogtailspl/docker-compose.yaml b/test/benchmark/test_cases/performance_file_to_blackhole_ilogtailspl/docker-compose.yaml
index f5f31279dc..47545cfb41 100644
--- a/test/benchmark/test_cases/performance_file_to_blackhole_ilogtailspl/docker-compose.yaml
+++ b/test/benchmark/test_cases/performance_file_to_blackhole_ilogtailspl/docker-compose.yaml
@@ -18,7 +18,7 @@ services:
   ilogtailC:
     image: aliyun/loongcollector:0.0.1
     volumes:
-      - ./ilogtail.yaml:/loongcollector/conf/pipeline_config/local/ilogtail.yaml
+      - ./ilogtail.yaml:/loongcollector/conf/continuous_pipeline_config/local/ilogtail.yaml
       - .:/home/ilogtail
     healthcheck:
       test: "cat /loongcollector/log/loongcollector.LOG"
diff --git a/test/engine/setup/dockercompose/compose.go b/test/engine/setup/dockercompose/compose.go
index 4163354568..780df589e5 100644
--- a/test/engine/setup/dockercompose/compose.go
+++ b/test/engine/setup/dockercompose/compose.go
@@ -62,7 +62,7 @@ services:
     pid: host
     volumes:
       - %s:/loongcollector/conf/default_flusher.json
-      - %s:/loongcollector/conf/pipeline_config/local
+      - %s:/loongcollector/conf/continuous_pipeline_config/local
       - /:/logtail_host
       - /var/run/docker.sock:/var/run/docker.sock
       - /sys/:/sys/