Skip to content

Commit

Permalink
update for 3.3 pre-release
Browse files Browse the repository at this point in the history
update for 3.3 pre-release
  • Loading branch information
githubname1024 committed Jun 25, 2024
1 parent 204c594 commit 4c93204
Show file tree
Hide file tree
Showing 1,728 changed files with 875,373 additions and 158,811 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,4 @@
// Environment feature flags read as mutable global state.
// NOTE(review): public mutable static fields — callers elsewhere appear to read
// these directly, so the shape cannot be changed without breaking them.
public class EnvCustomUtil {
// Whether the default password is enabled.
public static boolean pwdswitch = false;
// Whether this is the open-source demo environment.
public static boolean openswitch = false;
}
5 changes: 5 additions & 0 deletions cachecloud-web/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,11 @@
<configuration>
<encoding>UTF-8</encoding>
<useDefaultDelimiters>true</useDefaultDelimiters>
<nonFilteredFileExtensions>
<nonFilteredFileExtension>ttf</nonFilteredFileExtension>
<nonFilteredFileExtension>woff</nonFilteredFileExtension>
<nonFilteredFileExtension>woff2</nonFilteredFileExtension>
</nonFilteredFileExtensions>
</configuration>
</plugin>
<plugin>
Expand Down
1,320 changes: 1,320 additions & 0 deletions cachecloud-web/sql/3.3.sql

Large diffs are not rendered by default.

67 changes: 67 additions & 0 deletions cachecloud-web/sql/update 3.2 to 3.3.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
-- Schema migration: CacheCloud 3.2 -> 3.3.
-- Adds disk-usage tracking columns, persistence/biz-group metadata,
-- reworks the instance_alert_configs unique key, and creates two new tables.

-- instance_config: how a config value is sourced (0: default config_value; 1: copied from master).
ALTER TABLE instance_config ADD value_type TINYINT(4) DEFAULT 0 NOT NULL COMMENT '取值类型(0:默认值 config_value;1:从主节点拷贝)';

-- Client-side gathered statistics: disk usage and server-side command counts.
ALTER TABLE app_client_statistic_gather ADD used_disk BIGINT(20) DEFAULT 0 NULL COMMENT '磁盘占用byte';
ALTER TABLE app_client_statistic_gather ADD server_cmd_count bigint(20) DEFAULT 0 NOT NULL COMMENT 'server端统计的命令调用次数';

-- NOTE(review): bigint(255) display width is inconsistent with the bigint(20)
-- used everywhere else in this script — presumably a typo; verify before release.
ALTER TABLE instance_statistics ADD used_disk bigint(255) DEFAULT 0 NOT NULL COMMENT '已使用磁盘,单位byte';

ALTER TABLE app_minute_statistics ADD used_disk bigint(20) DEFAULT 0 NOT NULL COMMENT '磁盘占用(字节)';

ALTER TABLE app_hour_statistics ADD used_disk bigint(20) DEFAULT 0 NOT NULL COMMENT '磁盘占用(字节)';

-- Machine-level disk metrics stored as strings (matches existing varchar metric columns).
ALTER TABLE machine_statistics ADD disk_total varchar(120) NULL COMMENT '机器分配磁盘,单位MB';
ALTER TABLE machine_statistics ADD disk_available varchar(120) NULL COMMENT '机器空闲磁盘,单位MB';
ALTER TABLE machine_statistics ADD disk_usage_ratio varchar(15) NULL COMMENT '机器磁盘使用率,百分比(无需乘100)';

-- NOTE(review): NOT NULL without an explicit DEFAULT — MySQL fills existing
-- rows with the implicit default (0); confirm that is the intended backfill.
ALTER TABLE app_daily ADD avg_used_disk BIGINT(20) NOT NULL COMMENT '平均磁盘使用量';
ALTER TABLE app_daily ADD max_used_disk BIGINT(20) NOT NULL COMMENT '最大磁盘使用量';

-- Persistence strategy per app (0: regular; 1: master AOF auto-flush / slave regular; 2: master AOF off / slave regular).
ALTER TABLE app_desc ADD persistence_type TINYINT(4) DEFAULT 0 NOT NULL COMMENT '持久化类型(0:常规;1:主aof自动刷盘;从常规;2:主关闭aof,从常规)';

-- Link users to a business group (see app_biz below).
ALTER TABLE app_user ADD biz_id BIGINT(20) DEFAULT NULL COMMENT '所属业务组id(app_biz)';

-- Alert configs become app-type aware; the unique key is rebuilt to include app_type.
ALTER TABLE instance_alert_configs ADD app_type TINYINT(4) DEFAULT 0 NOT NULL COMMENT '应用类型(0:redis;)';
ALTER TABLE instance_alert_configs DROP KEY uniq_index;
ALTER TABLE instance_alert_configs ADD CONSTRAINT uniq_index UNIQUE KEY (`type`,instance_id,alert_config,compare_type,app_type);

--
-- Table structure for table `app_biz`
-- Business groups; referenced by app_user.biz_id above.
--

CREATE TABLE `app_biz` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(64) NOT NULL COMMENT '业务组名称',
`biz_desc` varchar(255) NOT NULL COMMENT '业务组描述',
PRIMARY KEY (`id`),
UNIQUE KEY `bidx_name` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=utf8 COMMENT='业务组表';

--
-- Table structure for table `app_capacity_monitor`
-- Per-app memory capacity tracking and auto expand/shrink scheduling.
-- NOTE(review): app_id has no index/unique key — presumably looked up per app;
-- verify whether an index on app_id is needed.
--
CREATE TABLE `app_capacity_monitor` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`app_id` bigint(20) NOT NULL COMMENT '应用id',
`sharding_master_num` int(10) NOT NULL DEFAULT '0' COMMENT '主分片数',
`mem` bigint(20) NOT NULL COMMENT '应用初始内存(字节)',
`cur_mem` bigint(20) NOT NULL COMMENT '应用当前内存(字节)',
`mem_used` bigint(20) NOT NULL DEFAULT '0' COMMENT '应用已使用内存(字节)',
`mem_used_history` bigint(20) DEFAULT '0' COMMENT '应用已使用内存(历史最大值)',
`sharding_mem` bigint(20) NOT NULL COMMENT '应用分片初始内存(字节)',
`cur_sharding_mem` bigint(20) NOT NULL COMMENT '应用分片当前内存(字节)',
`sharding_mem_used` bigint(20) NOT NULL DEFAULT '0' COMMENT '分片已使用内存(最大值)',
`expand_mem_percent` tinyint(4) NOT NULL COMMENT '应用扩容内存使用百分比',
`expand_ratio` tinyint(4) NOT NULL COMMENT '扩容比率',
`expand_ratio_total` int(10) NOT NULL COMMENT '当日最大扩容比率(超出不可扩容)',
`is_expand` tinyint(4) NOT NULL DEFAULT '1' COMMENT '是否可扩容:0否;1是',
`is_reduce` tinyint(4) NOT NULL DEFAULT '1' COMMENT '是否可缩容: 0否,1是',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
`expand_time` datetime DEFAULT NULL COMMENT '上次扩容时间',
`schedule_status` tinyint(4) DEFAULT '0' COMMENT '计划状态:0:无意义;1:待缩容;2:待扩容',
`schedule_time` date DEFAULT NULL COMMENT '计划处理时间',
`reduce_ratio_min` tinyint(4) NOT NULL DEFAULT '40' COMMENT '缩容内存使用率最小值',
`reduce_ratio_max` tinyint(4) NOT NULL DEFAULT '60' COMMENT '缩容内存使用率最大值',
`expand_count` int(10) NOT NULL DEFAULT '0' COMMENT '当日自动扩容次数',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=596 DEFAULT CHARSET=utf8 COMMENT='app应用容量监控';

Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
@SpringBootApplication
@EnableAutoConfiguration(exclude = {MybatisAutoConfiguration.class})
@ImportResource("${spring.application.import}")
@ServletComponentScan(basePackages = "com.sohu.cache.web.druid")
@EnableAsync
public class ApplicationStarter {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,13 @@ public interface AsyncService {
*/
public boolean submitFuture(KeyCallable<?> callable);

/**
* 提交任务并可拿到返回结果
* @param callable
* @return
*/
public Future<?> submitFutureWithRst(KeyCallable<?> callable);

/**
* 提交任务
*
Expand All @@ -27,6 +34,15 @@ public interface AsyncService {
*/
public boolean submitFuture(String threadPoolKey, KeyCallable<?> callable);

/**
* 提交任务
*
* @param threadPoolKey
* @param callable
* @return 返回任务结果
*/
public Future<?> submitFutureWithRst(String threadPoolKey, KeyCallable<?> callable);

/**
* 提交任务
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,9 @@ public class AsyncThreadPoolFactory {
new LinkedBlockingQueue<Runnable>(256), new NamedThreadFactory(APP_POOL, true));

public static final String BREVITY_SCHEDULER_POOL = "brevity-scheduler-pool";
public static final ThreadPoolExecutor BREVITY_SCHEDULER_ASYNC_THREAD_POOL = new ThreadPoolExecutor(10, 100,
public static final ThreadPoolExecutor BREVITY_SCHEDULER_ASYNC_THREAD_POOL = new ThreadPoolExecutor(10, 50,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>(1024),
new LinkedBlockingQueue<Runnable>(2048),
new NamedThreadFactory(BREVITY_SCHEDULER_POOL, true),new CounterRejectedExecutionHandler());

public static final String RESHARD_PROCESS_POOL = "redis-cluster-reshard";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,11 @@ public boolean submitFuture(KeyCallable<?> callable) {
return submitFuture(DEFAULT_THREAD_POOL, callable);
}

@Override
public Future<?> submitFutureWithRst(KeyCallable<?> callable) {
return submitFuture((Callable)callable);
}

@Override
public boolean submitFuture(String threadPoolKey, KeyCallable<?> callable) {
try {
Expand All @@ -45,6 +50,22 @@ public boolean submitFuture(String threadPoolKey, KeyCallable<?> callable) {
}
}

@Override
public Future<?> submitFutureWithRst(String threadPoolKey, KeyCallable<?> callable) {
try {
ExecutorService executorService = threadPoolMap.get(threadPoolKey);
if (executorService == null) {
logger.warn("threadPoolKey={} not found , used defaultThreadPool", threadPoolKey);
executorService = defaultThreadPool;
}
Future<?> future = executorService.submit(callable);
return future;
} catch (Exception e) {
logger.error(callable.getKey(), e);
return null;
}
}

@Override
public Future<?> submitFuture(Callable<?> callable) {
try {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
package com.sohu.cache.client.command;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.hystrix.*;
import com.sohu.cache.client.service.ClientVersionService;
Expand Down Expand Up @@ -68,19 +67,23 @@ public AppClientCommand(AppClientParams appClientParams, AppDao appDao, Instance
@Override
protected Map<String, Object> run() throws Exception {
Map<String, Object> model = Maps.newHashMap();
int type = appClientParams.getType();
Integer type = appClientParams.getType();
long appId = appClientParams.getAppId();
boolean isCheck = checkRedisApp(model);
if (!isCheck) {
return model;
}
if(type == null){
type = appClientParams.getCacheAppDesc().getType();
}
if (type == ConstUtils.CACHE_TYPE_REDIS_CLUSTER) {
model.putAll(getRedisClusterInfo(true));
} else if (type == ConstUtils.CACHE_REDIS_SENTINEL) {
model.putAll(getRedisSentinelInfo(true));
} else if (type == ConstUtils.CACHE_REDIS_STANDALONE) {
model.putAll(getRedisStandaloneInfo(true));
}
model.put("type", type);
//每次数据库操作成功,更新缓存
addAppClient(appId, model);

Expand Down Expand Up @@ -117,13 +120,18 @@ protected Map<String, Object> getFallback() {

private boolean checkRedisApp(Map<String, Object> model) {
long appId = appClientParams.getAppId();
int type = appClientParams.getType();
Integer type = appClientParams.getType();
AppDesc appDesc = appDao.getAppDescById(appId);
if (type == null && appDesc != null) {
if (appDesc.getType() == ConstUtils.CACHE_TYPE_REDIS_CLUSTER) {
type = appDesc.getType();
}
}
if (appDesc == null) {
model.put("status", ClientStatusEnum.ERROR.getStatus());
model.put("message", String.format("appId:%s 不存在", appId));
return false;
} else if (appDesc.getType() != type) {
} else if (type == null || appDesc.getType() != type) {
model.put("status", ClientStatusEnum.ERROR.getStatus());
model.put("message",
String.format("appId:%s 类型不符,期望类型:%s,实际类型%s,请联系管理员!", appId, type, appDesc.getType()));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,13 @@ public class AppClientParams {
private List<InstanceInfo> cacheInstanceInfos;
private String cacheMaxVersion;

private final int type;
private final Integer type;

private final String appClientIp;

private final String clientVersion;

public AppClientParams(long appId, int type, String appClientIp, String clientVersion) {
public AppClientParams(long appId, Integer type, String appClientIp, String clientVersion) {
this.appId = appId;
this.type = type;
this.appClientIp = appClientIp;
Expand All @@ -34,7 +34,7 @@ public long getAppId() {
return appId;
}

public int getType() {
public Integer getType() {
return type;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,5 @@
/**
 * Aggregates client-reported statistics over a time window.
 *
 * NOTE(review): "bath" is almost certainly a typo for "batch", but these are
 * interface methods — renaming would break every implementor and caller, so
 * the names are documented as-is.
 */
public interface AppClientStatisticGatherService {
/**
 * Gathers and saves client statistics for the window [startTime, endTime).
 * NOTE(review): boundary inclusivity assumed — confirm against implementation.
 */
void bathSave(long startTime, long endTime);
/** Gathers and adds client statistics for the window [startTime, endTime). */
void bathAdd(long startTime, long endTime);
/** Adds server-side command-count statistics for the window [startTime, endTime). */
void bathAddServerCmdCount(long startTime, long endTime);
}

This file was deleted.

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
* Created by rucao on 2019/12/13
*/
@Slf4j
@Service
@Service("appClientReportExceptionService")
public class AppClientReportExceptionServiceImpl implements AppClientReportExceptionService {

private static int ARGS_MAX_LEN = 255;
Expand All @@ -46,6 +46,7 @@ public void batchSave(long appId, String clientIp, String redisPoolConfig, long
// 2.解析
List<AppClientExceptionStatistics> appClientExceptionStatisticsList = exceptionModels.stream()
.map(exceptionModel -> generate(appId, clientIp, redisPoolConfig, currentMin, exceptionModel))
.filter(exceptionStatistics -> (exceptionStatistics != null))
.collect(Collectors.toList());
// 4.批量保存
if (CollectionUtils.isNotEmpty(appClientExceptionStatisticsList)) {
Expand Down Expand Up @@ -205,12 +206,15 @@ private AppClientExceptionStatistics generate(long appId, String clientIp, Strin
return null;
}
})
.filter(latencyCommand -> (latencyCommand != null))
.collect(Collectors.toList());
appClientLatencyCommandDao.batchSave(appClientLatencyCommandList);
String latencyCommands = appClientLatencyCommandList.stream()
.map(appClientLatencyCommand -> String.valueOf(appClientLatencyCommand.getId()))
.collect(Collectors.joining(","));
appClientExceptionStatistics.setLatencyCommands(latencyCommands);
if(CollectionUtils.isNotEmpty(appClientLatencyCommandList)){
appClientLatencyCommandDao.batchSave(appClientLatencyCommandList);
String latencyCommands = appClientLatencyCommandList.stream()
.map(appClientLatencyCommand -> String.valueOf(appClientLatencyCommand.getId()))
.collect(Collectors.joining(","));
appClientExceptionStatistics.setLatencyCommands(latencyCommands);
}
}
}
return appClientExceptionStatistics;
Expand Down
Loading

0 comments on commit 4c93204

Please sign in to comment.