diff --git a/codis/ansible/roles/codis-proxy/templates/proxy.toml b/codis/ansible/roles/codis-proxy/templates/proxy.toml
index 1cc4b5884f..eff82cbd4e 100644
--- a/codis/ansible/roles/codis-proxy/templates/proxy.toml
+++ b/codis/ansible/roles/codis-proxy/templates/proxy.toml
@@ -62,11 +62,14 @@ backend_primary_only = false
backend_primary_parallel = 1
backend_replica_parallel = 1
+# Set max slot number.
+max_slot_num = 1024
+
# Set backend tcp keepalive period. (0 to disable)
backend_keepalive_period = "75s"
# Set number of databases of backend.
-backend_number_databases = 16
+backend_number_databases = 1
# If there is no request from the client for a long time, the connection will be closed. (0 to disable)
# Set session recv buffer size & timeout.
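For context, the new field rides the same TOML loader as the rest of proxy.toml. Below is a minimal standalone sketch of decoding and validating it — the real proxy has its own `Config` type, and `github.com/BurntSushi/toml` is only an illustrative choice here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Sketch only: the real proxy Config has many more fields.
type proxyConfig struct {
	MaxSlotNum int `toml:"max_slot_num"`
}

func main() {
	var cfg proxyConfig
	if _, err := toml.DecodeFile("proxy.toml", &cfg); err != nil {
		log.Fatalf("load proxy.toml failed: %v", err)
	}
	if cfg.MaxSlotNum <= 0 {
		log.Fatalf("invalid max_slot_num = %d", cfg.MaxSlotNum)
	}
	fmt.Println("max_slot_num =", cfg.MaxSlotNum)
}
```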
diff --git a/codis/cmd/admin/admin.go b/codis/cmd/admin/admin.go
index b435d27190..e490b7ff85 100644
--- a/codis/cmd/admin/admin.go
+++ b/codis/cmd/admin/admin.go
@@ -265,7 +265,7 @@ func (t *cmdAdmin) handleConfigConvert(d map[string]interface{}) {
for _, v := range slots.(map[string]interface{}) {
t.convertSlotsV1(temp, v)
}
- for i := 0; i < models.MaxSlotNum; i++ {
+ for i := 0; i < models.GetMaxSlotNum(); i++ {
if temp[i] == nil {
continue
}
@@ -333,7 +333,7 @@ func (t *cmdAdmin) loadJsonConfigV3(file string) *ConfigV3 {
var slots = make(map[int]*models.SlotMapping)
for _, s := range config.Slots {
- if s.Id < 0 || s.Id >= models.MaxSlotNum {
+ if s.Id < 0 || s.Id >= models.GetMaxSlotNum() {
log.Panicf("invalid slot id = %d", s.Id)
}
if slots[s.Id] != nil {
diff --git a/codis/cmd/admin/proxy.go b/codis/cmd/admin/proxy.go
index b40a567cef..ac815ed167 100644
--- a/codis/cmd/admin/proxy.go
+++ b/codis/cmd/admin/proxy.go
@@ -145,7 +145,7 @@ func (t *cmdProxy) handleFillSlots(d map[string]interface{}) {
}
for _, m := range slots {
- if m.Id < 0 || m.Id >= models.MaxSlotNum {
+ if m.Id < 0 || m.Id >= models.GetMaxSlotNum() {
log.Panicf("invalid slot id = %d", m.Id)
}
}
diff --git a/codis/cmd/dashboard/main.go b/codis/cmd/dashboard/main.go
index 759e1f38d0..a4ef83a18a 100644
--- a/codis/cmd/dashboard/main.go
+++ b/codis/cmd/dashboard/main.go
@@ -83,6 +83,7 @@ Options:
log.PanicErrorf(err, "load config %s failed", s)
}
}
+ models.SetMaxSlotNum(config.MaxSlotNum)
if s, ok := utils.Argument(d, "--host-admin"); ok {
config.HostAdmin = s
log.Warnf("option --host-admin = %s", s)
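`models.SetMaxSlotNum` / `models.GetMaxSlotNum` are called throughout this diff but their definitions are not shown. A minimal sketch of what pkg/models plausibly provides, assuming a package-level variable that defaults to the classic 1024:

```go
// pkg/models (sketch; the actual definitions are not part of this diff)
package models

const DefaultMaxSlotNum = 1024

var maxSlotNum = DefaultMaxSlotNum

// SetMaxSlotNum overrides the slot count once at startup,
// e.g. from the dashboard's max_slot_num config entry.
func SetMaxSlotNum(n int) {
	if n > 0 {
		maxSlotNum = n
	}
}

// GetMaxSlotNum returns the currently configured slot count.
func GetMaxSlotNum() int {
	return maxSlotNum
}
```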
diff --git a/codis/cmd/fe/assets/css/main.css b/codis/cmd/fe/assets/css/main.css
index 1d6253c26a..2fb0f18186 100644
--- a/codis/cmd/fe/assets/css/main.css
+++ b/codis/cmd/fe/assets/css/main.css
@@ -1,5 +1,5 @@
body {
- min-height: 20000px;
+ /*min-height: 20000px;*/
min-width: 1000px;
}
diff --git a/codis/cmd/fe/assets/dashboard-fe.js b/codis/cmd/fe/assets/dashboard-fe.js
index e99d885545..35aa66a691 100644
--- a/codis/cmd/fe/assets/dashboard-fe.js
+++ b/codis/cmd/fe/assets/dashboard-fe.js
@@ -183,7 +183,7 @@ function renderSlotsCharts(slots_array) {
},
yAxis: {
min: 0,
- max: 1024,
+ max: n,
tickInterval: 64,
title: {
style: {
@@ -623,6 +623,7 @@ dashboard.controller('MainCodisCtrl', ['$scope', '$http', '$uibModal', '$timeout
$scope.codis_addr = overview.model.admin_addr;
$scope.codis_coord_name = "[" + overview.config.coordinator_name.charAt(0).toUpperCase() + overview.config.coordinator_name.slice(1) + "]";
$scope.codis_coord_addr = overview.config.coordinator_addr;
+ $scope.max_slot_num = overview.config.max_slot_num;
$scope.updateStats(overview.stats);
});
}
diff --git a/codis/cmd/fe/assets/index.html b/codis/cmd/fe/assets/index.html
index 8da3fbacf5..41f2a8e29f 100644
--- a/codis/cmd/fe/assets/index.html
+++ b/codis/cmd/fe/assets/index.html
@@ -70,6 +70,10 @@
         Dashboard |
@@ -128,6 +132,7 @@ Proxy
             <th>Data Center</th>
+            <th>Slot Nums</th>
             <th>Sessions</th>
             <th>Commands</th>
@@ -181,6 +186,7 @@ Proxy
+            <td>[[proxy.max_slot_num]]</td>
             <td>[[proxy.sessions]]</td>
             <td>[[proxy.status]]</td>
@@ -219,11 +225,11 @@ Slots
         Slots-[ ~ ] to Group
diff --git a/codis/pkg/proxy/router.go b/codis/pkg/proxy/router.go
--- a/codis/pkg/proxy/router.go
+++ b/codis/pkg/proxy/router.go
@@ ... @@ func (s *Router) GetSlot(id int) *models.Slot {
-	if id < 0 || id >= MaxSlotNum {
+ if id < 0 || id >= models.GetMaxSlotNum() {
return nil
}
slot := &s.slots[id]
@@ -105,7 +104,7 @@ func (s *Router) FillSlot(m *models.Slot) error {
if s.closed {
return ErrClosedRouter
}
- if m.Id < 0 || m.Id >= MaxSlotNum {
+ if m.Id < 0 || m.Id >= models.GetMaxSlotNum() {
return ErrInvalidSlotId
}
var method forwardMethod
@@ -138,13 +137,13 @@ func (s *Router) isOnline() bool {
func (s *Router) dispatch(r *Request) error {
hkey := getHashKey(r.Multi, r.OpStr)
- var id = Hash(hkey) % MaxSlotNum
+ var id = Hash(hkey) % uint32(models.GetMaxSlotNum())
slot := &s.slots[id]
return slot.forward(r, hkey)
}
func (s *Router) dispatchSlot(r *Request, id int) error {
- if id < 0 || id >= MaxSlotNum {
+ if id < 0 || id >= models.GetMaxSlotNum() {
return ErrInvalidSlotId
}
slot := &s.slots[id]
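The dispatch change above is the hot path: every keyed request is hashed and reduced modulo the configured slot count rather than a fixed 1024. A self-contained sketch of that mapping, assuming CRC32 and Redis-style `{tag}` extraction (`Hash` and `getHashKey` are not shown in the diff, so these helpers are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"hash/crc32"
)

// hashTag returns the {tag} portion of a key if present, else the whole key,
// so related keys can be pinned to the same slot.
func hashTag(key []byte) []byte {
	if beg := bytes.IndexByte(key, '{'); beg >= 0 {
		if end := bytes.IndexByte(key[beg+1:], '}'); end > 0 {
			return key[beg+1 : beg+1+end]
		}
	}
	return key
}

func main() {
	maxSlotNum := uint32(1024) // configurable now, no longer a compile-time constant
	key := []byte("user:{42}:profile")
	slot := crc32.ChecksumIEEE(hashTag(key)) % maxSlotNum
	fmt.Printf("key %q -> slot %d\n", key, slot)
}
```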
diff --git a/codis/pkg/proxy/session.go b/codis/pkg/proxy/session.go
index 12dba038f4..b7bf07268b 100644
--- a/codis/pkg/proxy/session.go
+++ b/codis/pkg/proxy/session.go
@@ -354,7 +354,7 @@ func (s *Session) handleRequestPing(r *Request, d *Router) error {
var nblks = len(r.Multi) - 1
switch {
case nblks == 0:
- slot := uint32(time.Now().Nanosecond()) % MaxSlotNum
+ slot := uint32(time.Now().Nanosecond()) % uint32(models.GetMaxSlotNum())
return d.dispatchSlot(r, int(slot))
default:
addr = string(r.Multi[1].Value)
@@ -373,7 +373,7 @@ func (s *Session) handleRequestInfo(r *Request, d *Router) error {
var nblks = len(r.Multi) - 1
switch {
case nblks == 0:
- slot := uint32(time.Now().Nanosecond()) % MaxSlotNum
+ slot := uint32(time.Now().Nanosecond()) % uint32(models.GetMaxSlotNum())
return d.dispatchSlot(r, int(slot))
default:
addr = string(r.Multi[1].Value)
@@ -578,7 +578,7 @@ func (s *Session) handleRequestSlotsScan(r *Request, d *Router) error {
case err != nil:
r.Resp = redis.NewErrorf("ERR parse slotnum '%s' failed, %s", r.Multi[1].Value, err)
return nil
- case slot < 0 || slot >= MaxSlotNum:
+ case slot < 0 || slot >= int64(models.GetMaxSlotNum()):
r.Resp = redis.NewErrorf("ERR parse slotnum '%s' failed, out of range", r.Multi[1].Value)
return nil
default:
@@ -613,7 +613,7 @@ func (s *Session) handleRequestSlotsMapping(r *Request, d *Router) error {
})
}
if nblks == 0 {
- var array = make([]*redis.Resp, MaxSlotNum)
+ var array = make([]*redis.Resp, uint32(models.GetMaxSlotNum()))
for i, m := range d.GetSlots() {
array[i] = marshalToResp(m)
}
@@ -624,7 +624,7 @@ func (s *Session) handleRequestSlotsMapping(r *Request, d *Router) error {
case err != nil:
r.Resp = redis.NewErrorf("ERR parse slotnum '%s' failed, %s", r.Multi[1].Value, err)
return nil
- case slot < 0 || slot >= MaxSlotNum:
+ case slot < 0 || slot >= int64(models.GetMaxSlotNum()):
r.Resp = redis.NewErrorf("ERR parse slotnum '%s' failed, out of range", r.Multi[1].Value)
return nil
default:
diff --git a/codis/pkg/topom/config.go b/codis/pkg/topom/config.go
index b6c36f21be..4d7234b662 100644
--- a/codis/pkg/topom/config.go
+++ b/codis/pkg/topom/config.go
@@ -38,6 +38,9 @@ product_auth = ""
# Set bind address for admin(rpc), tcp only.
admin_addr = "0.0.0.0:18080"
+# Set max slot number.
+max_slot_num = 1024
+
# Set arguments for data migration (only accept 'sync' & 'semi-async').
migration_method = "semi-async"
migration_parallel_slots = 100
@@ -78,6 +81,8 @@ type Config struct {
MigrationAsyncNumKeys int `toml:"migration_async_numkeys" json:"migration_async_numkeys"`
MigrationTimeout timesize.Duration `toml:"migration_timeout" json:"migration_timeout"`
+ MaxSlotNum int `toml:"max_slot_num" json:"max_slot_num"`
+
SentinelCheckServerStateInterval timesize.Duration `toml:"sentinel_check_server_state_interval" json:"sentinel_client_timeout"`
SentinelCheckMasterFailoverInterval timesize.Duration `toml:"sentinel_check_master_failover_interval" json:"sentinel_check_master_failover_interval"`
SentinelMasterDeadCheckTimes int8 `toml:"sentinel_master_dead_check_times" json:"sentinel_master_dead_check_times"`
@@ -130,6 +135,9 @@ func (c *Config) Validate() error {
if c.ProductName == "" {
return errors.New("invalid product_name")
}
+ if c.MaxSlotNum <= 0 {
+ return errors.New("invalid max_slot_num")
+ }
if _, ok := models.ParseForwardMethod(c.MigrationMethod); !ok {
return errors.New("invalid migration_method")
}
diff --git a/codis/pkg/topom/context.go b/codis/pkg/topom/context.go
index 0426100bbd..b765154e7c 100644
--- a/codis/pkg/topom/context.go
+++ b/codis/pkg/topom/context.go
@@ -15,8 +15,6 @@ import (
"pika/codis/v2/pkg/utils/math2"
)
-const MaxSlotNum = models.MaxSlotNum
-
type context struct {
slots []*models.SlotMapping
group map[int]*models.Group
@@ -32,10 +30,10 @@ type context struct {
}
func (ctx *context) getSlotMapping(sid int) (*models.SlotMapping, error) {
- if len(ctx.slots) != MaxSlotNum {
- return nil, errors.Errorf("invalid number of slots = %d/%d", len(ctx.slots), MaxSlotNum)
+ if len(ctx.slots) != models.GetMaxSlotNum() {
+ return nil, errors.Errorf("invalid number of slots = %d/%d", len(ctx.slots), models.GetMaxSlotNum())
}
- if sid >= 0 && sid < MaxSlotNum {
+ if sid >= 0 && sid < models.GetMaxSlotNum() {
return ctx.slots[sid], nil
}
return nil, errors.Errorf("slot-[%d] doesn't exist", sid)
diff --git a/codis/pkg/topom/topom_slots.go b/codis/pkg/topom/topom_slots.go
index 802671a006..35d20ca39b 100644
--- a/codis/pkg/topom/topom_slots.go
+++ b/codis/pkg/topom/topom_slots.go
@@ -108,7 +108,7 @@ func (s *Topom) SlotCreateActionRange(beg, end int, gid int, must bool) error {
return err
}
- if !(beg >= 0 && beg <= end && end < MaxSlotNum) {
+ if !(beg >= 0 && beg <= end && end < models.GetMaxSlotNum()) {
return errors.Errorf("invalid slot range [%d,%d]", beg, end)
}
@@ -581,7 +581,7 @@ func (s *Topom) SlotsRebalance(confirm bool) (map[int]int, error) {
}
}
- var lowerBound = MaxSlotNum / len(groupIds)
+ var lowerBound = models.GetMaxSlotNum() / len(groupIds)
// don't migrate slot if groupSize < lowerBound
for _, m := range ctx.slots {
@@ -629,7 +629,7 @@ func (s *Topom) SlotsRebalance(confirm bool) (map[int]int, error) {
tree.Put(dest, nil)
}
- var upperBound = (MaxSlotNum + len(groupIds) - 1) / len(groupIds)
+ var upperBound = (models.GetMaxSlotNum() + len(groupIds) - 1) / len(groupIds)
// rebalance between different server groups
for tree.Size() >= 2 {
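The two bounds above are floor and ceiling division of the slot space across groups; rebalancing then pushes every group's slot count into [lowerBound, upperBound]. A worked example with assumed values:

```go
package main

import "fmt"

func main() {
	maxSlotNum, groups := 1024, 3
	lowerBound := maxSlotNum / groups                // 341: don't shrink a group below this
	upperBound := (maxSlotNum + groups - 1) / groups // 342: ceiling division; don't grow past this
	fmt.Println(lowerBound, upperBound)              // 341 342
}
```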
diff --git a/conf/pika.conf b/conf/pika.conf
index 65847e323d..24154dcac4 100644
--- a/conf/pika.conf
+++ b/conf/pika.conf
@@ -81,7 +81,6 @@ userblacklist :
# Running Mode of Pika. The current version only supports running in "classic mode".
# If set to 'classic', Pika will create multiple DBs whose number is the value of configure item "databases".
-# Meanwhile the following configure item "databases" is valid and the "default-slot-num" is disabled.
instance-mode : classic
# The number of databases when Pika runs in classic mode.
@@ -320,6 +319,9 @@ slotmigrate : no
# whether the block cache is shared among the RocksDB instances, default is per CF
# share-block-cache: no
+# The number of slots for Pika when used with Codis.
+default-slot-num : 1024
+
# whether or not index and filter blocks is stored in block cache
# cache-index-and-filter-blocks: no
diff --git a/docs/ops/config.md b/docs/ops/config.md
index 08695ef265..cc7679275f 100644
--- a/docs/ops/config.md
+++ b/docs/ops/config.md
@@ -49,8 +49,8 @@ instance-mode : classic
# The number of DBs in classic mode, used the same way as in Redis.
databases : 1
-# The default number of slots per table in sharding mode
-default-slot-num:16
+# The number of slots when used with Codis
+default-slot-num : 1024
# Defines how many follower replicas a replica group has; supported values are [0, 1, 2, 3, 4], and 0 disables the feature.
replication-num : 0
diff --git a/docs/ops/shardingAPI.md b/docs/ops/shardingAPI.md
index 9107fed750..2b2ff3fad0 100644
--- a/docs/ops/shardingAPI.md
+++ b/docs/ops/shardingAPI.md
@@ -55,9 +55,9 @@
## Since Pika 3.3, sharding mode supports creating tables dynamically. To stay compatible with the original commands and spare users who don't need multiple tables any extra learning cost, Pika automatically creates table 0 by default, whose slot num comes from the configuration file. Other tables must be created manually.
### 1. The `pkcluster addtable` command:
-Purpose: creates a table; table-id and max-slot-num must be specified at creation time. The default table-id is 0.
+Purpose: creates a table; table-id and default-slot-num must be specified at creation time. The default table-id is 0.
-`pkcluster addtable 1 64`: creates a table with table-id 1 and max-slot-num 64.
+`pkcluster addtable 1 64`: creates a table with table-id 1 and default-slot-num 64.
### 2. The `pkcluster deltable` command:
@@ -67,7 +67,7 @@
### 3. The `pkcluster addslots` command:
-Purpose: adds slots with the given IDs to the table identified by table-id; IDs range over [0, max-slot-num - 1]. The three ID syntaxes below are supported. Without a table-id, slots are added to the default table.
+Purpose: adds slots with the given IDs to the table identified by table-id; IDs range over [0, default-slot-num - 1]. The three ID syntaxes below are supported. Without a table-id, slots are added to the default table.
`pkcluster addslots 0-2 1`: adds the three slots with IDs 0, 1, and 2 to the table with table-id 1
@@ -77,7 +77,7 @@
### 4. The `pkcluster delslots` command:
-Purpose: deletes the slots with the given IDs from the table identified by table-id; IDs range over [0, max-slot-num - 1]. The following three ID syntaxes are supported
+Purpose: deletes the slots with the given IDs from the table identified by table-id; IDs range over [0, default-slot-num - 1]. The following three ID syntaxes are supported
`pkcluster delslots 0-2 1`: deletes the three slots with IDs 0, 1, and 2 from the table with table-id 1
diff --git a/include/pika_define.h b/include/pika_define.h
index 257db76dc3..c58bbff65a 100644
--- a/include/pika_define.h
+++ b/include/pika_define.h
@@ -28,8 +28,6 @@
#define PIKA_MAX_CONN_RBUF_LB (1 << 26) // 64MB
#define PIKA_MAX_CONN_RBUF_HB (1 << 29) // 512MB
#define PIKA_SERVER_ID_MAX 65535
-#define HASH_SLOTS_MASK 0x000003ff
-#define HASH_SLOTS_SIZE (HASH_SLOTS_MASK + 1)
class PikaServer;
diff --git a/include/pika_slot_command.h b/include/pika_slot_command.h
index 1ed14b4b97..1edd5fe815 100644
--- a/include/pika_slot_command.h
+++ b/include/pika_slot_command.h
@@ -13,10 +13,6 @@ const std::string SlotKeyPrefix = "_internal:slotkey:4migrate:";
const std::string SlotTagPrefix = "_internal:slottag:4migrate:";
const size_t MaxKeySendSize = 10 * 1024;
-// crc32
-#define HASH_SLOTS_MASK 0x000003ff
-#define HASH_SLOTS_SIZE (HASH_SLOTS_MASK + 1)
-
extern uint32_t crc32tab[256];
void CRC32TableInit(uint32_t poly);
diff --git a/src/pika_conf.cc b/src/pika_conf.cc
index 7be6fe7e4c..daad05b562 100644
--- a/src/pika_conf.cc
+++ b/src/pika_conf.cc
@@ -199,7 +199,11 @@ int PikaConf::Load() {
for (auto& item : user_blacklist_) {
pstd::StringToLower(item);
}
-
+ GetConfInt("default-slot-num", &default_slot_num_);
+ if (default_slot_num_ <= 0) {
+ LOG(FATAL) << "config default-slot-num error,"
+            << " it should be greater than zero, the actual value is: " << default_slot_num_;
+ }
GetConfStr("dump-path", &bgsave_path_);
bgsave_path_ = bgsave_path_.empty() ? "./dump/" : bgsave_path_;
if (bgsave_path_[bgsave_path_.length() - 1] != '/') {
diff --git a/src/pika_list.cc b/src/pika_list.cc
index 55b38c435a..aa54711e0f 100644
--- a/src/pika_list.cc
+++ b/src/pika_list.cc
@@ -111,7 +111,6 @@ void LPopCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->LPop(key_, &value);
if (s.ok()) {
res_.AppendString(value);
- AddSlotKey("l", key_, slot);
} else if (s.IsNotFound()) {
res_.AppendStringLen(-1);
} else {
@@ -190,7 +189,6 @@ void LRemCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->LRem(key_, count_, value_, &res);
if (s.ok() || s.IsNotFound()) {
res_.AppendInteger(res);
- RemKeyNotExists("l", key_, slot);
} else {
res_.SetRes(CmdRes::kErrOther, s.ToString());
}
@@ -261,7 +259,6 @@ void RPopCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->RPop(key_, &value);
if (s.ok()) {
res_.AppendString(value);
- RemKeyNotExists("l", key_, slot);
} else if (s.IsNotFound()) {
res_.AppendStringLen(-1);
} else {
@@ -284,6 +281,7 @@ void RPopLPushCmd::Do(std::shared_ptr<Slot> slot) {
std::string value;
rocksdb::Status s = slot->db()->RPoplpush(source_, receiver_, &value);
if (s.ok()) {
+ AddSlotKey("k", receiver_, slot);
res_.AppendString(value);
value_poped_from_source_ = value;
is_write_binlog_ = true;
@@ -336,7 +334,7 @@ void RPushCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->RPush(key_, values_, &llen);
if (s.ok()) {
res_.AppendInteger(llen);
- RemKeyNotExists("l", key_, slot);
+ AddSlotKey("l", key_, slot);
} else {
res_.SetRes(CmdRes::kErrOther, s.ToString());
}
@@ -358,7 +356,7 @@ void RPushxCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->RPushx(key_, values_, &llen);
if (s.ok() || s.IsNotFound()) {
res_.AppendInteger(llen);
- RemKeyNotExists("l", key_, slot);
+ AddSlotKey("l", key_, slot);
} else {
res_.SetRes(CmdRes::kErrOther, s.ToString());
}
diff --git a/src/pika_set.cc b/src/pika_set.cc
index f8e4c507cc..bad383ebef 100644
--- a/src/pika_set.cc
+++ b/src/pika_set.cc
@@ -25,9 +25,9 @@ void SAddCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->SAdd(key_, members_, &count);
if (!s.ok()) {
res_.SetRes(CmdRes::kErrOther, s.ToString());
- AddSlotKey("s", key_, slot);
return;
}
+ AddSlotKey("s", key_, slot);
res_.AppendInteger(count);
}
@@ -62,7 +62,6 @@ void SPopCmd::Do(std::shared_ptr<Slot> slot) {
for (const auto& member : members) {
res_.AppendStringLen(member.size());
res_.AppendContent(member);
- AddSlotKey("s", key_, slot);
}
} else if (s.IsNotFound()) {
res_.AppendContent("$-1");
@@ -185,7 +184,6 @@ void SRemCmd::Do(std::shared_ptr<Slot> slot) {
int32_t count = 0;
rocksdb::Status s = slot->db()->SRem(key_, members_, &count);
res_.AppendInteger(count);
- AddSlotKey("s", key_, slot);
}
void SUnionCmd::DoInitial() {
@@ -342,7 +340,6 @@ void SMoveCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->SMove(src_key_, dest_key_, member_, &res);
if (s.ok() || s.IsNotFound()) {
res_.AppendInteger(res);
- AddSlotKey("s", src_key_, slot);
} else {
res_.SetRes(CmdRes::kErrOther, s.ToString());
}
diff --git a/src/pika_slot_command.cc b/src/pika_slot_command.cc
index 0770d12ca5..bf9c8a8118 100644
--- a/src/pika_slot_command.cc
+++ b/src/pika_slot_command.cc
@@ -792,7 +792,7 @@ int GetSlotsID(const std::string &str, uint32_t *pcrc, int *phastag) {
if (phastag != NULL) {
*phastag = hastag;
}
- return (int)(crc & HASH_SLOTS_MASK);
+ return crc % g_pika_conf->default_slot_num();
}
uint32_t CRC32CheckSum(const char *buf, int len) { return CRC32Update(0, buf, len); }
@@ -1017,7 +1017,7 @@ void SlotsMgrtTagSlotCmd::DoInitial() {
res_.SetRes(CmdRes::kInvalidInt);
return;
}
- if (slot_id_ < 0 || slot_id_ >= HASH_SLOTS_SIZE) {
+ if (slot_id_ < 0 || slot_id_ >= g_pika_conf->default_slot_num()) {
std::string detail = "invalid slot number " + std::to_string(slot_id_);
res_.SetRes(CmdRes::kErrOther, detail);
return;
@@ -1298,8 +1298,11 @@ void SlotsInfoCmd::DoInitial() {
}
void SlotsInfoCmd::Do(std::shared_ptr<Slot> slot) {
- int slots_slot[HASH_SLOTS_SIZE] = {0};
- int slots_size[HASH_SLOTS_SIZE] = {0};
+ int slotNum = g_pika_conf->default_slot_num();
+ int slots_slot[slotNum];
+ int slots_size[slotNum];
+ memset(slots_slot, 0, slotNum * sizeof(int));
+ memset(slots_size, 0, slotNum * sizeof(int));
int n = 0;
int i = 0;
int32_t len = 0;
@@ -1368,7 +1371,7 @@ void SlotsMgrtTagSlotAsyncCmd::DoInitial() {
std::string str_slot_num = *it++;
if (!pstd::string2int(str_slot_num.data(), str_slot_num.size(), &slot_id_) || slot_id_ < 0 ||
- slot_id_ >= HASH_SLOTS_SIZE) {
+ slot_id_ >= g_pika_conf->default_slot_num()) {
res_.SetRes(CmdRes::kInvalidInt);
return;
}
@@ -1522,7 +1525,7 @@ void SlotsScanCmd::DoInitial() {
return;
}
key_ = SlotKeyPrefix + argv_[1];
- if (std::stoll(argv_[1].data()) < 0 || std::stoll(argv_[1].data()) >= HASH_SLOTS_SIZE) {
+ if (std::stoll(argv_[1].data()) < 0 || std::stoll(argv_[1].data()) >= g_pika_conf->default_slot_num()) {
res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan);
return;
}
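One subtlety in the GetSlotsID change: `crc & HASH_SLOTS_MASK` (mask 0x3ff) equals `crc % 1024` only because 1024 is a power of two, so the switch to `crc % default_slot_num()` preserves the default mapping while allowing arbitrary slot counts. A quick demonstration of that equivalence (Go used here purely for illustration):

```go
package main

import "fmt"

func main() {
	const mask = 0x3ff // the removed HASH_SLOTS_MASK: 1024 slots
	for _, crc := range []uint32{0, 1023, 1024, 0xdeadbeef} {
		// For a power-of-two n, crc & (n-1) == crc % n.
		fmt.Println(crc&mask == crc%1024) // true for every value
	}
}
```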
diff --git a/tools/pika_operator/examples/pika-minikube/pika-cm.yaml b/tools/pika_operator/examples/pika-minikube/pika-cm.yaml
index acc0d1da78..67456d50a8 100644
--- a/tools/pika_operator/examples/pika-minikube/pika-cm.yaml
+++ b/tools/pika_operator/examples/pika-minikube/pika-cm.yaml
@@ -43,7 +43,7 @@ data:
# a different one on a per-connection basis using SELECT where
# dbid is a number between 0 and 'databases' - 1, limited in [1, 8]
databases : 1
- # default slot number each table in sharding mode
+ # The number of slots for Pika when used with Codis.
default-slot-num : 1024
# replication num defines how many followers in a single raft group, only [0, 1, 2, 3, 4] is valid
replication-num : 0
diff --git a/tools/pika_operator/examples/pika-pvc/pika-cm.yaml b/tools/pika_operator/examples/pika-pvc/pika-cm.yaml
index 1c629165ab..72478972f0 100644
--- a/tools/pika_operator/examples/pika-pvc/pika-cm.yaml
+++ b/tools/pika_operator/examples/pika-pvc/pika-cm.yaml
@@ -43,7 +43,7 @@ data:
# a different one on a per-connection basis using SELECT where
# dbid is a number between 0 and 'databases' - 1, limited in [1, 8]
databases : 1
- # default slot number each table in sharding mode
+ # The number of slots for Pika when used with Codis.
default-slot-num : 1024
# replication num defines how many followers in a single raft group, only [0, 1, 2, 3, 4] is valid
replication-num : 0