diff --git a/curvefs/src/client/fuse_client.cpp b/curvefs/src/client/fuse_client.cpp index 5d327cc5d1..6207a36474 100644 --- a/curvefs/src/client/fuse_client.cpp +++ b/curvefs/src/client/fuse_client.cpp @@ -172,6 +172,8 @@ void FuseClient::Fini() { CURVEFS_ERROR FuseClient::FuseOpInit(void *userdata, struct fuse_conn_info *conn) { + (void)userdata; + (void)conn; return CURVEFS_ERROR::OK; } @@ -253,6 +255,7 @@ void GetDentryParamFromInodeAttr( CURVEFS_ERROR FuseClient::FuseOpLookup(fuse_req_t req, fuse_ino_t parent, const char *name, fuse_entry_param *e) { + (void)req; VLOG(1) << "FuseOpLookup parent: " << parent << ", name: " << name; if (strlen(name) > option_.maxNameLength) { @@ -285,6 +288,7 @@ CURVEFS_ERROR FuseClient::FuseOpLookup(fuse_req_t req, fuse_ino_t parent, CURVEFS_ERROR FuseClient::FuseOpOpen(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; VLOG(1) << "FuseOpOpen, ino: " << ino; std::shared_ptr inodeWrapper; CURVEFS_ERROR ret = inodeManager_->GetInode(ino, inodeWrapper); @@ -674,6 +678,7 @@ CURVEFS_ERROR FuseClient::GetOrCreateRecycleDir(fuse_req_t req, Dentry *out) { CURVEFS_ERROR FuseClient::MoveToRecycle(fuse_req_t req, fuse_ino_t ino, fuse_ino_t parent, const char* name, FsFileType type) { + (void)type; // 1. 
check recycle exist, if not exist, create recycle dir Dentry recycleDir; CURVEFS_ERROR ret = GetOrCreateRecycleDir(req, &recycleDir); @@ -810,6 +815,7 @@ CURVEFS_ERROR FuseClient::RemoveNode(fuse_req_t req, fuse_ino_t parent, CURVEFS_ERROR FuseClient::FuseOpOpenDir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; VLOG(1) << "FuseOpOpenDir ino = " << ino; std::shared_ptr inodeWrapper; CURVEFS_ERROR ret = inodeManager_->GetInode(ino, inodeWrapper); @@ -829,6 +835,7 @@ CURVEFS_ERROR FuseClient::FuseOpOpenDir(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpReleaseDir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; uint64_t dindex = fi->fh; VLOG(1) << "FuseOpReleaseDir, ino: " << ino << ", dindex: " << dindex; dirBuf_->DirBufferRelease(dindex); @@ -924,7 +931,7 @@ CURVEFS_ERROR FuseClient::FuseOpReadDirPlus(fuse_req_t req, fuse_ino_t ino, bufHead->wasRead = true; } - if (off < bufHead->size) { + if (off < static_cast(bufHead->size)) { *buffer = bufHead->p + off; *rSize = std::min(bufHead->size - off, size); } else { @@ -937,6 +944,7 @@ CURVEFS_ERROR FuseClient::FuseOpReadDirPlus(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpRename(fuse_req_t req, fuse_ino_t parent, const char *name, fuse_ino_t newparent, const char *newname) { + (void)req; VLOG(1) << "FuseOpRename from (" << parent << ", " << name << ") to (" << newparent << ", " << newname << ")"; if (strlen(name) > option_.maxNameLength || @@ -986,6 +994,8 @@ CURVEFS_ERROR FuseClient::FuseOpRename(fuse_req_t req, fuse_ino_t parent, CURVEFS_ERROR FuseClient::FuseOpGetAttr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, struct stat *attr) { + (void)req; + (void)fi; VLOG(1) << "FuseOpGetAttr ino = " << ino; InodeAttr inodeAttr; CURVEFS_ERROR ret = @@ -1003,6 +1013,8 @@ CURVEFS_ERROR FuseClient::FuseOpSetAttr(fuse_req_t req, fuse_ino_t ino, struct stat *attr, int to_set, struct fuse_file_info *fi, struct stat *attrOut) { + 
(void)req; + (void)fi; VLOG(1) << "FuseOpSetAttr to_set: " << to_set << ", ino: " << ino << ", attr: " << *attr; std::shared_ptr inodeWrapper; @@ -1095,6 +1107,7 @@ CURVEFS_ERROR FuseClient::FuseOpSetAttr(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpGetXattr(fuse_req_t req, fuse_ino_t ino, const char* name, std::string* value, size_t size) { + (void)req; VLOG(9) << "FuseOpGetXattr, ino: " << ino << ", name: " << name << ", size = " << size; if (option_.disableXattr) { @@ -1134,6 +1147,8 @@ CURVEFS_ERROR FuseClient::FuseOpGetXattr(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpSetXattr(fuse_req_t req, fuse_ino_t ino, const char* name, const char* value, size_t size, int flags) { + (void)req; + (void)flags; if (option_.disableXattr) { return CURVEFS_ERROR::NOTSUPPORT; } @@ -1173,6 +1188,7 @@ CURVEFS_ERROR FuseClient::FuseOpSetXattr(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpListXattr(fuse_req_t req, fuse_ino_t ino, char *value, size_t size, size_t *realSize) { + (void)req; VLOG(1) << "FuseOpListXattr, ino: " << ino << ", size = " << size; InodeAttr inodeAttr; CURVEFS_ERROR ret = inodeManager_->GetInodeAttr(ino, &inodeAttr); @@ -1301,6 +1317,7 @@ CURVEFS_ERROR FuseClient::FuseOpLink(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent, const char *newname, FsFileType type, fuse_entry_param *e) { + (void)req; if (strlen(newname) > option_.maxNameLength) { return CURVEFS_ERROR::NAMETOOLONG; } @@ -1370,6 +1387,7 @@ CURVEFS_ERROR FuseClient::FuseOpLink(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpReadLink(fuse_req_t req, fuse_ino_t ino, std::string *linkStr) { + (void)req; VLOG(1) << "FuseOpReadLink, ino: " << ino << ", linkStr: " << linkStr; InodeAttr attr; CURVEFS_ERROR ret = inodeManager_->GetInodeAttr(ino, &attr); @@ -1384,6 +1402,8 @@ CURVEFS_ERROR FuseClient::FuseOpReadLink(fuse_req_t req, fuse_ino_t ino, CURVEFS_ERROR FuseClient::FuseOpRelease(fuse_req_t req, fuse_ino_t ino, struct 
fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(1) << "FuseOpRelease, ino: " << ino; if (FLAGS_enableCto) { inodeManager_->RemoveOpenedInode(ino); diff --git a/curvefs/src/client/fuse_client.h b/curvefs/src/client/fuse_client.h index c949183d0f..cd620d8857 100644 --- a/curvefs/src/client/fuse_client.h +++ b/curvefs/src/client/fuse_client.h @@ -93,11 +93,11 @@ class FuseClient { dentryManager_(std::make_shared(metaClient_)), dirBuf_(std::make_shared()), fsInfo_(nullptr), - mdsBase_(nullptr), - isStop_(true), init_(false), enableSumInDir_(false), - warmupManager_(nullptr) {} + warmupManager_(nullptr), + mdsBase_(nullptr), + isStop_(true) {} virtual ~FuseClient() {} @@ -112,11 +112,11 @@ class FuseClient { dentryManager_(dentryManager), dirBuf_(std::make_shared()), fsInfo_(nullptr), - mdsBase_(nullptr), - isStop_(true), init_(false), enableSumInDir_(false), - warmupManager_(warmupManager) {} + warmupManager_(warmupManager), + mdsBase_(nullptr), + isStop_(true) {} virtual CURVEFS_ERROR Init(const FuseClientOption &option); @@ -221,11 +221,16 @@ class FuseClient { struct fuse_file_info* fi) = 0; virtual CURVEFS_ERROR FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; + (void)ino; + (void)fi; return CURVEFS_ERROR::OK; } virtual CURVEFS_ERROR FuseOpStatFs(fuse_req_t req, fuse_ino_t ino, struct statvfs* stbuf) { + (void)req; + (void)ino; // TODO(chengyi01,wuhanqing): implement in s3 and volume client stbuf->f_frsize = stbuf->f_bsize = fsInfo_->blocksize(); stbuf->f_blocks = 10UL << 30; diff --git a/curvefs/src/client/fuse_common.h b/curvefs/src/client/fuse_common.h index 8bbb17a510..280d96aa16 100644 --- a/curvefs/src/client/fuse_common.h +++ b/curvefs/src/client/fuse_common.h @@ -34,9 +34,9 @@ extern "C" { #endif struct MountOption { - char* mountPoint; - char* fsName; - char* fsType; + const char* mountPoint; + const char* fsName; + const char* fsType; char* conf; char* mdsAddr; }; diff --git a/curvefs/src/client/fuse_s3_client.cpp 
b/curvefs/src/client/fuse_s3_client.cpp index 9d291aa69e..4bc8f9aaf2 100644 --- a/curvefs/src/client/fuse_s3_client.cpp +++ b/curvefs/src/client/fuse_s3_client.cpp @@ -146,6 +146,7 @@ CURVEFS_ERROR FuseS3Client::FuseOpWrite(fuse_req_t req, fuse_ino_t ino, const char *buf, size_t size, off_t off, struct fuse_file_info *fi, size_t *wSize) { + (void)req; // check align if (fi->flags & O_DIRECT) { if (!(is_aligned(off, DirectIOAlignment) && @@ -214,6 +215,7 @@ CURVEFS_ERROR FuseS3Client::FuseOpRead(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, struct fuse_file_info *fi, char *buffer, size_t *rSize) { + (void)req; // check align if (fi->flags & O_DIRECT) { if (!(is_aligned(off, DirectIOAlignment) && @@ -232,7 +234,7 @@ CURVEFS_ERROR FuseS3Client::FuseOpRead(fuse_req_t req, fuse_ino_t ino, uint64_t fileSize = inodeWrapper->GetLength(); size_t len = 0; - if (fileSize <= off) { + if (static_cast(fileSize) <= off) { *rSize = 0; return CURVEFS_ERROR::OK; } else if (fileSize < off + size) { @@ -306,6 +308,8 @@ CURVEFS_ERROR FuseS3Client::FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, CURVEFS_ERROR FuseS3Client::FuseOpFsync(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(1) << "FuseOpFsync, ino: " << ino << ", datasync: " << datasync; CURVEFS_ERROR ret = s3Adaptor_->Flush(ino); @@ -334,6 +338,8 @@ CURVEFS_ERROR FuseS3Client::Truncate(InodeWrapper *inode, uint64_t length) { CURVEFS_ERROR FuseS3Client::FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(1) << "FuseOpFlush, ino: " << ino; CURVEFS_ERROR ret = CURVEFS_ERROR::OK; diff --git a/curvefs/src/client/fuse_volume_client.cpp b/curvefs/src/client/fuse_volume_client.cpp index ba9295797a..a51b53a6cb 100644 --- a/curvefs/src/client/fuse_volume_client.cpp +++ b/curvefs/src/client/fuse_volume_client.cpp @@ -134,6 +134,7 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpWrite(fuse_req_t req, off_t off, struct 
fuse_file_info *fi, size_t *wSize) { + (void)req; VLOG(9) << "write start, ino: " << ino << ", offset: " << off << ", length: " << size; @@ -189,6 +190,7 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpRead(fuse_req_t req, struct fuse_file_info *fi, char *buffer, size_t *rSize) { + (void)req; VLOG(3) << "read start, ino: " << ino << ", offset: " << off << ", length: " << size; @@ -272,6 +274,8 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpUnlink(fuse_req_t req, fuse_ino_t parent, CURVEFS_ERROR FuseVolumeClient::FuseOpFsync(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(3) << "FuseOpFsync start, ino: " << ino << ", datasync: " << datasync; CURVEFS_ERROR ret = storage_->Flush(ino); @@ -298,12 +302,16 @@ CURVEFS_ERROR FuseVolumeClient::FuseOpFsync(fuse_req_t req, fuse_ino_t ino, } CURVEFS_ERROR FuseVolumeClient::Truncate(InodeWrapper *inode, uint64_t length) { + (void)inode; + (void)length; // Todo: call volume truncate return CURVEFS_ERROR::OK; } CURVEFS_ERROR FuseVolumeClient::FuseOpFlush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { + (void)req; + (void)fi; VLOG(9) << "FuseOpFlush, ino: " << ino; CURVEFS_ERROR ret = storage_->Flush(ino); diff --git a/curvefs/src/client/inode_wrapper.cpp b/curvefs/src/client/inode_wrapper.cpp index 91a4124bd6..b51d24c4f5 100644 --- a/curvefs/src/client/inode_wrapper.cpp +++ b/curvefs/src/client/inode_wrapper.cpp @@ -559,6 +559,7 @@ class UpdateInodeAsyncS3Done : public MetaServerClientDone { } // namespace void InodeWrapper::AsyncS3(MetaServerClientDone *done, bool internal) { + (void)internal; if (dirty_ || !s3ChunkInfoAdd_.empty()) { LockSyncingInode(); LockSyncingS3ChunkInfo(); diff --git a/curvefs/src/client/inode_wrapper.h b/curvefs/src/client/inode_wrapper.h index 0ffcbb2889..82d9d84f09 100644 --- a/curvefs/src/client/inode_wrapper.h +++ b/curvefs/src/client/inode_wrapper.h @@ -80,7 +80,7 @@ class InodeWrapper : public std::enable_shared_from_this { 
InodeWrapper(Inode inode, std::shared_ptr metaClient, std::shared_ptr s3ChunkInfoMetric = nullptr, - uint64_t maxDataSize = ULONG_MAX, + int64_t maxDataSize = LONG_MAX, uint32_t refreshDataInterval = UINT_MAX) : inode_(std::move(inode)), status_(InodeStatus::kNormal), @@ -400,8 +400,8 @@ class InodeWrapper : public std::enable_shared_from_this { InodeAttr dirtyAttr_; InodeStatus status_; - uint64_t baseMaxDataSize_; - uint64_t maxDataSize_; + int64_t baseMaxDataSize_; + int64_t maxDataSize_; uint32_t refreshDataInterval_; uint64_t lastRefreshTime_; diff --git a/curvefs/src/client/main.cpp b/curvefs/src/client/main.cpp index 21a7df16de..66353256d7 100644 --- a/curvefs/src/client/main.cpp +++ b/curvefs/src/client/main.cpp @@ -109,7 +109,7 @@ void extra_options_help() { std::string match_any_pattern( const std::unordered_map& patterns, const char* src) { - int src_len = strlen(src); + size_t src_len = strlen(src); for (const auto& pair : patterns) { const auto& pattern = pair.first; if (pattern.length() < src_len && diff --git a/curvefs/src/client/metric/client_metric.h b/curvefs/src/client/metric/client_metric.h index 2728dd36c1..af38ceb200 100644 --- a/curvefs/src/client/metric/client_metric.h +++ b/curvefs/src/client/metric/client_metric.h @@ -269,7 +269,6 @@ struct DiskCacheMetric { struct KVClientMetric { static const std::string prefix; - InterfaceMetric kvClientGet; InterfaceMetric kvClientSet; diff --git a/curvefs/src/client/rpcclient/mds_client.cpp b/curvefs/src/client/rpcclient/mds_client.cpp index 928fca1d62..4e23f1e0a2 100644 --- a/curvefs/src/client/rpcclient/mds_client.cpp +++ b/curvefs/src/client/rpcclient/mds_client.cpp @@ -59,6 +59,8 @@ MdsClientImpl::Init(const ::curve::client::MetaServerOption &mdsOpt, FSStatusCode MdsClientImpl::MountFs(const std::string& fsName, const Mountpoint& mountPt, FsInfo* fsInfo) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.mountFs.qps.count << 1; LatencyUpdater 
updater(&mdsClientMetric_.mountFs.latency); MountFsResponse response; @@ -88,6 +90,8 @@ FSStatusCode MdsClientImpl::MountFs(const std::string& fsName, FSStatusCode MdsClientImpl::UmountFs(const std::string& fsName, const Mountpoint& mountPt) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.umountFs.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.umountFs.latency); UmountFsResponse response; @@ -113,6 +117,8 @@ FSStatusCode MdsClientImpl::UmountFs(const std::string& fsName, FSStatusCode MdsClientImpl::GetFsInfo(const std::string &fsName, FsInfo *fsInfo) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getFsInfo.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.getFsInfo.latency); GetFsInfoResponse response; @@ -143,6 +149,8 @@ FSStatusCode MdsClientImpl::GetFsInfo(const std::string &fsName, FSStatusCode MdsClientImpl::GetFsInfo(uint32_t fsId, FsInfo *fsInfo) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getFsInfo.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.getFsInfo.latency); GetFsInfoResponse response; @@ -203,6 +211,9 @@ bool MdsClientImpl::GetMetaServerInfo( ::curve::common::StringToUll(strs[1], &port); auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; + (void)addrindex; mdsClientMetric_.getMetaServerInfo.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.getMetaServerInfo.latency); GetMetaServerInfoResponse response; @@ -239,6 +250,8 @@ bool MdsClientImpl::GetMetaServerListInCopysets( const LogicPoolID &logicalpooid, const std::vector ©setidvec, std::vector> *cpinfoVec) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getMetaServerListInCopysets.qps.count << 1; LatencyUpdater updater( &mdsClientMetric_.getMetaServerListInCopysets.latency); @@ -290,6 +303,8 @@ bool MdsClientImpl::GetMetaServerListInCopysets( bool MdsClientImpl::CreatePartition( uint32_t fsID, uint32_t count, 
std::vector *partitionInfos) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.createPartition.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.createPartition.latency); CreatePartitionResponse response; @@ -335,6 +350,8 @@ bool MdsClientImpl::GetCopysetOfPartitions( const std::vector &partitionIDList, std::map *copysetMap) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.getCopysetOfPartitions.qps.count << 1; LatencyUpdater updater( &mdsClientMetric_.getCopysetOfPartitions.latency); @@ -379,6 +396,8 @@ bool MdsClientImpl::GetCopysetOfPartitions( bool MdsClientImpl::ListPartition(uint32_t fsID, std::vector *partitionInfos) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.listPartition.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.listPartition.latency); ListPartitionResponse response; @@ -416,6 +435,8 @@ bool MdsClientImpl::ListPartition(uint32_t fsID, bool MdsClientImpl::AllocOrGetMemcacheCluster(uint32_t fsId, MemcacheClusterInfo* cluster) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.allocOrGetMemcacheCluster.qps.count << 1; LatencyUpdater updater( &mdsClientMetric_.allocOrGetMemcacheCluster.latency); @@ -447,6 +468,8 @@ bool MdsClientImpl::AllocOrGetMemcacheCluster(uint32_t fsId, FSStatusCode MdsClientImpl::AllocS3ChunkId(uint32_t fsId, uint32_t idNum, uint64_t *chunkId) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.allocS3ChunkId.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.allocS3ChunkId.latency); AllocateS3ChunkResponse response; @@ -480,6 +503,8 @@ MdsClientImpl::RefreshSession(const std::vector &txIds, const std::string& fsName, const Mountpoint& mountpoint) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; mdsClientMetric_.refreshSession.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.refreshSession.latency); RefreshSessionRequest 
request; @@ -517,6 +542,8 @@ MdsClientImpl::RefreshSession(const std::vector &txIds, FSStatusCode MdsClientImpl::GetLatestTxId(const GetLatestTxIdRequest& request, GetLatestTxIdResponse* response) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; VLOG(3) << "GetLatestTxId [request]: " << request.DebugString(); mdsClientMetric_.getLatestTxId.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.getLatestTxId.latency); @@ -552,6 +579,8 @@ FSStatusCode MdsClientImpl::GetLatestTxId(const GetLatestTxIdRequest& request, FSStatusCode MdsClientImpl::CommitTx(const CommitTxRequest& request) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; VLOG(3) << "CommitTx [request]: " << request.DebugString(); mdsClientMetric_.commitTx.qps.count << 1; LatencyUpdater updater(&mdsClientMetric_.commitTx.latency); @@ -670,6 +699,8 @@ SpaceErrCode MdsClientImpl::AllocateVolumeBlockGroup( const std::string &owner, std::vector *groups) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; AllocateBlockGroupResponse response; mdsbasecli_->AllocateVolumeBlockGroup(fsId, count, owner, &response, cntl, channel); @@ -705,6 +736,8 @@ SpaceErrCode MdsClientImpl::AcquireVolumeBlockGroup( const std::string &owner, curvefs::mds::space::BlockGroup *groups) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; AcquireBlockGroupResponse response; mdsbasecli_->AcquireVolumeBlockGroup(fsId, blockGroupOffset, owner, &response, cntl, channel); @@ -730,6 +763,8 @@ SpaceErrCode MdsClientImpl::ReleaseVolumeBlockGroup( const std::string &owner, const std::vector &blockGroups) { auto task = RPCTask { + (void)addrindex; + (void)rpctimeoutMS; ReleaseBlockGroupResponse response; mdsbasecli_->ReleaseVolumeBlockGroup(fsId, owner, blockGroups, &response, cntl, channel); diff --git a/curvefs/src/client/rpcclient/metacache.cpp b/curvefs/src/client/rpcclient/metacache.cpp index 4da4c32eab..b7bc02f06e 100644 --- a/curvefs/src/client/rpcclient/metacache.cpp +++ 
b/curvefs/src/client/rpcclient/metacache.cpp @@ -50,13 +50,11 @@ void MetaCache::GetTxId(uint32_t partitionId, uint64_t *txId) { } } -bool MetaCache::GetTxId(uint32_t fsId, - uint64_t inodeId, - uint32_t *partitionId, +bool MetaCache::GetTxId(uint32_t fsId, uint64_t inodeId, uint32_t *partitionId, uint64_t *txId) { for (const auto &partition : partitionInfos_) { - if (fsId == partition.fsid() && - inodeId >= partition.start() && inodeId <= partition.end()) { + if (fsId == partition.fsid() && inodeId >= partition.start() && + inodeId <= partition.end()) { *partitionId = partition.partitionid(); *txId = partition.txid(); GetTxId(*partitionId, txId); @@ -85,7 +83,7 @@ bool MetaCache::RefreshTxId() { return false; } - for (const auto& item : txIds) { + for (const auto &item : txIds) { SetTxId(item.partitionid(), item.txid()); } return true; @@ -215,7 +213,7 @@ bool MetaCache::GetTargetLeader(CopysetTarget *target, uint64_t *applyindex, // if cacahe do not have invalid leader, refresh leader VLOG(3) << "refresh leader for " << target->groupID.ToString(); bool ret = true; - uint32_t retry = 0; + int retry = 0; while (retry++ < metacacheopt_.metacacheGetLeaderRetry) { // refresh from metaserver ret = UpdateLeaderInternal(target->groupID, ©setInfo); @@ -272,7 +270,7 @@ bool MetaCache::CreatePartitions(int currentNum, // already create { ReadLockGuard rl(rwlock4Partitions_); - if (partitionInfos_.size() > currentNum) { + if (static_cast(partitionInfos_.size()) > currentNum) { newPartitions->reserve(partitionInfos_.size() - currentNum); newPartitions->insert(newPartitions->end(), partitionInfos_.begin() + currentNum, @@ -290,8 +288,8 @@ bool MetaCache::CreatePartitions(int currentNum, // add partition and copyset info WriteLockGuard wl4PartitionMap(rwlock4Partitions_); WriteLockGuard wl4CopysetMap(rwlock4copysetInfoMap_); - DoAddOrResetPartitionAndCopyset(*newPartitions, - std::move(copysetMap), false); + DoAddOrResetPartitionAndCopyset(*newPartitions, std::move(copysetMap), 
+ false); return true; } @@ -476,6 +474,7 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( std::vector> metaServerInfos; bool ret = mdsClient_->GetMetaServerListInCopysets( groupID.poolID, {groupID.copysetID}, &metaServerInfos); + (void)ret; bool needUpdate = (!metaServerInfos.empty()) && (metaServerInfos[0].HasPeerInCopyset(leaderAddr)); @@ -562,7 +561,8 @@ bool MetaCache::GetCopysetInfowithCopySetID( } bool TryGetPartitionIdByInodeId(const std::vector &plist, - RWLock *lock, uint64_t inodeID, PartitionID *pid) { + RWLock *lock, uint64_t inodeID, + PartitionID *pid) { ReadLockGuard rl(*lock); for (const auto &it : plist) { if (it.start() <= inodeID && it.end() >= inodeID) { @@ -574,17 +574,17 @@ bool TryGetPartitionIdByInodeId(const std::vector &plist, } bool MetaCache::GetPartitionIdByInodeId(uint32_t fsID, uint64_t inodeID, - PartitionID *pid) { - if (!TryGetPartitionIdByInodeId(partitionInfos_, - &rwlock4Partitions_, inodeID, pid)) { + PartitionID *pid) { + if (!TryGetPartitionIdByInodeId(partitionInfos_, &rwlock4Partitions_, + inodeID, pid)) { // list form mds if (!ListPartitions(fsID)) { LOG(ERROR) << "ListPartitions for {fsid:" << fsID << "} fail, partition list not exist"; return false; } - return TryGetPartitionIdByInodeId(partitionInfos_, - &rwlock4Partitions_, inodeID, pid); + return TryGetPartitionIdByInodeId(partitionInfos_, &rwlock4Partitions_, + inodeID, pid); } return true; } diff --git a/curvefs/src/client/rpcclient/metaserver_client.cpp b/curvefs/src/client/rpcclient/metaserver_client.cpp index 0fc7bfb6b0..0b0ced99e7 100644 --- a/curvefs/src/client/rpcclient/metaserver_client.cpp +++ b/curvefs/src/client/rpcclient/metaserver_client.cpp @@ -42,12 +42,12 @@ using ::curve::common::StringToUl; using ::curve::common::StringToUll; -using curvefs::metaserver::GetOrModifyS3ChunkInfoRequest; -using curvefs::metaserver::GetOrModifyS3ChunkInfoResponse; using curvefs::metaserver::BatchGetInodeAttrRequest; using 
curvefs::metaserver::BatchGetInodeAttrResponse; using curvefs::metaserver::BatchGetXAttrRequest; using curvefs::metaserver::BatchGetXAttrResponse; +using curvefs::metaserver::GetOrModifyS3ChunkInfoRequest; +using curvefs::metaserver::GetOrModifyS3ChunkInfoResponse; namespace curvefs { namespace client { @@ -67,10 +67,10 @@ using UpdateVolumeExtentExecutor = TaskExecutor; using GetVolumeExtentExecutor = TaskExecutor; using ::curvefs::common::LatencyUpdater; -using ::curvefs::common::StreamOptions; using ::curvefs::common::StreamConnection; -using ::curvefs::metaserver::S3ChunkInfo; +using ::curvefs::common::StreamOptions; using ::curvefs::metaserver::MetaServerService_Stub; +using ::curvefs::metaserver::S3ChunkInfo; MetaStatusCode MetaServerClientImpl::Init( const ExcutorOpt &excutorOpt, const ExcutorOpt &excutorInternalOpt, @@ -86,12 +86,12 @@ MetaStatusCode MetaServerClientImpl::Init( #define RPCTask \ [&](LogicPoolID poolID, CopysetID copysetID, PartitionID partitionID, \ uint64_t txId, uint64_t applyIndex, brpc::Channel * channel, \ - brpc::Controller * cntl, TaskExecutorDone *taskExecutorDone) -> int + brpc::Controller * cntl, TaskExecutorDone * taskExecutorDone) -> int #define AsyncRPCTask \ [=](LogicPoolID poolID, CopysetID copysetID, PartitionID partitionID, \ uint64_t txId, uint64_t applyIndex, brpc::Channel * channel, \ - brpc::Controller * cntl, TaskExecutorDone *taskExecutorDone) -> int + brpc::Controller * cntl, TaskExecutorDone * taskExecutorDone) -> int class MetaServerClientRpcDoneBase : public google::protobuf::Closure { public: @@ -123,6 +123,7 @@ MetaStatusCode MetaServerClientImpl::GetDentry(uint32_t fsId, uint64_t inodeid, const std::string &name, Dentry *out) { auto task = RPCTask { + (void)taskExecutorDone; metric_.getDentry.qps.count << 1; LatencyUpdater updater(&metric_.getDentry.latency); GetDentryResponse response; @@ -183,10 +184,10 @@ MetaStatusCode MetaServerClientImpl::GetDentry(uint32_t fsId, uint64_t inodeid, MetaStatusCode 
MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, const std::string &last, - uint32_t count, - bool onlyDir, + uint32_t count, bool onlyDir, std::list *dentryList) { auto task = RPCTask { + (void)taskExecutorDone; metric_.listDentry.qps.count << 1; LatencyUpdater updater(&metric_.listDentry.latency); ListDentryRequest request; @@ -217,9 +218,8 @@ MetaStatusCode MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { LOG(WARNING) << "ListDentry: fsId = " << fsId - << ", inodeid = " << inodeid - << ", last = " << last << ", count = " << count - << ", onlyDir = " << onlyDir + << ", inodeid = " << inodeid << ", last = " << last + << ", count = " << count << ", onlyDir = " << onlyDir << ", errcode = " << ret << ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.has_appliedindex()) { @@ -230,12 +230,11 @@ MetaStatusCode MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, for_each(dentrys.begin(), dentrys.end(), [&](Dentry &d) { dentryList->push_back(d); }); } else { - LOG(WARNING) - << "ListDentry: fsId = " << fsId << ", inodeid = " << inodeid - << ", last = " << last << ", count = " << count - << ", onlyDir = " << onlyDir - << " ok, but applyIndex not set in response:" - << response.DebugString(); + LOG(WARNING) << "ListDentry: fsId = " << fsId + << ", inodeid = " << inodeid << ", last = " << last + << ", count = " << count << ", onlyDir = " << onlyDir + << " ok, but applyIndex not set in response:" + << response.DebugString(); return -1; } @@ -254,6 +253,8 @@ MetaStatusCode MetaServerClientImpl::ListDentry(uint32_t fsId, uint64_t inodeid, MetaStatusCode MetaServerClientImpl::CreateDentry(const Dentry &dentry) { auto task = RPCTask { + (void)applyIndex; + (void)taskExecutorDone; metric_.createDentry.qps.count << 1; LatencyUpdater updater(&metric_.createDentry.latency); CreateDentryResponse response; @@ -324,6 +325,8 @@ MetaStatusCode 
MetaServerClientImpl::DeleteDentry(uint32_t fsId, const std::string &name, FsFileType type) { auto task = RPCTask { + (void)applyIndex; + (void)taskExecutorDone; metric_.deleteDentry.qps.count << 1; LatencyUpdater updater(&metric_.deleteDentry.latency); DeleteDentryResponse response; @@ -382,9 +385,11 @@ MetaStatusCode MetaServerClientImpl::DeleteDentry(uint32_t fsId, MetaStatusCode MetaServerClientImpl::PrepareRenameTx(const std::vector &dentrys) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.prepareRenameTx.qps.count << 1; - LatencyUpdater updater( - &metric_.prepareRenameTx.latency); + LatencyUpdater updater(&metric_.prepareRenameTx.latency); PrepareRenameTxRequest request; PrepareRenameTxResponse response; request.set_poolid(poolID); @@ -433,8 +438,10 @@ MetaServerClientImpl::PrepareRenameTx(const std::vector &dentrys) { } MetaStatusCode MetaServerClientImpl::GetInode(uint32_t fsId, uint64_t inodeid, - Inode *out, bool* streaming) { + Inode *out, bool *streaming) { auto task = RPCTask { + (void)txId; + (void)taskExecutorDone; metric_.getInode.qps.count << 1; LatencyUpdater updater(&metric_.getInode.latency); GetInodeRequest request; @@ -493,8 +500,7 @@ MetaStatusCode MetaServerClientImpl::GetInode(uint32_t fsId, uint64_t inodeid, } bool GroupInodeIdByPartition( - uint32_t fsId, - std::shared_ptr metaCache, + uint32_t fsId, std::shared_ptr metaCache, const std::set &inodeIds, std::unordered_map> *inodeGroups) { for (const auto &it : inodeIds) { @@ -516,8 +522,7 @@ bool GroupInodeIdByPartition( } bool MetaServerClientImpl::SplitRequestInodes( - uint32_t fsId, - const std::set &inodeIds, + uint32_t fsId, const std::set &inodeIds, std::vector> *inodeGroups) { std::unordered_map> groups; bool ret = GroupInodeIdByPartition(fsId, metaCache_, inodeIds, &groups); @@ -552,7 +557,7 @@ void BatchGetInodeAttrRpcDone::Run() { std::unique_ptr self_guard(this); brpc::ClosureGuard done_guard(done_); auto taskCtx = 
done_->GetTaskExcutor()->GetTaskCxt(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); if (cntl.Failed()) { metric_->batchGetInodeAttr.eps.count << 1; @@ -560,7 +565,7 @@ void BatchGetInodeAttrRpcDone::Run() { << cntl.ErrorCode() << ", error content: " << cntl.ErrorText() << ", log id: " << cntl.log_id(); - done_->SetRetCode(-cntl.ErrorCode()); + done_->SetRetCode(-cntl.ErrorCode()); return; } @@ -583,14 +588,15 @@ void BatchGetInodeAttrRpcDone::Run() { VLOG(6) << "batchGetInodeAttr done, " << "response: " << response.DebugString(); done_->SetRetCode(ret); - dynamic_cast(done_) - ->SetInodeAttrs(response.attr()); + dynamic_cast(done_)->SetInodeAttrs( + response.attr()); return; } -MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, - const std::set &inodeIds, - std::list *attr) { +MetaStatusCode +MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, + const std::set &inodeIds, + std::list *attr) { // group inodeid by partition and batchlimit std::vector> inodeGroups; if (!SplitRequestInodes(fsId, inodeIds, &inodeGroups)) { @@ -605,6 +611,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, } uint64_t inodeId = *it.begin(); auto task = RPCTask { + (void)txId; + (void)taskExecutorDone; metric_.batchGetInodeAttr.qps.count << 1; LatencyUpdater updater(&metric_.batchGetInodeAttr.latency); BatchGetInodeAttrRequest request; @@ -614,7 +622,7 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, request.set_partitionid(partitionID); request.set_fsid(fsId); request.set_appliedindex(applyIndex); - *request.mutable_inodeid() = { it.begin(), it.end() }; + *request.mutable_inodeid() = {it.begin(), it.end()}; curvefs::metaserver::MetaServerService_Stub stub(channel); stub.BatchGetInodeAttr(cntl, &request, &response, nullptr); @@ -633,14 +641,13 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, LOG(WARNING) << "BatchGetInodeAttr 
failed, errcode = " << ret << ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.attr_size() > 0 && - response.has_appliedindex()) { + response.has_appliedindex()) { auto *attrs = response.mutable_attr(); attr->insert(attr->end(), std::make_move_iterator(attrs->begin()), std::make_move_iterator(attrs->end())); - metaCache_->UpdateApplyIndex( - CopysetGroupID(poolID, copysetID), - response.appliedindex()); + metaCache_->UpdateApplyIndex(CopysetGroupID(poolID, copysetID), + response.appliedindex()); } else { LOG(WARNING) << "BatchGetInodeAttr ok, but" << " applyIndex or attr not set in response: " @@ -651,8 +658,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, }; auto taskCtx = std::make_shared( MetaServerOpType::BatchGetInodeAttr, task, fsId, inodeId); - BatchGetInodeAttrExcutor excutor( - opt_, metaCache_, channelManager_, std::move(taskCtx)); + BatchGetInodeAttrExcutor excutor(opt_, metaCache_, channelManager_, + std::move(taskCtx)); auto ret = ConvertToMetaStatusCode(excutor.DoRPCTask()); if (ret != MetaStatusCode::OK) { attr->clear(); @@ -662,14 +669,16 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttr(uint32_t fsId, return MetaStatusCode::OK; } -MetaStatusCode MetaServerClientImpl::BatchGetInodeAttrAsync(uint32_t fsId, - const std::vector &inodeIds, MetaServerClientDone *done) { +MetaStatusCode MetaServerClientImpl::BatchGetInodeAttrAsync( + uint32_t fsId, const std::vector &inodeIds, + MetaServerClientDone *done) { if (inodeIds.empty()) { done->Run(); return MetaStatusCode::OK; } auto task = AsyncRPCTask { + (void)txId; metric_.batchGetInodeAttr.qps.count << 1; LatencyUpdater updater(&metric_.batchGetInodeAttr.latency); BatchGetInodeAttrRequest request; @@ -679,26 +688,27 @@ MetaStatusCode MetaServerClientImpl::BatchGetInodeAttrAsync(uint32_t fsId, request.set_partitionid(partitionID); request.set_fsid(fsId); request.set_appliedindex(applyIndex); - *request.mutable_inodeid() = { inodeIds.begin(), inodeIds.end() }; 
- auto *rpcDone = new BatchGetInodeAttrRpcDone(taskExecutorDone, - &metric_); + *request.mutable_inodeid() = {inodeIds.begin(), inodeIds.end()}; + auto *rpcDone = + new BatchGetInodeAttrRpcDone(taskExecutorDone, &metric_); curvefs::metaserver::MetaServerService_Stub stub(channel); stub.BatchGetInodeAttr(cntl, &request, &rpcDone->response, rpcDone); return MetaStatusCode::OK; }; auto taskCtx = std::make_shared( MetaServerOpType::BatchGetInodeAttr, task, fsId, *inodeIds.begin()); - auto excutor = std::make_shared(opt_, - metaCache_, channelManager_, std::move(taskCtx)); - TaskExecutorDone *taskDone = new BatchGetInodeAttrTaskExecutorDone( - excutor, done); + auto excutor = std::make_shared( + opt_, metaCache_, channelManager_, std::move(taskCtx)); + TaskExecutorDone *taskDone = + new BatchGetInodeAttrTaskExecutorDone(excutor, done); excutor->DoAsyncRPCTask(taskDone); return MetaStatusCode::OK; } -MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, - const std::set &inodeIds, - std::list *xattr) { +MetaStatusCode +MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, + const std::set &inodeIds, + std::list *xattr) { // group inodeid by partition and batchlimit std::vector> inodeGroups; if (!SplitRequestInodes(fsId, inodeIds, &inodeGroups)) { @@ -714,6 +724,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, uint64_t inodeId = *it.begin(); auto task = RPCTask { + (void)txId; + (void)taskExecutorDone; metric_.batchGetXattr.qps.count << 1; LatencyUpdater updater(&metric_.batchGetXattr.latency); BatchGetXAttrRequest request; @@ -723,17 +735,17 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, request.set_partitionid(partitionID); request.set_fsid(fsId); request.set_appliedindex(applyIndex); - *request.mutable_inodeid() = { it.begin(), it.end() }; + *request.mutable_inodeid() = {it.begin(), it.end()}; curvefs::metaserver::MetaServerService_Stub stub(channel); stub.BatchGetXAttr(cntl, &request, &response, nullptr); if 
(cntl->Failed()) { metric_.batchGetXattr.eps.count << 1; - LOG(WARNING) << "BatchGetXAttr Failed, errorcode = " - << cntl->ErrorCode() - << ", error content:" << cntl->ErrorText() - << ", log id = " << cntl->log_id(); + LOG(WARNING) + << "BatchGetXAttr Failed, errorcode = " << cntl->ErrorCode() + << ", error content:" << cntl->ErrorText() + << ", log id = " << cntl->log_id(); return -cntl->ErrorCode(); } @@ -742,14 +754,13 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, LOG(WARNING) << "BatchGetXAttr failed, errcode = " << ret << ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.xattr_size() > 0 && - response.has_appliedindex()) { + response.has_appliedindex()) { auto *xattrs = response.mutable_xattr(); xattr->insert(xattr->end(), std::make_move_iterator(xattrs->begin()), std::make_move_iterator(xattrs->end())); - metaCache_->UpdateApplyIndex( - CopysetGroupID(poolID, copysetID), - response.appliedindex()); + metaCache_->UpdateApplyIndex(CopysetGroupID(poolID, copysetID), + response.appliedindex()); } else { LOG(WARNING) << "BatchGetXAttr ok, but" << " applyIndex or attr not set in response: " @@ -760,8 +771,8 @@ MetaStatusCode MetaServerClientImpl::BatchGetXAttr(uint32_t fsId, }; auto taskCtx = std::make_shared( MetaServerOpType::BatchGetInodeAttr, task, fsId, inodeId); - BatchGetInodeAttrExcutor excutor( - opt_, metaCache_, channelManager_, std::move(taskCtx)); + BatchGetInodeAttrExcutor excutor(opt_, metaCache_, channelManager_, + std::move(taskCtx)); auto ret = ConvertToMetaStatusCode(excutor.DoRPCTask()); if (ret != MetaStatusCode::OK) { xattr->clear(); @@ -775,6 +786,9 @@ MetaStatusCode MetaServerClientImpl::UpdateInode(const UpdateInodeRequest &request, bool internal) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.updateInode.qps.count << 1; LatencyUpdater updater(&metric_.updateInode.latency); @@ -831,18 +845,15 @@ MetaServerClientImpl::UpdateInode(const UpdateInodeRequest 
&request, namespace { -#define SET_REQUEST_FIELD_IF_HAS(request, attr, field) \ - do { \ - if ((attr).has_##field()) { \ - (request)->set_##field((attr).field()); \ - } \ +#define SET_REQUEST_FIELD_IF_HAS(request, attr, field) \ + do { \ + if ((attr).has_##field()) { \ + (request)->set_##field((attr).field()); \ + } \ } while (false) -void FillInodeAttr(uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr, - bool nlink, - UpdateInodeRequest* request) { +void FillInodeAttr(uint32_t fsId, uint64_t inodeId, const InodeAttr &attr, + bool nlink, UpdateInodeRequest *request) { request->set_fsid(fsId); request->set_inodeid(inodeId); @@ -869,7 +880,7 @@ void FillInodeAttr(uint32_t fsId, #undef SET_REQUEST_FIELD_IF_HAS -void FillDataIndices(DataIndices&& indices, UpdateInodeRequest* request) { +void FillDataIndices(DataIndices &&indices, UpdateInodeRequest *request) { if (indices.s3ChunkInfoMap && !indices.s3ChunkInfoMap->empty()) { *request->mutable_s3chunkinfoadd() = std::move(indices.s3ChunkInfoMap.value()); @@ -883,24 +894,19 @@ void FillDataIndices(DataIndices&& indices, UpdateInodeRequest* request) { } // namespace -MetaStatusCode MetaServerClientImpl::UpdateInodeAttr( - uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr) { +MetaStatusCode MetaServerClientImpl::UpdateInodeAttr(uint32_t fsId, + uint64_t inodeId, + const InodeAttr &attr) { UpdateInodeRequest request; FillInodeAttr(fsId, inodeId, attr, /*nlink=*/true, &request); return UpdateInode(request); } MetaStatusCode MetaServerClientImpl::UpdateInodeAttrWithOutNlink( - uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr, - S3ChunkInfoMap* s3ChunkInfoAdd, - bool internal) { + uint32_t fsId, uint64_t inodeId, const InodeAttr &attr, + S3ChunkInfoMap *s3ChunkInfoAdd, bool internal) { UpdateInodeRequest request; - FillInodeAttr(fsId, inodeId, attr, /*nlink=*/false, - &request); + FillInodeAttr(fsId, inodeId, attr, /*nlink=*/false, &request); if (s3ChunkInfoAdd != nullptr) { DataIndices indices; 
indices.s3ChunkInfoMap = *s3ChunkInfoAdd; @@ -921,30 +927,27 @@ void UpdateInodeRpcDone::Run() { std::unique_ptr self_guard(this); brpc::ClosureGuard done_guard(done_); auto taskCtx = done_->GetTaskExcutor()->GetTaskCxt(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); if (cntl.Failed()) { metric_->updateInode.eps.count << 1; - LOG(WARNING) << "UpdateInode Failed, errorcode = " - << cntl.ErrorCode() + LOG(WARNING) << "UpdateInode Failed, errorcode = " << cntl.ErrorCode() << ", error content: " << cntl.ErrorText() << ", log id: " << cntl.log_id(); - done_->SetRetCode(-cntl.ErrorCode()); + done_->SetRetCode(-cntl.ErrorCode()); return; } MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { - LOG(WARNING) << "UpdateInode: inodeid = " - << taskCtx->inodeID + LOG(WARNING) << "UpdateInode: inodeid = " << taskCtx->inodeID << ", errcode = " << ret << ", errmsg = " << MetaStatusCode_Name(ret); } else if (response.has_appliedindex()) { metaCache->UpdateApplyIndex(taskCtx->target.groupID, - response.appliedindex()); + response.appliedindex()); } else { - LOG(WARNING) << "UpdateInode: inodeid = " - << taskCtx->inodeID + LOG(WARNING) << "UpdateInode: inodeid = " << taskCtx->inodeID << "ok, but applyIndex not set in response:" << response.DebugString(); done_->SetRetCode(-1); @@ -957,8 +960,10 @@ void UpdateInodeRpcDone::Run() { } void MetaServerClientImpl::UpdateInodeAsync(const UpdateInodeRequest &request, - MetaServerClientDone *done) { + MetaServerClientDone *done) { auto task = AsyncRPCTask { + (void)txId; + (void)applyIndex; metric_.updateInode.qps.count << 1; UpdateInodeRequest req = request; @@ -974,28 +979,24 @@ void MetaServerClientImpl::UpdateInodeAsync(const UpdateInodeRequest &request, auto taskCtx = std::make_shared( MetaServerOpType::UpdateInode, task, request.fsid(), request.inodeid()); - auto excutor = std::make_shared(opt_, - metaCache_, channelManager_, 
std::move(taskCtx)); - TaskExecutorDone *taskDone = new TaskExecutorDone( - excutor, done); + auto excutor = std::make_shared( + opt_, metaCache_, channelManager_, std::move(taskCtx)); + TaskExecutorDone *taskDone = new TaskExecutorDone(excutor, done); excutor->DoAsyncRPCTask(taskDone); } void MetaServerClientImpl::UpdateInodeWithOutNlinkAsync( - uint32_t fsId, - uint64_t inodeId, - const InodeAttr& attr, - MetaServerClientDone* done, - DataIndices&& indices) { + uint32_t fsId, uint64_t inodeId, const InodeAttr &attr, + MetaServerClientDone *done, DataIndices &&indices) { UpdateInodeRequest request; FillInodeAttr(fsId, inodeId, attr, /*nlink=*/false, &request); FillDataIndices(std::move(indices), &request); UpdateInodeAsync(request, done); } -bool MetaServerClientImpl::ParseS3MetaStreamBuffer(butil::IOBuf* buffer, - uint64_t* chunkIndex, - S3ChunkInfoList* list) { +bool MetaServerClientImpl::ParseS3MetaStreamBuffer(butil::IOBuf *buffer, + uint64_t *chunkIndex, + S3ChunkInfoList *list) { butil::IOBuf out; std::string delim = ":"; if (buffer->cut_until(&out, delim) != 0) { @@ -1012,16 +1013,16 @@ bool MetaServerClientImpl::ParseS3MetaStreamBuffer(butil::IOBuf* buffer, return true; } -bool MetaServerClientImpl::HandleS3MetaStreamBuffer(butil::IOBuf* buffer, - S3ChunkInfoMap* out) { +bool MetaServerClientImpl::HandleS3MetaStreamBuffer(butil::IOBuf *buffer, + S3ChunkInfoMap *out) { uint64_t chunkIndex; S3ChunkInfoList list; if (!ParseS3MetaStreamBuffer(buffer, &chunkIndex, &list)) { return false; } - auto merge = [](S3ChunkInfoList* from, S3ChunkInfoList* to) { - for (size_t i = 0; i < from->s3chunks_size(); i++) { + auto merge = [](S3ChunkInfoList *from, S3ChunkInfoList *to) { + for (int i = 0; i < from->s3chunks_size(); i++) { auto chunkinfo = to->add_s3chunks(); *chunkinfo = std::move(*from->mutable_s3chunks(i)); } @@ -1042,6 +1043,9 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( bool returnS3ChunkInfoMap, google::protobuf::Map *out, bool internal) { 
auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.appendS3ChunkInfo.qps.count << 1; LatencyUpdater updater(&metric_.appendS3ChunkInfo.latency); GetOrModifyS3ChunkInfoRequest request; @@ -1064,7 +1068,7 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( streamClient_.Close(connection); } }); - auto receiveCallback = [&](butil::IOBuf* buffer) { + auto receiveCallback = [&](butil::IOBuf *buffer) { return HandleS3MetaStreamBuffer(buffer, out); }; if (returnS3ChunkInfoMap) { @@ -1090,8 +1094,7 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " << inodeId - << ", fsId: " << fsId - << ", errorcode: " << ret + << ", fsId: " << fsId << ", errorcode: " << ret << ", errmsg: " << MetaStatusCode_Name(ret); return ret; } else if (response.has_appliedindex()) { @@ -1120,17 +1123,17 @@ MetaStatusCode MetaServerClientImpl::GetOrModifyS3ChunkInfo( }; bool streaming = returnS3ChunkInfoMap; - auto taskCtx = std::make_shared( - MetaServerOpType::GetOrModifyS3ChunkInfo, - task, fsId, inodeId, streaming); + auto taskCtx = + std::make_shared(MetaServerOpType::GetOrModifyS3ChunkInfo, + task, fsId, inodeId, streaming); ExcutorOpt opt; if (internal) { opt = optInternal_; } else { opt = opt_; } - GetOrModifyS3ChunkInfoExcutor excutor( - opt, metaCache_, channelManager_, std::move(taskCtx)); + GetOrModifyS3ChunkInfoExcutor excutor(opt, metaCache_, channelManager_, + std::move(taskCtx)); return ConvertToMetaStatusCode(excutor.DoRPCTask()); } @@ -1146,7 +1149,7 @@ void GetOrModifyS3ChunkInfoRpcDone::Run() { std::unique_ptr self_guard(this); brpc::ClosureGuard done_guard(done_); auto taskCtx = done_->GetTaskExcutor()->GetTaskCxt(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); if (cntl.Failed()) { 
metric_->appendS3ChunkInfo.eps.count << 1; @@ -1160,21 +1163,17 @@ void GetOrModifyS3ChunkInfoRpcDone::Run() { MetaStatusCode ret = response.statuscode(); if (ret != MetaStatusCode::OK) { - LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " - << taskCtx->inodeID - << ", fsId: " << taskCtx->fsID - << ", errorcode: " << ret + LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " << taskCtx->inodeID + << ", fsId: " << taskCtx->fsID << ", errorcode: " << ret << ", errmsg: " << MetaStatusCode_Name(ret); done_->SetRetCode(ret); return; } else if (response.has_appliedindex()) { metaCache->UpdateApplyIndex(taskCtx->target.groupID, - response.appliedindex()); + response.appliedindex()); } else { - LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " - << taskCtx->inodeID - << ", fsId: " - << taskCtx->fsID + LOG(WARNING) << "GetOrModifyS3ChunkInfo, inodeId: " << taskCtx->inodeID + << ", fsId: " << taskCtx->fsID << "ok, but applyIndex or inode not set in response: " << response.DebugString(); done_->SetRetCode(-1); @@ -1188,10 +1187,11 @@ void GetOrModifyS3ChunkInfoRpcDone::Run() { void MetaServerClientImpl::GetOrModifyS3ChunkInfoAsync( uint32_t fsId, uint64_t inodeId, - const google::protobuf::Map< - uint64_t, S3ChunkInfoList> &s3ChunkInfos, + const google::protobuf::Map &s3ChunkInfos, MetaServerClientDone *done) { auto task = AsyncRPCTask { + (void)txId; + (void)applyIndex; metric_.appendS3ChunkInfo.qps.count << 1; GetOrModifyS3ChunkInfoRequest request; @@ -1207,23 +1207,25 @@ void MetaServerClientImpl::GetOrModifyS3ChunkInfoAsync( new GetOrModifyS3ChunkInfoRpcDone(taskExecutorDone, &metric_); curvefs::metaserver::MetaServerService_Stub stub(channel); - stub.GetOrModifyS3ChunkInfo( - cntl, &request, &rpcDone->response, rpcDone); + stub.GetOrModifyS3ChunkInfo(cntl, &request, &rpcDone->response, + rpcDone); return MetaStatusCode::OK; }; auto taskCtx = std::make_shared( MetaServerOpType::GetOrModifyS3ChunkInfo, task, fsId, inodeId); - auto excutor = std::make_shared(opt_, - 
metaCache_, channelManager_, std::move(taskCtx)); - TaskExecutorDone *taskDone = new TaskExecutorDone( - excutor, done); + auto excutor = std::make_shared( + opt_, metaCache_, channelManager_, std::move(taskCtx)); + TaskExecutorDone *taskDone = new TaskExecutorDone(excutor, done); excutor->DoAsyncRPCTask(taskDone); } MetaStatusCode MetaServerClientImpl::CreateInode(const InodeParam ¶m, Inode *out) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.createInode.qps.count << 1; LatencyUpdater updater(&metric_.createInode.latency); CreateInodeResponse response; @@ -1242,7 +1244,7 @@ MetaStatusCode MetaServerClientImpl::CreateInode(const InodeParam ¶m, request.set_parent(param.parent); struct timespec now; clock_gettime(CLOCK_REALTIME, &now); - Time* tm = new Time(); + Time *tm = new Time(); tm->set_sec(now.tv_sec); tm->set_nsec(now.tv_nsec); request.set_allocated_create(tm); @@ -1295,6 +1297,9 @@ MetaStatusCode MetaServerClientImpl::CreateInode(const InodeParam ¶m, MetaStatusCode MetaServerClientImpl::CreateManageInode(const InodeParam ¶m, Inode *out) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; metric_.createInode.qps.count << 1; LatencyUpdater updater(&metric_.createInode.latency); CreateManageInodeResponse response; @@ -1349,7 +1354,7 @@ MetaStatusCode MetaServerClientImpl::CreateManageInode(const InodeParam ¶m, }; auto taskCtx = std::make_shared( - MetaServerOpType::CreateManageInode, task, param.fsId, 0); + MetaServerOpType::CreateManageInode, task, param.fsId, 0); CreateInodeExcutor excutor(opt_, metaCache_, channelManager_, std::move(taskCtx)); return ConvertToMetaStatusCode(excutor.DoRPCTask()); @@ -1358,6 +1363,9 @@ MetaStatusCode MetaServerClientImpl::CreateManageInode(const InodeParam ¶m, MetaStatusCode MetaServerClientImpl::DeleteInode(uint32_t fsId, uint64_t inodeid) { auto task = RPCTask { + (void)txId; + (void)applyIndex; + (void)taskExecutorDone; 
metric_.deleteInode.qps.count << 1; LatencyUpdater updater(&metric_.deleteInode.latency); DeleteInodeResponse response; @@ -1421,7 +1429,7 @@ void UpdateVolumeExtentRpcDone::Run() { auto taskCtx = done_->GetTaskExcutor()->GetTaskCxt(); auto metaCache = done_->GetTaskExcutor()->GetMetaCache(); - auto& cntl = taskCtx->cntl_; + auto &cntl = taskCtx->cntl_; if (cntl.Failed()) { metric_->updateVolumeExtent.eps.count << 1; @@ -1447,21 +1455,21 @@ void UpdateVolumeExtentRpcDone::Run() { done_->SetRetCode(st); } -#define SET_COMMON_FIELDS \ - do { \ - request.set_poolid(poolID); \ - request.set_copysetid(copysetID); \ - request.set_partitionid(partitionID); \ - request.set_fsid(fsId); \ - request.set_inodeid(inodeId); \ +#define SET_COMMON_FIELDS \ + do { \ + request.set_poolid(poolID); \ + request.set_copysetid(copysetID); \ + request.set_partitionid(partitionID); \ + request.set_fsid(fsId); \ + request.set_inodeid(inodeId); \ } while (0) void MetaServerClientImpl::AsyncUpdateVolumeExtent( - uint32_t fsId, - uint64_t inodeId, - const VolumeExtentList &extents, + uint32_t fsId, uint64_t inodeId, const VolumeExtentList &extents, MetaServerClientDone *done) { auto task = AsyncRPCTask { + (void)txId; + (void)applyIndex; metric_.updateVolumeExtent.qps.count << 1; metaserver::UpdateVolumeExtentRequest request; SET_COMMON_FIELDS; @@ -1503,11 +1511,10 @@ struct ParseVolumeExtentCallBack { } // namespace -MetaStatusCode MetaServerClientImpl::GetVolumeExtent( - uint32_t fsId, - uint64_t inodeId, - bool streaming, - VolumeExtentList *extents) { +MetaStatusCode +MetaServerClientImpl::GetVolumeExtent(uint32_t fsId, uint64_t inodeId, + bool streaming, + VolumeExtentList *extents) { auto task = RPCTask { (void)txId; (void)applyIndex; @@ -1589,8 +1596,9 @@ MetaStatusCode MetaServerClientImpl::GetVolumeExtent( return ConvertToMetaStatusCode(executor.DoRPCTask()); } -MetaStatusCode MetaServerClientImpl::GetInodeAttr( - uint32_t fsId, uint64_t inodeid, InodeAttr *attr) { +MetaStatusCode 
MetaServerClientImpl::GetInodeAttr(uint32_t fsId, + uint64_t inodeid, + InodeAttr *attr) { std::set inodeIds; inodeIds.insert(inodeid); std::list attrs; diff --git a/curvefs/src/client/s3/client_s3_adaptor.cpp b/curvefs/src/client/s3/client_s3_adaptor.cpp index 914eeca4a2..d7fdb73eec 100644 --- a/curvefs/src/client/s3/client_s3_adaptor.cpp +++ b/curvefs/src/client/s3/client_s3_adaptor.cpp @@ -333,6 +333,7 @@ int S3ClientAdaptorImpl::Stop() { int S3ClientAdaptorImpl::ExecAsyncDownloadTask( void *meta, bthread::TaskIterator &iter) { // NOLINT + (void)meta; if (iter.is_queue_stopped()) { return 0; } diff --git a/curvefs/src/client/s3/client_s3_cache_manager.cpp b/curvefs/src/client/s3/client_s3_cache_manager.cpp index 12402362b4..c1830bddbd 100644 --- a/curvefs/src/client/s3/client_s3_cache_manager.cpp +++ b/curvefs/src/client/s3/client_s3_cache_manager.cpp @@ -23,6 +23,7 @@ #include "curvefs/src/client/s3/client_s3_cache_manager.h" #include +#include #include #include "curvefs/src/client/s3/client_s3_adaptor.h" @@ -46,11 +47,11 @@ namespace curvefs { namespace client { void FsCacheManager::DataCacheNumInc() { - g_s3MultiManagerMetric->writeDataCacheNum << 1; - VLOG(9) << "DataCacheNumInc() v: 1,wDataCacheNum:" - << wDataCacheNum_.load(std::memory_order_relaxed); - wDataCacheNum_.fetch_add(1, std::memory_order_relaxed); - } + g_s3MultiManagerMetric->writeDataCacheNum << 1; + VLOG(9) << "DataCacheNumInc() v: 1,wDataCacheNum:" + << wDataCacheNum_.load(std::memory_order_relaxed); + wDataCacheNum_.fetch_add(1, std::memory_order_relaxed); +} void FsCacheManager::DataCacheNumFetchSub(uint64_t v) { g_s3MultiManagerMetric->writeDataCacheNum << -1 * v; @@ -137,7 +138,7 @@ bool FsCacheManager::Set(DataCachePtr dataCache, while (lruByte_ >= readCacheMaxByte_) { --iter; - auto& trim = *iter; + auto &trim = *iter; trim->SetReadCacheState(false); lruByte_ -= trim->GetActualLen(); retiredBytes += trim->GetActualLen(); @@ -278,8 +279,7 @@ void FileCacheManager::WriteChunk(uint64_t 
index, uint64_t chunkPos, chunkCacheManager->WriteNewDataCache(s3ClientAdaptor_, chunkPos, writeLen, dataBuf); } - VLOG(9) << "WriteChunk end, index: " << index - << ", chunkPos: " << chunkPos; + VLOG(9) << "WriteChunk end, index: " << index << ", chunkPos: " << chunkPos; return; } @@ -359,8 +359,7 @@ void FileCacheManager::ReadFromMemCache( *actualReadLen = dataBufferOffset; - VLOG_IF(3, memCacheMissRequest->empty()) - << "greate! memory cache all hit."; + VLOG_IF(3, memCacheMissRequest->empty()) << "greate! memory cache all hit."; } int FileCacheManager::GenerateKVReuqest( @@ -400,6 +399,8 @@ int FileCacheManager::GenerateKVReuqest( int FileCacheManager::HandleReadS3NotExist( int ret, uint32_t retry, const std::shared_ptr &inodeWrapper) { + (void)ret; + (void)retry; uint32_t maxIntervalMs = s3ClientAdaptor_->GetMaxReadRetryIntervalMs(); // hardcode, fixme uint32_t retryIntervalMs = s3ClientAdaptor_->GetReadRetryIntervalMs(); @@ -426,7 +427,7 @@ int FileCacheManager::HandleReadS3NotExist( } int FileCacheManager::Read(uint64_t inodeId, uint64_t offset, uint64_t length, - char *dataBuf) { + char *dataBuf) { // 1. 
read from memory cache uint64_t actualReadLen = 0; std::vector memCacheMissRequest; @@ -482,9 +483,9 @@ int FileCacheManager::Read(uint64_t inodeId, uint64_t offset, uint64_t length, } bool FileCacheManager::ReadKVRequestFromLocalCache(const std::string &name, - char *databuf, - uint64_t offset, - uint64_t len) { + char *databuf, + uint64_t offset, + uint64_t len) { uint64_t start = butil::cpuwide_time_us(); bool mayCached = s3ClientAdaptor_->HasDiskCache() && @@ -517,6 +518,7 @@ bool FileCacheManager::ReadKVRequestFromRemoteCache(const std::string &name, auto task = std::make_shared(name, databuf, offset, length); CountDownEvent event(1); task->done = [&](const std::shared_ptr &task) { + (void)task; event.Signal(); return; }; @@ -627,9 +629,9 @@ int FileCacheManager::ReadKVRequest( } void FileCacheManager::PrefetchForBlock(const S3ReadRequest &req, - uint64_t fileLen, uint64_t blockSize, - uint64_t chunkSize, - uint64_t startBlockIndex) { + uint64_t fileLen, uint64_t blockSize, + uint64_t chunkSize, + uint64_t startBlockIndex) { uint32_t prefetchBlocks = s3ClientAdaptor_->GetPrefetchBlocks(); uint32_t objectPrefix = s3ClientAdaptor_->GetObjectPrefix(); std::vector> prefetchObjs; @@ -640,9 +642,8 @@ void FileCacheManager::PrefetchForBlock(const S3ReadRequest &req, req.chunkId, blockIndex, req.compaction, req.fsId, req.inodeId, objectPrefix); uint64_t maxReadLen = (blockIndex + 1) * blockSize; - uint64_t needReadLen = maxReadLen > fileLen - ? fileLen - blockIndex * blockSize - : blockSize; + uint64_t needReadLen = + maxReadLen > fileLen ? 
fileLen - blockIndex * blockSize : blockSize; prefetchObjs.push_back(std::make_pair(name, needReadLen)); @@ -663,7 +664,7 @@ class AsyncPrefetchCallback { void operator()(const S3Adapter *, const std::shared_ptr &context) { VLOG(9) << "prefetch end: " << context->key << ", len " << context->len - << "actual len: " << context->actualLen; + << "actual len: " << context->actualLen; std::unique_ptr guard(context->buf); auto fileCache = s3Client_->GetFsCacheManager()->FindFileCacheManager(inode_); @@ -683,8 +684,8 @@ class AsyncPrefetchCallback { int ret = s3Client_->GetDiskCacheManager()->WriteReadDirect( context->key, context->buf, context->actualLen); if (ret < 0) { - LOG_EVERY_SECOND(INFO) << - "write read directly failed, key: " << context->key; + LOG_EVERY_SECOND(INFO) + << "write read directly failed, key: " << context->key; } { curve::common::LockGuard lg(fileCache->downloadMtx_); @@ -982,7 +983,7 @@ void FileCacheManager::ReleaseCache() { WriteLockGuard writeLockGuard(rwLock_); uint64_t chunNum = chunkCacheMap_.size(); - for (auto& chunk : chunkCacheMap_) { + for (auto &chunk : chunkCacheMap_) { chunk.second->ReleaseCache(); } @@ -1016,6 +1017,7 @@ void FileCacheManager::TruncateCache(uint64_t offset, uint64_t fileSize) { } CURVEFS_ERROR FileCacheManager::Flush(bool force, bool toS3) { + (void)toS3; // Todo: concurrent flushes within one file // instead of multiple file flushes may be better CURVEFS_ERROR ret = CURVEFS_ERROR::OK; @@ -1088,9 +1090,10 @@ CURVEFS_ERROR FileCacheManager::Flush(bool force, bool toS3) { } void ChunkCacheManager::ReadChunk(uint64_t index, uint64_t chunkPos, - uint64_t readLen, char *dataBuf, - uint64_t dataBufOffset, - std::vector *requests) { + uint64_t readLen, char *dataBuf, + uint64_t dataBufOffset, + std::vector *requests) { + (void)index; ReadLockGuard readLockGuard(rwLockChunk_); std::vector cacheMissWriteRequests, cacheMissFlushDataRequest; // read by write cache @@ -1103,9 +1106,8 @@ void ChunkCacheManager::ReadChunk(uint64_t 
index, uint64_t chunkPos, // read by flushing data cache for (auto request : cacheMissWriteRequests) { std::vector tmpRequests; - ReadByFlushData(request.chunkPos, request.len, - dataBuf, request.bufOffset, - &tmpRequests); + ReadByFlushData(request.chunkPos, request.len, dataBuf, + request.bufOffset, &tmpRequests); cacheMissFlushDataRequest.insert(cacheMissFlushDataRequest.end(), tmpRequests.begin(), tmpRequests.end()); @@ -1115,9 +1117,8 @@ void ChunkCacheManager::ReadChunk(uint64_t index, uint64_t chunkPos, // read by read cache for (auto request : cacheMissFlushDataRequest) { std::vector tmpRequests; - ReadByReadCache(request.chunkPos, request.len, - dataBuf, request.bufOffset, - &tmpRequests); + ReadByReadCache(request.chunkPos, request.len, dataBuf, + request.bufOffset, &tmpRequests); requests->insert(requests->end(), tmpRequests.begin(), tmpRequests.end()); } @@ -1128,11 +1129,10 @@ void ChunkCacheManager::ReadChunk(uint64_t index, uint64_t chunkPos, // read by read cache for (auto request : cacheMissWriteRequests) { std::vector tmpRequests; - ReadByReadCache(request.chunkPos, request.len, - dataBuf, request.bufOffset, - &tmpRequests); + ReadByReadCache(request.chunkPos, request.len, dataBuf, + request.bufOffset, &tmpRequests); requests->insert(requests->end(), tmpRequests.begin(), - tmpRequests.end()); + tmpRequests.end()); } return; @@ -1208,8 +1208,8 @@ void ChunkCacheManager::ReadByWriteCache(uint64_t chunkPos, uint64_t readLen, --------- DataCache */ if (chunkPos + readLen <= dcChunkPos + dcLen) { - iter->second->CopyDataCacheToBuf( - chunkPos - dcChunkPos, readLen, dataBuf + dataBufOffset); + iter->second->CopyDataCacheToBuf(chunkPos - dcChunkPos, readLen, + dataBuf + dataBufOffset); readLen = 0; break; /* @@ -1218,8 +1218,8 @@ void ChunkCacheManager::ReadByWriteCache(uint64_t chunkPos, uint64_t readLen, */ } else { iter->second->CopyDataCacheToBuf(chunkPos - dcChunkPos, - dcChunkPos + dcLen - chunkPos, - dataBuf + dataBufOffset); + dcChunkPos + dcLen 
- chunkPos, + dataBuf + dataBufOffset); readLen = chunkPos + readLen - dcChunkPos - dcLen; dataBufOffset = dcChunkPos + dcLen - chunkPos + dataBufOffset; chunkPos = dcChunkPos + dcLen; @@ -1314,7 +1314,7 @@ void ChunkCacheManager::ReadByReadCache(uint64_t chunkPos, uint64_t readLen, */ if (chunkPos + readLen <= dcChunkPos + dcLen) { dataCache->CopyDataCacheToBuf(chunkPos - dcChunkPos, readLen, - dataBuf + dataBufOffset); + dataBuf + dataBufOffset); readLen = 0; break; /* @@ -1323,8 +1323,8 @@ void ChunkCacheManager::ReadByReadCache(uint64_t chunkPos, uint64_t readLen, */ } else { dataCache->CopyDataCacheToBuf(chunkPos - dcChunkPos, - dcChunkPos + dcLen - chunkPos, - dataBuf + dataBufOffset); + dcChunkPos + dcLen - chunkPos, + dataBuf + dataBufOffset); readLen = chunkPos + readLen - dcChunkPos - dcLen; dataBufOffset = dcChunkPos + dcLen - chunkPos + dataBufOffset; chunkPos = dcChunkPos + dcLen; @@ -1346,8 +1346,8 @@ void ChunkCacheManager::ReadByReadCache(uint64_t chunkPos, uint64_t readLen, } void ChunkCacheManager::ReadByFlushData(uint64_t chunkPos, uint64_t readLen, - char *dataBuf, uint64_t dataBufOffset, - std::vector *requests) { + char *dataBuf, uint64_t dataBufOffset, + std::vector *requests) { uint64_t dcChunkPos = flushingDataCache_->GetChunkPos(); uint64_t dcLen = flushingDataCache_->GetLen(); ReadRequest request; @@ -1405,9 +1405,9 @@ void ChunkCacheManager::ReadByFlushData(uint64_t chunkPos, uint64_t readLen, --------- DataCache */ } else { - flushingDataCache_->CopyDataCacheToBuf(chunkPos - dcChunkPos, - dcChunkPos + dcLen - chunkPos, - dataBuf + dataBufOffset); + flushingDataCache_->CopyDataCacheToBuf( + chunkPos - dcChunkPos, dcChunkPos + dcLen - chunkPos, + dataBuf + dataBufOffset); readLen = chunkPos + readLen - dcChunkPos - dcLen; dataBufOffset = dcChunkPos + dcLen - chunkPos + dataBufOffset; chunkPos = dcChunkPos + dcLen; @@ -1525,18 +1525,15 @@ void ChunkCacheManager::AddReadDataCache(DataCachePtr dataCache) { uint64_t actualLen = 
(*dcpIter)->GetActualLen(); if (s3ClientAdaptor_->GetFsCacheManager()->Delete(dcpIter)) { g_s3MultiManagerMetric->readDataCacheNum << -1; - g_s3MultiManagerMetric->readDataCacheByte - << -1 * actualLen; + g_s3MultiManagerMetric->readDataCacheByte << -1 * actualLen; dataRCacheMap_.erase(iter); } } std::list::iterator outIter; - bool ret = - s3ClientAdaptor_->GetFsCacheManager()->Set(dataCache, &outIter); + bool ret = s3ClientAdaptor_->GetFsCacheManager()->Set(dataCache, &outIter); if (ret) { g_s3MultiManagerMetric->readDataCacheNum << 1; - g_s3MultiManagerMetric->readDataCacheByte - << dataCache->GetActualLen(); + g_s3MultiManagerMetric->readDataCacheByte << dataCache->GetActualLen(); dataRCacheMap_.emplace(chunkPos, outIter); } } @@ -1550,7 +1547,7 @@ void ChunkCacheManager::ReleaseReadDataCache(uint64_t key) { } g_s3MultiManagerMetric->readDataCacheNum << -1; g_s3MultiManagerMetric->readDataCacheByte - << -1 * (*(iter->second))->GetActualLen(); + << -1 * (*(iter->second))->GetActualLen(); dataRCacheMap_.erase(iter); return; } @@ -1620,8 +1617,7 @@ void ChunkCacheManager::TruncateReadCache(uint64_t chunkPos) { if ((dcChunkPos + dcLen) > chunkPos) { if (s3ClientAdaptor_->GetFsCacheManager()->Delete(rIter->second)) { g_s3MultiManagerMetric->readDataCacheNum << -1; - g_s3MultiManagerMetric->readDataCacheByte - << -1 * dcActualLen; + g_s3MultiManagerMetric->readDataCacheByte << -1 * dcActualLen; dataRCacheMap_.erase(next(rIter).base()); } } else { @@ -1632,8 +1628,7 @@ void ChunkCacheManager::TruncateReadCache(uint64_t chunkPos) { void ChunkCacheManager::ReleaseWriteDataCache(const DataCachePtr &dataCache) { s3ClientAdaptor_->GetFsCacheManager()->DataCacheNumFetchSub(1); - VLOG(9) << "chunk flush DataCacheByteDec len:" - << dataCache->GetActualLen(); + VLOG(9) << "chunk flush DataCacheByteDec len:" << dataCache->GetActualLen(); s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteDec( dataCache->GetActualLen()); if 
(!s3ClientAdaptor_->GetFsCacheManager()->WriteCacheIsFull()) { @@ -1713,7 +1708,7 @@ CURVEFS_ERROR ChunkCacheManager::Flush(uint64_t inodeId, bool force, } void ChunkCacheManager::UpdateWriteCacheMap(uint64_t oldChunkPos, - DataCache *pDataCache) { + DataCache *pDataCache) { auto iter = dataWCacheMap_.find(oldChunkPos); DataCachePtr datacache; if (iter != dataWCacheMap_.end()) { @@ -1739,10 +1734,10 @@ void ChunkCacheManager::AddWriteDataCacheForTest(DataCachePtr dataCache) { DataCache::DataCache(S3ClientAdaptorImpl *s3ClientAdaptor, ChunkCacheManagerPtr chunkCacheManager, uint64_t chunkPos, uint64_t len, const char *data, - std::shared_ptr kvClientManager) + std::shared_ptr kvClientManager) : s3ClientAdaptor_(std::move(s3ClientAdaptor)), - chunkCacheManager_(chunkCacheManager), - status_(DataCacheStatus::Dirty), inReadCache_(false) { + chunkCacheManager_(chunkCacheManager), status_(DataCacheStatus::Dirty), + inReadCache_(false) { uint64_t blockSize = s3ClientAdaptor->GetBlockSize(); uint32_t pageSize = s3ClientAdaptor->GetPageSize(); chunkPos_ = chunkPos; @@ -1803,7 +1798,7 @@ DataCache::DataCache(S3ClientAdaptorImpl *s3ClientAdaptor, } void DataCache::CopyBufToDataCache(uint64_t dataCachePos, uint64_t len, - const char *data) { + const char *data) { uint64_t blockSize = s3ClientAdaptor_->GetBlockSize(); uint32_t pageSize = s3ClientAdaptor_->GetPageSize(); uint64_t pos = chunkPos_ + dataCachePos; @@ -1944,7 +1939,7 @@ void DataCache::MergeDataCacheToDataCache(DataCachePtr mergeDataCache, char *data = nullptr; PageData *meragePage = nullptr; PageDataMap *pdMap = &dataMap_[blockIndex]; - int n = 0; + uint64_t n = 0; VLOG(9) << "MergeDataCacheToDataCache dataOffset:" << dataOffset << ", len:" << len << ",dataCache chunkPos:" << chunkPos_ @@ -1996,10 +1991,8 @@ void DataCache::MergeDataCacheToDataCache(DataCachePtr mergeDataCache, void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, const std::vector &mergeDataCacheVer) { - uint64_t totalSize = 0; 
uint64_t addByte = 0; uint64_t oldSize = 0; - uint32_t pageSize = s3ClientAdaptor_->GetPageSize(); VLOG(9) << "DataCache Write() chunkPos:" << chunkPos << ",len:" << len << ",dataCache's chunkPos:" << chunkPos_ << ",actualChunkPos:" << actualChunkPos_ @@ -2021,7 +2014,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, chunkCacheManager_->rwLockWrite_.WRLock(); oldSize = actualLen_; CopyBufToDataCache(0, chunkPos + len - chunkPos_, - data + chunkPos_ - chunkPos); + data + chunkPos_ - chunkPos); AddDataBefore(chunkPos_ - chunkPos, data); addByte = actualLen_ - oldSize; s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteInc(addByte); @@ -2041,7 +2034,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, chunkCacheManager_->rwLockWrite_.WRLock(); oldSize = actualLen_; CopyBufToDataCache(0, chunkPos + len - chunkPos_, - data + chunkPos_ - chunkPos); + data + chunkPos_ - chunkPos); MergeDataCacheToDataCache( (*iter), chunkPos + len - (*iter)->GetChunkPos(), (*iter)->GetChunkPos() + (*iter)->GetLen() - chunkPos - @@ -2051,7 +2044,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteInc( addByte); chunkCacheManager_->UpdateWriteCacheMap(oldChunkPos, this); - chunkCacheManager_->rwLockWrite_.Unlock(); + chunkCacheManager_->rwLockWrite_.Unlock(); return; } } @@ -2062,7 +2055,7 @@ void DataCache::Write(uint64_t chunkPos, uint64_t len, const char *data, chunkCacheManager_->rwLockWrite_.WRLock(); oldSize = actualLen_; CopyBufToDataCache(0, chunkPos + len - chunkPos_, - data + chunkPos_ - chunkPos); + data + chunkPos_ - chunkPos); AddDataBefore(chunkPos_ - chunkPos, data); addByte = actualLen_ - oldSize; s3ClientAdaptor_->GetFsCacheManager()->DataCacheByteInc(addByte); @@ -2181,7 +2174,7 @@ void DataCache::Truncate(uint64_t size) { ((chunkPos_ + len_ - actualChunkPos_) / pageSize + 1) * pageSize; } assert(tmpActualLen == actualLen_); - (void) 
tmpActualLen; + (void)tmpActualLen; return; } @@ -2266,8 +2259,8 @@ CURVEFS_ERROR DataCache::Flush(uint64_t inodeId, bool toS3) { // inode ship to flush std::shared_ptr inodeWrapper; - ret = s3ClientAdaptor_->GetInodeCacheManager()->GetInode( - inodeId, inodeWrapper); + ret = s3ClientAdaptor_->GetInodeCacheManager()->GetInode(inodeId, + inodeWrapper); if (ret != CURVEFS_ERROR::OK) { LOG(WARNING) << "get inode fail, ret:" << ret; status_.store(DataCacheStatus::Dirty, std::memory_order_release); @@ -2285,8 +2278,9 @@ CURVEFS_ERROR DataCache::Flush(uint64_t inodeId, bool toS3) { return CURVEFS_ERROR::OK; } -CURVEFS_ERROR DataCache::PrepareFlushTasks(uint64_t inodeId, - char *data, std::vector> *s3Tasks, +CURVEFS_ERROR DataCache::PrepareFlushTasks( + uint64_t inodeId, char *data, + std::vector> *s3Tasks, std::vector> *kvCacheTasks, uint64_t *chunkId, uint64_t *writeOffset) { // allocate chunkid @@ -2298,7 +2292,6 @@ CURVEFS_ERROR DataCache::PrepareFlushTasks(uint64_t inodeId, } // generate flush task - uint64_t chunkSize = s3ClientAdaptor_->GetChunkSize(); uint64_t blockSize = s3ClientAdaptor_->GetBlockSize(); uint32_t objectPrefix = s3ClientAdaptor_->GetObjectPrefix(); uint64_t blockPos = chunkPos_ % blockSize; @@ -2311,8 +2304,6 @@ CURVEFS_ERROR DataCache::PrepareFlushTasks(uint64_t inodeId, // generate flush to disk or s3 task std::string objectName = curvefs::common::s3util::GenObjName( *chunkId, blockIndex, 0, fsId, inodeId, objectPrefix); - int ret = 0; - uint64_t start = butil::cpuwide_time_us(); auto context = std::make_shared(); context->key = objectName; context->buffer = data + (*writeOffset); @@ -2389,6 +2380,7 @@ void DataCache::FlushTaskExecute( }; SetKVCacheDone kvdone = [&](const std::shared_ptr &task) { + (void)task; kvTaskEnvent.Signal(); return; }; @@ -2420,7 +2412,7 @@ void DataCache::FlushTaskExecute( } void DataCache::PrepareS3ChunkInfo(uint64_t chunkId, uint64_t offset, - uint64_t len, S3ChunkInfo *info) { + uint64_t len, S3ChunkInfo *info) { 
info->set_chunkid(chunkId); info->set_compaction(0); info->set_offset(offset); @@ -2471,7 +2463,7 @@ void FsCacheManager::ReadCacheReleaseExecutor::ReleaseCache() { tmp.swap(retired_); } - for (auto& c : tmp) { + for (auto &c : tmp) { c->Release(); c.reset(); } @@ -2493,7 +2485,7 @@ FsCacheManager::ReadCacheReleaseExecutor::~ReadCacheReleaseExecutor() { } void FsCacheManager::ReadCacheReleaseExecutor::Release( - std::list* caches) { + std::list *caches) { std::lock_guard lk(mtx_); retired_.splice(retired_.end(), *caches); cond_.notify_one(); diff --git a/curvefs/src/client/s3/disk_cache_base.cpp b/curvefs/src/client/s3/disk_cache_base.cpp index 315275977d..dd5f644669 100644 --- a/curvefs/src/client/s3/disk_cache_base.cpp +++ b/curvefs/src/client/s3/disk_cache_base.cpp @@ -88,7 +88,7 @@ std::string DiskCacheBase::GetCacheIoFullDir() { int DiskCacheBase::CreateDir(const std::string dir) { size_t p = dir.find_last_of('/'); std::string dirPath = dir; - if (p != -1) { + if (p != -1ULL) { dirPath.erase(dirPath.begin()+p, dirPath.end()); } std::vector names; diff --git a/curvefs/src/client/s3/disk_cache_manager.cpp b/curvefs/src/client/s3/disk_cache_manager.cpp index 28b3735ad4..fb0dae3b1e 100644 --- a/curvefs/src/client/s3/disk_cache_manager.cpp +++ b/curvefs/src/client/s3/disk_cache_manager.cpp @@ -159,13 +159,12 @@ int DiskCacheManager::ClearReadCache(const std::list &files) { return cacheRead_->ClearReadCache(files); } -void DiskCacheManager::AddCache(const std::string name, - bool cacheWriteExist) { +void DiskCacheManager::AddCache(const std::string &name) { cachedObjName_->Put(name); VLOG(9) << "cache size is: " << cachedObjName_->Size(); } -bool DiskCacheManager::IsCached(const std::string name) { +bool DiskCacheManager::IsCached(const std::string &name) { if (!cachedObjName_->IsCached(name)) { VLOG(9) << "not cached, name = " << name; return false; diff --git a/curvefs/src/client/s3/disk_cache_manager.h b/curvefs/src/client/s3/disk_cache_manager.h index 
e0372e97b1..1c77fe6c83 100644 --- a/curvefs/src/client/s3/disk_cache_manager.h +++ b/curvefs/src/client/s3/disk_cache_manager.h @@ -65,7 +65,7 @@ class DiskCacheManager { const S3ClientAdaptorOption option); virtual int UmountDiskCache(); - virtual bool IsCached(const std::string name); + virtual bool IsCached(const std::string &name); /** * @brief add obj to cachedObjName @@ -73,8 +73,7 @@ class DiskCacheManager { * @param[in] cacheWriteExist whether the obj is * exist in cache write */ - void AddCache(const std::string name, - bool cacheWriteExist = true); + void AddCache(const std::string &name); int CreateDir(); std::string GetCacheReadFullDir(); diff --git a/curvefs/src/client/s3/disk_cache_manager_impl.cpp b/curvefs/src/client/s3/disk_cache_manager_impl.cpp index 255386eac6..209a2d4e97 100644 --- a/curvefs/src/client/s3/disk_cache_manager_impl.cpp +++ b/curvefs/src/client/s3/disk_cache_manager_impl.cpp @@ -151,7 +151,7 @@ int DiskCacheManagerImpl::WriteReadDirect(const std::string fileName, return ret; } // add cache. - diskCacheManager_->AddCache(fileName, false); + diskCacheManager_->AddCache(fileName); return ret; } @@ -165,7 +165,7 @@ int DiskCacheManagerImpl::Read(const std::string name, char *buf, } // read disk file maybe fail because of disk file has been removed. int ret = diskCacheManager_->ReadDiskFile(name, buf, offset, length); - if (ret < 0 || ret < length) { + if (ret < static_cast(length)) { LOG(ERROR) << "read disk file error. readRet = " << ret; ret = client_->Download(name, buf, offset, length); if (ret < 0) { diff --git a/curvefs/src/client/s3/disk_cache_read.cpp b/curvefs/src/client/s3/disk_cache_read.cpp index 9ede0c0bd1..4f452a3181 100644 --- a/curvefs/src/client/s3/disk_cache_read.cpp +++ b/curvefs/src/client/s3/disk_cache_read.cpp @@ -47,7 +47,7 @@ int DiskCacheRead::ReadDiskFile(const std::string name, char *buf, VLOG(6) << "ReadDiskFile start. 
name = " << name << ", offset = " << offset << ", length = " << length; std::string fileFullPath; - int fd, ret; + int fd; fileFullPath = GetCacheIoFullDir() + "/" + name; fd = posixWrapper_->open(fileFullPath.c_str(), O_RDONLY, MODE); if (fd < 0) { @@ -69,7 +69,7 @@ int DiskCacheRead::ReadDiskFile(const std::string name, char *buf, posixWrapper_->close(fd); return readLen; } - if (readLen < length) { + if (readLen < static_cast(length)) { LOG(ERROR) << "read disk file is not entirely. read len = " << readLen << ", but want len = " << length << ", file = " << name; posixWrapper_->close(fd); @@ -148,7 +148,7 @@ int DiskCacheRead::WriteDiskFile(const std::string fileName, const char *buf, return fd; } ssize_t writeLen = posixWrapper_->write(fd, buf, length); - if (writeLen < 0 || writeLen < length) { + if (writeLen < static_cast(length)) { LOG(ERROR) << "write disk file error. ret = " << writeLen << ", file = " << fileName; posixWrapper_->close(fd); diff --git a/curvefs/src/client/s3/disk_cache_write.cpp b/curvefs/src/client/s3/disk_cache_write.cpp index 9ae76e3d78..149ea65fff 100644 --- a/curvefs/src/client/s3/disk_cache_write.cpp +++ b/curvefs/src/client/s3/disk_cache_write.cpp @@ -336,8 +336,6 @@ int DiskCacheWrite::UploadAllCacheWriteFile() { VLOG(3) << "upload all cached write file start."; std::string fileFullPath; bool ret; - DIR *cacheWriteDir = NULL; - struct dirent *cacheWriteDirent = NULL; int doRet; fileFullPath = GetCacheIoFullDir(); ret = IsFileExist(fileFullPath); @@ -472,7 +470,7 @@ int DiskCacheWrite::WriteDiskFile(const std::string fileName, const char *buf, return fd; } ssize_t writeLen = posixWrapper_->write(fd, buf, length); - if (writeLen < 0 || writeLen < length) { + if (writeLen < static_cast(length)) { LOG(ERROR) << "write disk file error. 
ret: " << writeLen << ", file: " << fileName << ", error: " << errno; diff --git a/curvefs/src/client/warmup/warmup_manager.cpp b/curvefs/src/client/warmup/warmup_manager.cpp index a6a65a41fb..3392a8f89d 100644 --- a/curvefs/src/client/warmup/warmup_manager.cpp +++ b/curvefs/src/client/warmup/warmup_manager.cpp @@ -366,8 +366,7 @@ void WarmupManagerS3Impl::TravelChunk(fuse_ino_t ino, uint64_t chunkSize = s3Adaptor_->GetChunkSize(); uint32_t objectPrefix = s3Adaptor_->GetObjectPrefix(); uint64_t offset, len, chunkid, compaction; - for (size_t i = 0; i < chunkInfo.s3chunks_size(); i++) { - auto const &chunkinfo = chunkInfo.s3chunks(i); + for (const auto &chunkinfo : chunkInfo.s3chunks()) { auto fsId = fsInfo_->fsid(); chunkid = chunkinfo.chunkid(); compaction = chunkinfo.compaction(); @@ -375,8 +374,6 @@ void WarmupManagerS3Impl::TravelChunk(fuse_ino_t ino, len = chunkinfo.len(); // the offset in the chunk uint64_t chunkPos = offset % chunkSize; - // the offset in the block - uint64_t blockPos = chunkPos % blockSize; // the first blockIndex uint64_t blockIndexBegin = chunkPos / blockSize; @@ -468,6 +465,7 @@ void WarmupManagerS3Impl::WarmUpAllObjs( GetObjectAsyncCallBack cb = [&](const S3Adapter *adapter, const std::shared_ptr &context) { + (void)adapter; if (bgFetchStop_.load(std::memory_order_acquire)) { VLOG(9) << "need stop warmup"; cond.Signal(); diff --git a/curvefs/src/client/xattr_manager.h b/curvefs/src/client/xattr_manager.h index ec36fbb2b3..1504802f7a 100644 --- a/curvefs/src/client/xattr_manager.h +++ b/curvefs/src/client/xattr_manager.h @@ -126,13 +126,13 @@ class XattrManager { // dentry cache manager std::shared_ptr dentryManager_; - Atomic isStop_; - InterruptibleSleeper sleeper_; uint32_t listDentryLimit_; uint32_t listDentryThreads_; + + Atomic isStop_; }; } // namespace client diff --git a/curvefs/src/common/process.cpp b/curvefs/src/common/process.cpp index 2239de2dbc..2966702b27 100644 --- a/curvefs/src/common/process.cpp +++ 
b/curvefs/src/common/process.cpp @@ -24,37 +24,39 @@ #include "curvefs/src/common/process.h" -extern char** environ; +extern char **environ; namespace curvefs { namespace common { -char** Process::OsArgv_ = nullptr; -char* Process::OsArgvLast_ = nullptr; +char **Process::OsArgv_ = nullptr; +char *Process::OsArgvLast_ = nullptr; pid_t Process::SpawnProcess(ProcFunc proc) { pid_t pid = fork(); switch (pid) { - case -1: - return -1; - case 0: - proc(); - break; - default: - break; + case -1: + return -1; + case 0: + proc(); + break; + default: + break; } return pid; } -void Process::InitSetProcTitle(int argc, char* const* argv) { +void Process::InitSetProcTitle(int argc, char *const *argv) { + (void)argc; // Silence the warning + size_t size = 0; for (auto i = 0; environ[i]; i++) { size += strlen(environ[i]) + 1; } - OsArgv_ = (char**) argv; // NOLINT + OsArgv_ = (char **)argv; // NOLINT OsArgvLast_ = OsArgv_[0]; for (auto i = 0; OsArgv_[i]; i++) { if (OsArgvLast_ == OsArgv_[i]) { @@ -62,7 +64,7 @@ void Process::InitSetProcTitle(int argc, char* const* argv) { } } - char* p = new (std::nothrow) char[size]; + char *p = new (std::nothrow) char[size]; for (auto i = 0; environ[i]; i++) { if (OsArgvLast_ == environ[i]) { size = strlen(environ[i]) + 1; @@ -78,15 +80,15 @@ void Process::InitSetProcTitle(int argc, char* const* argv) { OsArgvLast_--; } -void Process::SetProcTitle(const std::string& title) { +void Process::SetProcTitle(const std::string &title) { OsArgv_[1] = NULL; strncpy(OsArgv_[0], title.c_str(), OsArgvLast_ - OsArgv_[0]); } -bool Process::InitSignals(const std::vector& signals) { +bool Process::InitSignals(const std::vector &signals) { struct sigaction sa; - for (const auto& signal : signals) { + for (const auto &signal : signals) { memset(&sa, 0, sizeof(struct sigaction)); if (signal.handler) { sa.sa_sigaction = signal.handler; diff --git a/curvefs/src/common/rpc_stream.cpp b/curvefs/src/common/rpc_stream.cpp index b01d8c0e47..27490e3d63 100644 --- 
a/curvefs/src/common/rpc_stream.cpp +++ b/curvefs/src/common/rpc_stream.cpp @@ -257,6 +257,8 @@ std::shared_ptr StreamServer::Accept(brpc::Controller* cntl) { int StreamServer::on_received_messages(brpc::StreamId id, butil::IOBuf* const buffers[], size_t size) { + (void)buffers; // Slience the warnings + (void)size; LOG(ERROR) << "on_received_messages: stream (streamId=" << id << ") in server-side should not reveice any message" << ", but now we received"; diff --git a/curvefs/src/mds/fs_storage.cpp b/curvefs/src/mds/fs_storage.cpp index 8c12cddf48..5e9d41e106 100644 --- a/curvefs/src/mds/fs_storage.cpp +++ b/curvefs/src/mds/fs_storage.cpp @@ -424,14 +424,14 @@ bool PersisKVStorage::RenameFromStorage(const FsInfoWrapper& oldFs, OpType::OpDelete, const_cast(oldKey.c_str()), const_cast(""), - oldKey.size(), + static_cast(oldKey.size()), 0}; Operation op2{ OpType::OpPut, const_cast(newKey.c_str()), const_cast(newValue.c_str()), - newKey.size(), - newValue.size()}; + static_cast(newKey.size()), + static_cast(newValue.size())}; std::vector ops{op1, op2}; int ret = storage_->TxnN(ops); if (ret != EtcdErrCode::EtcdOK) { diff --git a/curvefs/src/mds/heartbeat/heartbeat_service.cpp b/curvefs/src/mds/heartbeat/heartbeat_service.cpp index 046c106f7d..e3827390b3 100644 --- a/curvefs/src/mds/heartbeat/heartbeat_service.cpp +++ b/curvefs/src/mds/heartbeat/heartbeat_service.cpp @@ -36,6 +36,7 @@ void HeartbeatServiceImpl::MetaServerHeartbeat( const ::curvefs::mds::heartbeat::MetaServerHeartbeatRequest *request, ::curvefs::mds::heartbeat::MetaServerHeartbeatResponse *response, ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); heartbeatManager_->MetaServerHeartbeat(*request, response); } diff --git a/curvefs/src/mds/mds_service.cpp b/curvefs/src/mds/mds_service.cpp index b9544bb38a..76ea58fd5a 100644 --- a/curvefs/src/mds/mds_service.cpp +++ b/curvefs/src/mds/mds_service.cpp @@ -29,13 +29,13 @@ namespace mds { using mds::Mountpoint; -void 
MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::CreateFsRequest* request, - ::curvefs::mds::CreateFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::CreateFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::CreateFsRequest *request, + ::curvefs::mds::CreateFsResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); + const std::string &fsName = request->fsname(); uint64_t blockSize = request->blocksize(); FSType type = request->fstype(); bool enableSumInDir = request->enablesumindir(); @@ -46,59 +46,56 @@ void MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, LOG(INFO) << "CreateFs request: " << request->ShortDebugString(); // create volume fs - auto createVolumeFs = - [&]() { - if (!request->fsdetail().has_volume()) { - response->set_statuscode(FSStatusCode::PARAM_ERROR); - LOG(ERROR) - << "CreateFs request, type is volume, but has no volume" - << ", fsName = " << fsName; - return; - } - const auto& volume = request->fsdetail().volume(); - FSStatusCode status = - fsManager_->CreateFs(request, response->mutable_fsinfo()); - - if (status != FSStatusCode::OK) { - response->clear_fsinfo(); - response->set_statuscode(status); - LOG(ERROR) << "CreateFs fail, fsName = " << fsName - << ", blockSize = " << blockSize - << ", volume.volumeName = " << volume.volumename() - << ", enableSumInDir = " << enableSumInDir - << ", owner = " << request->owner() - << ", capacity = " << request->capacity() - << ", errCode = " << FSStatusCode_Name(status); - return; - } - }; + auto createVolumeFs = [&]() { + if (!request->fsdetail().has_volume()) { + response->set_statuscode(FSStatusCode::PARAM_ERROR); + LOG(ERROR) << "CreateFs request, type is volume, but has no volume" + << ", fsName = " << fsName; + return; 
+ } + const auto &volume = request->fsdetail().volume(); + FSStatusCode status = + fsManager_->CreateFs(request, response->mutable_fsinfo()); + + if (status != FSStatusCode::OK) { + response->clear_fsinfo(); + response->set_statuscode(status); + LOG(ERROR) << "CreateFs fail, fsName = " << fsName + << ", blockSize = " << blockSize + << ", volume.volumeName = " << volume.volumename() + << ", enableSumInDir = " << enableSumInDir + << ", owner = " << request->owner() + << ", capacity = " << request->capacity() + << ", errCode = " << FSStatusCode_Name(status); + return; + } + }; // create s3 fs - auto createS3Fs = - [&]() { - if (!request->fsdetail().has_s3info()) { - response->set_statuscode(FSStatusCode::PARAM_ERROR); - LOG(ERROR) << "CreateFs request, type is s3, but has no s3info" - << ", fsName = " << fsName; - return; - } - const auto& s3Info = request->fsdetail().s3info(); - FSStatusCode status = - fsManager_->CreateFs(request, response->mutable_fsinfo()); - - if (status != FSStatusCode::OK) { - response->clear_fsinfo(); - response->set_statuscode(status); - LOG(ERROR) << "CreateFs fail, fsName = " << fsName - << ", blockSize = " << blockSize - << ", s3Info.bucketname = " << s3Info.bucketname() - << ", enableSumInDir = " << enableSumInDir - << ", owner = " << request->owner() - << ", capacity = " << request->capacity() - << ", errCode = " << FSStatusCode_Name(status); - return; - } - }; + auto createS3Fs = [&]() { + if (!request->fsdetail().has_s3info()) { + response->set_statuscode(FSStatusCode::PARAM_ERROR); + LOG(ERROR) << "CreateFs request, type is s3, but has no s3info" + << ", fsName = " << fsName; + return; + } + const auto &s3Info = request->fsdetail().s3info(); + FSStatusCode status = + fsManager_->CreateFs(request, response->mutable_fsinfo()); + + if (status != FSStatusCode::OK) { + response->clear_fsinfo(); + response->set_statuscode(status); + LOG(ERROR) << "CreateFs fail, fsName = " << fsName + << ", blockSize = " << blockSize + << ", 
s3Info.bucketname = " << s3Info.bucketname() + << ", enableSumInDir = " << enableSumInDir + << ", owner = " << request->owner() + << ", capacity = " << request->capacity() + << ", errCode = " << FSStatusCode_Name(status); + return; + } + }; auto createHybridFs = [&]() { // not support now @@ -118,23 +115,22 @@ void MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, }; switch (type) { - case ::curvefs::common::FSType::TYPE_VOLUME: - createVolumeFs(); - break; - case ::curvefs::common::FSType::TYPE_S3: - createS3Fs(); - break; - case ::curvefs::common::FSType::TYPE_HYBRID: - createHybridFs(); - break; - default: - response->set_statuscode(FSStatusCode::PARAM_ERROR); - LOG(ERROR) << "CreateFs fail, fs type is invalid" - << ", fsName = " << fsName - << ", blockSize = " << blockSize << ", fsType = " << type - << ", errCode = " - << FSStatusCode_Name(FSStatusCode::PARAM_ERROR); - break; + case ::curvefs::common::FSType::TYPE_VOLUME: + createVolumeFs(); + break; + case ::curvefs::common::FSType::TYPE_S3: + createS3Fs(); + break; + case ::curvefs::common::FSType::TYPE_HYBRID: + createHybridFs(); + break; + default: + response->set_statuscode(FSStatusCode::PARAM_ERROR); + LOG(ERROR) << "CreateFs fail, fs type is invalid" + << ", fsName = " << fsName << ", blockSize = " << blockSize + << ", fsType = " << type << ", errCode = " + << FSStatusCode_Name(FSStatusCode::PARAM_ERROR); + break; } if (response->statuscode() != FSStatusCode::OK) { @@ -146,14 +142,14 @@ void MdsServiceImpl::CreateFs(::google::protobuf::RpcController* controller, << ", capacity = " << request->capacity(); } -void MdsServiceImpl::MountFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::MountFsRequest* request, - ::curvefs::mds::MountFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::MountFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::MountFsRequest *request, + ::curvefs::mds::MountFsResponse *response, + 
::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); - const Mountpoint& mount = request->mountpoint(); + const std::string &fsName = request->fsname(); + const Mountpoint &mount = request->mountpoint(); LOG(INFO) << "MountFs request, fsName = " << fsName << ", mountPoint = " << mount.ShortDebugString(); FSStatusCode status = @@ -173,14 +169,14 @@ void MdsServiceImpl::MountFs(::google::protobuf::RpcController* controller, << ", mps: " << response->mutable_fsinfo()->mountpoints_size(); } -void MdsServiceImpl::UmountFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::UmountFsRequest* request, - ::curvefs::mds::UmountFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::UmountFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::UmountFsRequest *request, + ::curvefs::mds::UmountFsResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); - const Mountpoint& mount = request->mountpoint(); + const std::string &fsName = request->fsname(); + const Mountpoint &mount = request->mountpoint(); LOG(INFO) << "UmountFs request, " << request->ShortDebugString(); FSStatusCode status = fsManager_->UmountFs(fsName, mount); if (status != FSStatusCode::OK) { @@ -196,16 +192,16 @@ void MdsServiceImpl::UmountFs(::google::protobuf::RpcController* controller, << ", mountPoint = " << mount.ShortDebugString(); } -void MdsServiceImpl::GetFsInfo(::google::protobuf::RpcController* controller, - const ::curvefs::mds::GetFsInfoRequest* request, - ::curvefs::mds::GetFsInfoResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::GetFsInfo(::google::protobuf::RpcController *controller, + const 
::curvefs::mds::GetFsInfoRequest *request, + ::curvefs::mds::GetFsInfoResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); LOG(INFO) << "GetFsInfo request: " << request->ShortDebugString(); - FsInfo* fsInfo = response->mutable_fsinfo(); + FsInfo *fsInfo = response->mutable_fsinfo(); FSStatusCode status = FSStatusCode::OK; if (request->has_fsid() && request->has_fsname()) { status = @@ -231,13 +227,13 @@ void MdsServiceImpl::GetFsInfo(::google::protobuf::RpcController* controller, << response->ShortDebugString(); } -void MdsServiceImpl::DeleteFs(::google::protobuf::RpcController* controller, - const ::curvefs::mds::DeleteFsRequest* request, - ::curvefs::mds::DeleteFsResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::DeleteFs(::google::protobuf::RpcController *controller, + const ::curvefs::mds::DeleteFsRequest *request, + ::curvefs::mds::DeleteFsResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); - brpc::Controller* cntl = static_cast(controller); - const std::string& fsName = request->fsname(); + const std::string &fsName = request->fsname(); LOG(INFO) << "DeleteFs request, fsName = " << fsName; FSStatusCode status = fsManager_->DeleteFs(fsName); response->set_statuscode(status); @@ -251,10 +247,12 @@ void MdsServiceImpl::DeleteFs(::google::protobuf::RpcController* controller, } void MdsServiceImpl::AllocateS3Chunk( - ::google::protobuf::RpcController* controller, - const ::curvefs::mds::AllocateS3ChunkRequest* request, - ::curvefs::mds::AllocateS3ChunkResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController *controller, + const ::curvefs::mds::AllocateS3ChunkRequest *request, + ::curvefs::mds::AllocateS3ChunkResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; + brpc::ClosureGuard guard(done); 
VLOG(9) << "start to allocate chunkId."; @@ -286,10 +284,13 @@ void MdsServiceImpl::AllocateS3Chunk( } void MdsServiceImpl::ListClusterFsInfo( - ::google::protobuf::RpcController* controller, - const ::curvefs::mds::ListClusterFsInfoRequest* request, - ::curvefs::mds::ListClusterFsInfoResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController *controller, + const ::curvefs::mds::ListClusterFsInfoRequest *request, + ::curvefs::mds::ListClusterFsInfoResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; + (void)request; + brpc::ClosureGuard guard(done); LOG(INFO) << "start to check cluster fs info."; fsManager_->GetAllFsInfo(response->mutable_fsinfo()); @@ -302,26 +303,28 @@ void MdsServiceImpl::RefreshSession( const ::curvefs::mds::RefreshSessionRequest *request, ::curvefs::mds::RefreshSessionResponse *response, ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard guard(done); fsManager_->RefreshSession(request, response); response->set_statuscode(FSStatusCode::OK); } void MdsServiceImpl::GetLatestTxId( - ::google::protobuf::RpcController* controller, - const GetLatestTxIdRequest* request, - GetLatestTxIdResponse* response, - ::google::protobuf::Closure* done) { + ::google::protobuf::RpcController *controller, + const GetLatestTxIdRequest *request, GetLatestTxIdResponse *response, + ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard guard(done); VLOG(3) << "GetLatestTxId [request]: " << request->DebugString(); fsManager_->GetLatestTxId(request, response); VLOG(3) << "GetLatestTxId [response]: " << response->DebugString(); } -void MdsServiceImpl::CommitTx(::google::protobuf::RpcController* controller, - const CommitTxRequest* request, - CommitTxResponse* response, - ::google::protobuf::Closure* done) { +void MdsServiceImpl::CommitTx(::google::protobuf::RpcController *controller, + const CommitTxRequest *request, + CommitTxResponse *response, + 
::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard guard(done); VLOG(3) << "CommitTx [request]: " << request->DebugString(); fsManager_->CommitTx(request, response); diff --git a/curvefs/src/mds/schedule/leaderScheduler.cpp b/curvefs/src/mds/schedule/leaderScheduler.cpp index e3b45bb8e3..ab24032949 100644 --- a/curvefs/src/mds/schedule/leaderScheduler.cpp +++ b/curvefs/src/mds/schedule/leaderScheduler.cpp @@ -201,6 +201,7 @@ bool LeaderScheduler::TransferLeaderOut(MetaServerIdType source, uint16_t replicaNum, PoolIdType poolId, Operator *op, CopySetInfo *selectedCopySet) { + (void)poolId; // find all copyset with source metaserver as its leader as the candidate std::vector candidateInfos; for (auto &cInfo : topo_->GetCopySetInfosInMetaServer(source)) { diff --git a/curvefs/src/mds/schedule/recoverScheduler.cpp b/curvefs/src/mds/schedule/recoverScheduler.cpp index f3007c9e04..be4bc16d7c 100644 --- a/curvefs/src/mds/schedule/recoverScheduler.cpp +++ b/curvefs/src/mds/schedule/recoverScheduler.cpp @@ -71,7 +71,7 @@ int RecoverScheduler::Schedule() { // alarm if over half of the replicas are offline int deadBound = copysetInfo.peers.size() - (copysetInfo.peers.size() / 2 + 1); - if (offlinelists.size() > deadBound) { + if (static_cast(offlinelists.size()) > deadBound) { LOG(ERROR) << "recoverSchdeuler find " << copysetInfo.CopySetInfoStr() << " has " << offlinelists.size() diff --git a/curvefs/src/mds/topology/topology.cpp b/curvefs/src/mds/topology/topology.cpp index a26c85fc3b..9e3acbd181 100644 --- a/curvefs/src/mds/topology/topology.cpp +++ b/curvefs/src/mds/topology/topology.cpp @@ -300,8 +300,9 @@ TopoStatusCode TopologyImpl::UpdateServer(const Server &data) { } } -TopoStatusCode TopologyImpl::UpdateMetaServerOnlineState( - const OnlineState &onlineState, MetaServerIdType id) { +TopoStatusCode +TopologyImpl::UpdateMetaServerOnlineState(const OnlineState &onlineState, + MetaServerIdType id) { ReadLockGuard 
rlockMetaServerMap(metaServerMutex_); auto it = metaServerMap_.find(id); if (it != metaServerMap_.end()) { @@ -454,8 +455,8 @@ ZoneIdType TopologyImpl::FindZone(const std::string &zoneName, return static_cast(UNINITIALIZE_ID); } -ServerIdType TopologyImpl::FindServerByHostName( - const std::string &hostName) const { +ServerIdType +TopologyImpl::FindServerByHostName(const std::string &hostName) const { ReadLockGuard rlockServer(serverMutex_); for (auto it = serverMap_.begin(); it != serverMap_.end(); it++) { if (it->second.GetHostName() == hostName) { @@ -616,8 +617,9 @@ TopoStatusCode TopologyImpl::UpdatePartition(const Partition &data) { } } -TopoStatusCode TopologyImpl::UpdatePartitionStatistic( - uint32_t partitionId, PartitionStatistic statistic) { +TopoStatusCode +TopologyImpl::UpdatePartitionStatistic(uint32_t partitionId, + PartitionStatistic statistic) { WriteLockGuard wlockPartition(partitionMutex_); auto it = partitionMap_.find(partitionId); if (it != partitionMap_.end()) { @@ -686,8 +688,8 @@ std::list TopologyImpl::GetAvailableCopysetKeyList() const { ReadLockGuard rlockCopySet(copySetMutex_); std::list result; for (auto const &it : copySetMap_) { - if (it.second.GetPartitionNum() - >= option_.maxPartitionNumberInCopyset) { + if (it.second.GetPartitionNum() >= + option_.maxPartitionNumberInCopyset) { continue; } result.push_back(it.first); @@ -700,8 +702,8 @@ std::vector TopologyImpl::GetAvailableCopysetList() const { ReadLockGuard rlockCopySet(copySetMutex_); std::vector result; for (auto const &it : copySetMap_) { - if (it.second.GetPartitionNum() - >= option_.maxPartitionNumberInCopyset) { + if (it.second.GetPartitionNum() >= + option_.maxPartitionNumberInCopyset) { continue; } result.push_back(it.second); @@ -740,8 +742,8 @@ int TopologyImpl::GetAvailableCopysetNum() const { ReadLockGuard rlockCopySet(copySetMutex_); int num = 0; for (auto const &it : copySetMap_) { - if (it.second.GetPartitionNum() - >= option_.maxPartitionNumberInCopyset) { + if 
(it.second.GetPartitionNum() >= + option_.maxPartitionNumberInCopyset) { continue; } num++; @@ -749,8 +751,8 @@ int TopologyImpl::GetAvailableCopysetNum() const { return num; } -std::list TopologyImpl::GetPartitionOfFs( - FsIdType id, PartitionFilter filter) const { +std::list +TopologyImpl::GetPartitionOfFs(FsIdType id, PartitionFilter filter) const { std::list ret; ReadLockGuard rlockPartitionMap(partitionMutex_); for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { @@ -761,8 +763,9 @@ std::list TopologyImpl::GetPartitionOfFs( return ret; } -std::list TopologyImpl::GetPartitionInfosInPool( - PoolIdType poolId, PartitionFilter filter) const { +std::list +TopologyImpl::GetPartitionInfosInPool(PoolIdType poolId, + PartitionFilter filter) const { std::list ret; ReadLockGuard rlockPartitionMap(partitionMutex_); for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { @@ -773,8 +776,8 @@ std::list TopologyImpl::GetPartitionInfosInPool( return ret; } -std::list TopologyImpl::GetPartitionInfosInCopyset( - CopySetIdType copysetId) const { +std::list +TopologyImpl::GetPartitionInfosInCopyset(CopySetIdType copysetId) const { std::list ret; ReadLockGuard rlockPartitionMap(partitionMutex_); for (auto it = partitionMap_.begin(); it != partitionMap_.end(); it++) { @@ -786,8 +789,8 @@ std::list TopologyImpl::GetPartitionInfosInCopyset( } // getList -std::vector TopologyImpl::GetMetaServerInCluster( - MetaServerFilter filter) const { +std::vector +TopologyImpl::GetMetaServerInCluster(MetaServerFilter filter) const { std::vector ret; ReadLockGuard rlockMetaServerMap(metaServerMutex_); for (auto it = metaServerMap_.begin(); it != metaServerMap_.end(); it++) { @@ -799,8 +802,8 @@ std::vector TopologyImpl::GetMetaServerInCluster( return ret; } -std::vector TopologyImpl::GetServerInCluster( - ServerFilter filter) const { +std::vector +TopologyImpl::GetServerInCluster(ServerFilter filter) const { std::vector ret; ReadLockGuard 
rlockServer(serverMutex_); for (auto it = serverMap_.begin(); it != serverMap_.end(); it++) { @@ -811,8 +814,8 @@ std::vector TopologyImpl::GetServerInCluster( return ret; } -std::vector TopologyImpl::GetZoneInCluster( - ZoneFilter filter) const { +std::vector +TopologyImpl::GetZoneInCluster(ZoneFilter filter) const { std::vector ret; ReadLockGuard rlockZone(zoneMutex_); for (auto it = zoneMap_.begin(); it != zoneMap_.end(); it++) { @@ -823,8 +826,8 @@ std::vector TopologyImpl::GetZoneInCluster( return ret; } -std::vector TopologyImpl::GetPoolInCluster( - PoolFilter filter) const { +std::vector +TopologyImpl::GetPoolInCluster(PoolFilter filter) const { std::vector ret; ReadLockGuard rlockPool(poolMutex_); for (auto it = poolMap_.begin(); it != poolMap_.end(); it++) { @@ -835,8 +838,9 @@ std::vector TopologyImpl::GetPoolInCluster( return ret; } -std::list TopologyImpl::GetMetaServerInServer( - ServerIdType id, MetaServerFilter filter) const { +std::list +TopologyImpl::GetMetaServerInServer(ServerIdType id, + MetaServerFilter filter) const { std::list ret; ReadLockGuard rlockMetaServerMap(metaServerMutex_); for (auto it = metaServerMap_.begin(); it != metaServerMap_.end(); it++) { @@ -848,8 +852,9 @@ std::list TopologyImpl::GetMetaServerInServer( return ret; } -std::list TopologyImpl::GetMetaServerInZone( - ZoneIdType id, MetaServerFilter filter) const { +std::list +TopologyImpl::GetMetaServerInZone(ZoneIdType id, + MetaServerFilter filter) const { std::list ret; std::list serverList = GetServerInZone(id); for (ServerIdType s : serverList) { @@ -859,8 +864,9 @@ std::list TopologyImpl::GetMetaServerInZone( return ret; } -std::list TopologyImpl::GetMetaServerInPool( - PoolIdType id, MetaServerFilter filter) const { +std::list +TopologyImpl::GetMetaServerInPool(PoolIdType id, + MetaServerFilter filter) const { std::list ret; std::list zoneList = GetZoneInPool(id); for (ZoneIdType z : zoneList) { @@ -870,13 +876,13 @@ std::list TopologyImpl::GetMetaServerInPool( return 
ret; } -uint32_t TopologyImpl::GetMetaServerNumInPool( - PoolIdType id, MetaServerFilter filter) const { +uint32_t TopologyImpl::GetMetaServerNumInPool(PoolIdType id, + MetaServerFilter filter) const { return GetMetaServerInPool(id, filter).size(); } -std::list TopologyImpl::GetServerInZone( - ZoneIdType id, ServerFilter filter) const { +std::list +TopologyImpl::GetServerInZone(ZoneIdType id, ServerFilter filter) const { std::list ret; ReadLockGuard rlockServer(serverMutex_); for (auto it = serverMap_.begin(); it != serverMap_.end(); it++) { @@ -899,8 +905,8 @@ std::list TopologyImpl::GetZoneInPool(PoolIdType id, return ret; } -std::vector TopologyImpl::GetCopySetsInPool( - PoolIdType poolId, CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetsInPool(PoolIdType poolId, CopySetFilter filter) const { std::vector ret; ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -911,14 +917,14 @@ std::vector TopologyImpl::GetCopySetsInPool( return ret; } -uint32_t TopologyImpl::GetCopySetNumInPool( - PoolIdType poolId, CopySetFilter filter) const { +uint32_t TopologyImpl::GetCopySetNumInPool(PoolIdType poolId, + CopySetFilter filter) const { return GetCopySetsInPool(poolId, filter).size(); } -std::vector TopologyImpl::GetCopySetsInCluster( - CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetsInCluster(CopySetFilter filter) const { std::vector ret; ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -929,8 +935,9 @@ std::vector TopologyImpl::GetCopySetsInCluster( return ret; } -std::vector TopologyImpl::GetCopySetInfosInPool( - PoolIdType poolId, CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetInfosInPool(PoolIdType poolId, + CopySetFilter filter) const { std::vector ret; ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -941,8 +948,9 @@ std::vector TopologyImpl::GetCopySetInfosInPool( return ret; } -std::vector 
TopologyImpl::GetCopySetsInMetaServer( - MetaServerIdType id, CopySetFilter filter) const { +std::vector +TopologyImpl::GetCopySetsInMetaServer(MetaServerIdType id, + CopySetFilter filter) const { std::vector ret; ReadLockGuard rlockCopySet(copySetMutex_); for (const auto &it : copySetMap_) { @@ -1052,8 +1060,9 @@ TopoStatusCode TopologyImpl::Init(const TopologyOption &option) { } // for upgrade and keep compatibility - // the old version have no partitionIndex in etcd, so need update here of upgrade // NOLINT - // if the fs in old cluster already delete some partitions, it is incompatible. // NOLINT + // the old version have no partitionIndex in etcd, so need update here of + // upgrade // NOLINT if the fs in old cluster already delete some + // partitions, it is incompatible. // NOLINT if (!RefreshPartitionIndexOfFS(partitionMap_)) { LOG(ERROR) << "[TopologyImpl::init], RefreshPartitionIndexOfFS fail."; return TopoStatusCode::TOPO_STORGE_FAIL; @@ -1278,8 +1287,8 @@ bool TopologyImpl::GetClusterInfo(ClusterInformation *info) { } // update partition tx, and ensure atomicity -TopoStatusCode TopologyImpl::UpdatePartitionTxIds( - std::vector txIds) { +TopoStatusCode +TopologyImpl::UpdatePartitionTxIds(std::vector txIds) { std::vector partitions; WriteLockGuard wlockPartition(partitionMutex_); for (auto item : txIds) { @@ -1380,20 +1389,20 @@ uint32_t TopologyImpl::GetLeaderNumInMetaserver(MetaServerIdType id) const { } void TopologyImpl::GetAvailableMetaserversUnlock( - std::vector* vec) { + std::vector *vec) { for (const auto &it : metaServerMap_) { - if (it.second.GetOnlineState() == OnlineState::ONLINE - && it.second.GetMetaServerSpace().IsMetaserverResourceAvailable() - && GetCopysetNumInMetaserver(it.first) - < option_.maxCopysetNumInMetaserver) { + if (it.second.GetOnlineState() == OnlineState::ONLINE && + it.second.GetMetaServerSpace().IsMetaserverResourceAvailable() && + GetCopysetNumInMetaserver(it.first) < + option_.maxCopysetNumInMetaserver) { 
vec->emplace_back(&(it.second)); } } } TopoStatusCode TopologyImpl::GenCandidateMapUnlock( - PoolIdType poolId, - std::map>* candidateMap) { + PoolIdType poolId, + std::map> *candidateMap) { // 1. get all online and available metaserver std::vector metaservers; GetAvailableMetaserversUnlock(&metaservers); @@ -1402,7 +1411,7 @@ TopoStatusCode TopologyImpl::GenCandidateMapUnlock( Server server; if (!GetServer(serverId, &server)) { LOG(ERROR) << "get server failed when choose metaservers," - << " the serverId = " << serverId; + << " the serverId = " << serverId; return TopoStatusCode::TOPO_SERVER_NOT_FOUND; } @@ -1418,8 +1427,8 @@ TopoStatusCode TopologyImpl::GenCandidateMapUnlock( } TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( - PoolIdType poolId, uint16_t replicaNum, - std::list* copysetList) { + PoolIdType poolId, uint16_t replicaNum, + std::list *copysetList) { // 1. genarate candidateMap std::map> candidateMap; auto ret = GenCandidateMapUnlock(poolId, &candidateMap); @@ -1432,8 +1441,9 @@ TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( // 2. 
return error if candidate map has no enough replicaNum if (candidateMap.size() < replicaNum) { LOG(WARNING) << "can not find available metaserver for copyset, " - << "poolId = " << poolId << " need replica num = " - << replicaNum << ", but only has available zone num = " + << "poolId = " << poolId + << " need replica num = " << replicaNum + << ", but only has available zone num = " << candidateMap.size(); return TopoStatusCode::TOPO_METASERVER_NOT_FOUND; } @@ -1461,13 +1471,13 @@ TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( std::shuffle(zoneIds.begin(), zoneIds.end(), randomGenerator); std::vector msIds; - for (int i = 0; i < minSize; i++) { + for (uint32_t i = 0; i < minSize; i++) { for (const auto &zoneId : zoneIds) { msIds.push_back(candidateMap[zoneId][i]); } } - for (int i = 0; i < msIds.size() / replicaNum; i++) { + for (size_t i = 0; i < msIds.size() / replicaNum; i++) { CopysetCreateInfo copysetInfo; copysetInfo.poolId = poolId; copysetInfo.copysetId = UNINITIALIZE_ID; @@ -1485,7 +1495,7 @@ TopoStatusCode TopologyImpl::GenCopysetAddrBatchForPool( // Check if there is no copy on the pool. // Generate copyset on the empty copyset pools. 
void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( - std::list* copysetList) { + std::list *copysetList) { for (const auto &it : poolMap_) { PoolIdType poolId = it.first; uint32_t metaserverNum = GetMetaServerNumInPool(poolId); @@ -1494,7 +1504,7 @@ void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( } uint32_t copysetNum = GetCopySetNumInPool(poolId); - if (copysetNum !=0) { + if (copysetNum != 0) { continue; } @@ -1505,17 +1515,16 @@ void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( continue; } std::list tempCopysetList; - TopoStatusCode ret = GenCopysetAddrBatchForPool(poolId, replicaNum, - &tempCopysetList); + TopoStatusCode ret = + GenCopysetAddrBatchForPool(poolId, replicaNum, &tempCopysetList); if (TopoStatusCode::TOPO_OK == ret) { LOG(INFO) << "Initial Generate copyset addr for pool " << poolId << " success, gen copyset num = " << tempCopysetList.size(); copysetList->splice(copysetList->end(), tempCopysetList); } else { - LOG(WARNING) << "Initial Generate copyset addr for pool " - << poolId << " fail, statusCode = " - << TopoStatusCode_Name(ret); + LOG(WARNING) << "Initial Generate copyset addr for pool " << poolId + << " fail, statusCode = " << TopoStatusCode_Name(ret); } } @@ -1529,10 +1538,10 @@ void TopologyImpl::GenCopysetIfPoolEmptyUnlocked( // 3. 
according to the pool order of step 2, generate copyset add in the pool // in turn until enough copyset add is generated TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( - uint32_t needCreateNum, std::list* copysetList) { + uint32_t needCreateNum, std::list *copysetList) { LOG(INFO) << "GenSubsequentCopysetAddrBatch needCreateNum = " - << needCreateNum << ", copysetList size = " - << copysetList->size() << " begin"; + << needCreateNum << ", copysetList size = " << copysetList->size() + << " begin"; MetaServerFilter filter = [](const MetaServer &ms) { return ms.GetOnlineState() == OnlineState::ONLINE; @@ -1546,8 +1555,8 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( } // sort pool list by copyset average num - std::sort(poolList.begin(), poolList.end(), - [=](const Pool& a, const Pool& b) { + std::sort( + poolList.begin(), poolList.end(), [=](const Pool &a, const Pool &b) { PoolIdType poolId1 = a.GetId(); PoolIdType poolId2 = b.GetId(); uint32_t copysetNum1 = GetCopySetNumInPool(poolId1); @@ -1564,8 +1573,8 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( PoolIdType poolId = it->GetId(); uint16_t replicaNum = it->GetReplicaNum(); std::list tempCopysetList; - TopoStatusCode ret = GenCopysetAddrBatchForPool(poolId, - replicaNum, &tempCopysetList); + TopoStatusCode ret = GenCopysetAddrBatchForPool(poolId, replicaNum, + &tempCopysetList); if (TopoStatusCode::TOPO_OK == ret) { copysetList->splice(copysetList->end(), tempCopysetList); if (copysetList->size() >= needCreateNum) { @@ -1573,9 +1582,10 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( } it++; } else { - LOG(WARNING) << "Generate " << needCreateNum - << " copyset addr for pool " << poolId - << "fail, statusCode = " << TopoStatusCode_Name(ret); + LOG(WARNING) + << "Generate " << needCreateNum << " copyset addr for pool " + << poolId + << "fail, statusCode = " << TopoStatusCode_Name(ret); it = poolList.erase(it); } } @@ -1596,8 
+1606,9 @@ TopoStatusCode TopologyImpl::GenSubsequentCopysetAddrBatchUnlocked( // in this step is enough, return the list. // 2. Sort the pools according to the average number of copies, // and traverse each pool to create copies until the number is sufficient. -TopoStatusCode TopologyImpl::GenCopysetAddrBatch(uint32_t needCreateNum, - std::list* copysetList) { +TopoStatusCode +TopologyImpl::GenCopysetAddrBatch(uint32_t needCreateNum, + std::list *copysetList) { ReadLockGuard rlockPool(poolMutex_); ReadLockGuard rlockMetaserver(metaServerMutex_); ReadLockGuard rlockCopyset(copySetMutex_); @@ -1680,14 +1691,14 @@ bool TopologyImpl::RefreshPartitionIndexOfFS( std::list TopologyImpl::ListMemcacheServers() const { ReadLockGuard rlockMemcacheCluster(memcacheClusterMutex_); std::list ret; - for (auto const& cluster : memcacheClusterMap_) { - auto const& servers = cluster.second.GetServers(); + for (auto const &cluster : memcacheClusterMap_) { + auto const &servers = cluster.second.GetServers(); ret.insert(ret.begin(), servers.cbegin(), servers.cend()); } return ret; } -TopoStatusCode TopologyImpl::AddMemcacheCluster(const MemcacheCluster& data) { +TopoStatusCode TopologyImpl::AddMemcacheCluster(const MemcacheCluster &data) { WriteLockGuard wlockMemcacheCluster(memcacheClusterMutex_); // storage_ to storage TopoStatusCode ret = TopoStatusCode::TOPO_OK; @@ -1700,7 +1711,7 @@ TopoStatusCode TopologyImpl::AddMemcacheCluster(const MemcacheCluster& data) { return ret; } -TopoStatusCode TopologyImpl::AddMemcacheCluster(MemcacheCluster&& data) { +TopoStatusCode TopologyImpl::AddMemcacheCluster(MemcacheCluster &&data) { WriteLockGuard wlockMemcacheCluster(memcacheClusterMutex_); // storage_ to storage TopoStatusCode ret = TopoStatusCode::TOPO_OK; @@ -1716,14 +1727,15 @@ TopoStatusCode TopologyImpl::AddMemcacheCluster(MemcacheCluster&& data) { std::list TopologyImpl::ListMemcacheClusters() const { std::list ret; ReadLockGuard rlockMemcacheCluster(memcacheClusterMutex_); - for 
(auto const& cluster : memcacheClusterMap_) { + for (auto const &cluster : memcacheClusterMap_) { ret.emplace_back(cluster.second); } return ret; } -TopoStatusCode TopologyImpl::AllocOrGetMemcacheCluster( - FsIdType fsId, MemcacheClusterInfo* cluster) { +TopoStatusCode +TopologyImpl::AllocOrGetMemcacheCluster(FsIdType fsId, + MemcacheClusterInfo *cluster) { TopoStatusCode ret = TopoStatusCode::TOPO_OK; WriteLockGuard wlockFs2MemcacheCluster(fs2MemcacheClusterMutex_); ReadLockGuard rlockMemcacheCluster(memcacheClusterMutex_); diff --git a/curvefs/src/mds/topology/topology_item.cpp b/curvefs/src/mds/topology/topology_item.cpp index e62466b32b..b1774b701a 100644 --- a/curvefs/src/mds/topology/topology_item.cpp +++ b/curvefs/src/mds/topology/topology_item.cpp @@ -24,6 +24,7 @@ #include #include +#include #include "json/json.h" #include "src/common/string_util.h" @@ -53,11 +54,15 @@ bool ClusterInformation::ParseFromString(const std::string &value) { bool Pool::TransRedundanceAndPlaceMentPolicyFromJsonStr( const std::string &jsonStr, RedundanceAndPlaceMentPolicy *rap) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value rapJson; - if (!reader.parse(jsonStr, rapJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + &rapJson, &errormsg)) { return false; } + if (!rapJson["replicaNum"].isNull()) { rap->replicaNum = rapJson["replicaNum"].asInt(); } else { @@ -204,13 +209,17 @@ std::string CopySetInfo::GetCopySetMembersStr() const { } bool CopySetInfo::SetCopySetMembersByJson(const std::string &jsonStr) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value copysetMemJson; - if (!reader.parse(jsonStr, copysetMemJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + ©setMemJson, &errormsg)) { return false; } + peers_.clear(); - for 
(int i = 0; i < copysetMemJson.size(); i++) { + for (uint32_t i = 0; i < copysetMemJson.size(); i++) { if (copysetMemJson[i].isInt()) { peers_.insert(copysetMemJson[i].asInt()); } else { @@ -299,14 +308,14 @@ common::PartitionInfo Partition::ToPartitionInfo() { return info; } -bool MemcacheCluster::ParseFromString(const std::string& value) { +bool MemcacheCluster::ParseFromString(const std::string &value) { MemcacheClusterInfo data; bool ret = data.ParseFromString(value); (*this) = static_cast(data); return ret; } -bool MemcacheCluster::SerializeToString(std::string* value) const { +bool MemcacheCluster::SerializeToString(std::string *value) const { return static_cast(*this).SerializeToString(value); } diff --git a/curvefs/src/mds/topology/topology_manager.cpp b/curvefs/src/mds/topology/topology_manager.cpp index 00448ffd02..c546857bbc 100644 --- a/curvefs/src/mds/topology/topology_manager.cpp +++ b/curvefs/src/mds/topology/topology_manager.cpp @@ -76,13 +76,13 @@ void TopologyManager::RegistMetaServer(const MetaServerRegistRequest *request, response->set_metaserverid(ms.GetId()); response->set_token(ms.GetToken()); LOG(WARNING) << "Received duplicated registMetaServer message, " - << "metaserver is empty, hostip = " - << hostIp << ", port = " << port; + << "metaserver is empty, hostip = " << hostIp + << ", port = " << port; } else { response->set_statuscode(TopoStatusCode::TOPO_METASERVER_EXIST); LOG(ERROR) << "Received duplicated registMetaServer message, " - << "metaserver is not empty, hostip = " - << hostIp << ", port = " << port; + << "metaserver is not empty, hostip = " << hostIp + << ", port = " << port; } return; @@ -577,6 +577,7 @@ void TopologyManager::GetPool(const GetPoolRequest *request, void TopologyManager::ListPool(const ListPoolRequest *request, ListPoolResponse *response) { + (void)request; response->set_statuscode(TopoStatusCode::TOPO_OK); auto poolList = topology_->GetPoolInCluster(); for (PoolIdType id : poolList) { @@ -598,15 +599,17 @@ void 
TopologyManager::ListPool(const ListPoolRequest *request, } } -TopoStatusCode TopologyManager::CreatePartitionsAndGetMinPartition( - FsIdType fsId, PartitionInfo *partition) { +TopoStatusCode +TopologyManager::CreatePartitionsAndGetMinPartition(FsIdType fsId, + PartitionInfo *partition) { CreatePartitionRequest request; CreatePartitionResponse response; request.set_fsid(fsId); request.set_count(option_.createPartitionNumber); CreatePartitions(&request, &response); if (TopoStatusCode::TOPO_OK != response.statuscode() || - response.partitioninfolist_size() != request.count()) { + response.partitioninfolist_size() != + static_cast(request.count())) { return TopoStatusCode::TOPO_CREATE_PARTITION_FAIL; } // return the min one @@ -629,9 +632,8 @@ TopoStatusCode TopologyManager::CreatePartitionsAndGetMinPartition( return TopoStatusCode::TOPO_OK; } -TopoStatusCode TopologyManager::CreatePartitionOnCopyset(FsIdType fsId, - const CopySetInfo& copyset, - PartitionInfo *info) { +TopoStatusCode TopologyManager::CreatePartitionOnCopyset( + FsIdType fsId, const CopySetInfo ©set, PartitionInfo *info) { // get copyset members std::set copysetMembers = copyset.GetCopySetMembers(); std::set copysetMemberAddr; @@ -639,7 +641,7 @@ TopoStatusCode TopologyManager::CreatePartitionOnCopyset(FsIdType fsId, MetaServer metaserver; if (topology_->GetMetaServer(item, &metaserver)) { std::string addr = metaserver.GetInternalIp() + ":" + - std::to_string(metaserver.GetInternalPort()); + std::to_string(metaserver.GetInternalPort()); copysetMemberAddr.emplace(addr); } else { LOG(WARNING) << "Get metaserver info failed."; @@ -662,24 +664,23 @@ TopoStatusCode TopologyManager::CreatePartitionOnCopyset(FsIdType fsId, << ", " << copysetId << "), partitionId = " << partitionId << ", start = " << idStart << ", end = " << idEnd; - FSStatusCode retcode = metaserverClient_->CreatePartition( - fsId, poolId, copysetId, partitionId, idStart, idEnd, - copysetMemberAddr); + FSStatusCode retcode = + 
metaserverClient_->CreatePartition(fsId, poolId, copysetId, partitionId, + idStart, idEnd, copysetMemberAddr); if (FSStatusCode::OK != retcode) { LOG(ERROR) << "CreatePartition failed, " - << "fsId = " << fsId << ", poolId = " << poolId - << ", copysetId = " << copysetId - << ", partitionId = " << partitionId; + << "fsId = " << fsId << ", poolId = " << poolId + << ", copysetId = " << copysetId + << ", partitionId = " << partitionId; return TopoStatusCode::TOPO_CREATE_PARTITION_FAIL; } - Partition partition(fsId, poolId, copysetId, partitionId, idStart, - idEnd); + Partition partition(fsId, poolId, copysetId, partitionId, idStart, idEnd); TopoStatusCode ret = topology_->AddPartition(partition); if (TopoStatusCode::TOPO_OK != ret) { // TODO(wanghai): delete partition on metaserver LOG(ERROR) << "Add partition failed after create partition." - << " error code = " << ret; + << " error code = " << ret; return ret; } @@ -705,7 +706,7 @@ void TopologyManager::CreatePartitions(const CreatePartitionRequest *request, // get lock and avoid multiMountpoint create concurrently NameLockGuard lock(createPartitionMutex_, std::to_string(fsId)); - while (partitionInfoList->size() < count) { + while (partitionInfoList->size() < static_cast(count)) { int32_t createNum = count - topology_->GetAvailableCopysetNum(); // if available copyset is not enough, create copyset first if (createNum > 0) { @@ -718,28 +719,28 @@ void TopologyManager::CreatePartitions(const CreatePartitionRequest *request, } std::vector copysetVec = - topology_->GetAvailableCopysetList(); + topology_->GetAvailableCopysetList(); if (copysetVec.size() == 0) { LOG(ERROR) << "Get available copyset fail when create partition."; response->set_statuscode( - TopoStatusCode::TOPO_GET_AVAILABLE_COPYSET_ERROR); + TopoStatusCode::TOPO_GET_AVAILABLE_COPYSET_ERROR); return; } // sort copysetVec by partition num desent std::sort(copysetVec.begin(), copysetVec.end(), - [](const CopySetInfo& a, const CopySetInfo& b) { - return 
a.GetPartitionNum() < b.GetPartitionNum(); - }); + [](const CopySetInfo &a, const CopySetInfo &b) { + return a.GetPartitionNum() < b.GetPartitionNum(); + }); uint32_t copysetNum = copysetVec.size(); - int32_t tempCount = std::min(copysetNum, - count - partitionInfoList->size()); + int32_t tempCount = + std::min(copysetNum, count - partitionInfoList->size()); for (int i = 0; i < tempCount; i++) { PartitionInfo *info = partitionInfoList->Add(); - TopoStatusCode ret = CreatePartitionOnCopyset(fsId, - copysetVec[i], info); + TopoStatusCode ret = + CreatePartitionOnCopyset(fsId, copysetVec[i], info); if (ret != TopoStatusCode::TOPO_OK) { LOG(ERROR) << "create partition on copyset fail, fsId = " << fsId << ", poolId = " << copysetVec[i].GetPoolId() @@ -764,9 +765,8 @@ TopoStatusCode TopologyManager::DeletePartition(uint32_t partitionId) { return TopoStatusCode::TOPO_OK; } -void TopologyManager::DeletePartition( - const DeletePartitionRequest *request, - DeletePartitionResponse *response) { +void TopologyManager::DeletePartition(const DeletePartitionRequest *request, + DeletePartitionResponse *response) { uint32_t partitionId = request->partitionid(); Partition partition; if (!topology_->GetPartition(partitionId, &partition)) { @@ -795,15 +795,14 @@ void TopologyManager::DeletePartition( return; } - auto fret = metaserverClient_->DeletePartition(poolId, copysetId, - partitionId, copysetMemberAddr); + auto fret = metaserverClient_->DeletePartition( + poolId, copysetId, partitionId, copysetMemberAddr); if (fret == FSStatusCode::OK || fret == FSStatusCode::UNDER_DELETING) { - ret = topology_->UpdatePartitionStatus( - partitionId, PartitionStatus::DELETING); + ret = topology_->UpdatePartitionStatus(partitionId, + PartitionStatus::DELETING); if (ret != TopoStatusCode::TOPO_OK) { LOG(ERROR) << "DeletePartition failed, partitionId = " - << partitionId << ", ret = " - << TopoStatusCode_Name(ret); + << partitionId << ", ret = " << TopoStatusCode_Name(ret); } 
response->set_statuscode(ret); return; @@ -869,8 +868,8 @@ TopoStatusCode TopologyManager::CreateEnoughCopyset(int32_t createNum) { return TopoStatusCode::TOPO_OK; } -TopoStatusCode TopologyManager::CreateCopyset( - const CopysetCreateInfo ©set) { +TopoStatusCode +TopologyManager::CreateCopyset(const CopysetCreateInfo ©set) { LOG(INFO) << "Create new copyset: " << copyset.ToString(); // translate metaserver id to metaserver addr std::set metaServerAddrs; @@ -886,9 +885,8 @@ TopoStatusCode TopologyManager::CreateCopyset( } } - if (TopoStatusCode::TOPO_OK != - topology_->AddCopySetCreating( - CopySetKey(copyset.poolId, copyset.copysetId))) { + if (TopoStatusCode::TOPO_OK != topology_->AddCopySetCreating(CopySetKey( + copyset.poolId, copyset.copysetId))) { LOG(WARNING) << "the copyset key = (" << copyset.poolId << ", " << copyset.copysetId << ") is already creating."; } @@ -918,8 +916,8 @@ TopoStatusCode TopologyManager::CreateCopyset( return TopoStatusCode::TOPO_OK; } -TopoStatusCode TopologyManager::CommitTxId( - const std::vector& txIds) { +TopoStatusCode +TopologyManager::CommitTxId(const std::vector &txIds) { if (txIds.size() == 0) { return TopoStatusCode::TOPO_OK; } @@ -1004,7 +1002,7 @@ void TopologyManager::GetLatestPartitionsTxId( for (auto iter = txIds.begin(); iter != txIds.end(); iter++) { Partition out; - topology_ ->GetPartition(iter->partitionid(), &out); + topology_->GetPartition(iter->partitionid(), &out); if (out.GetTxId() != iter->txid()) { PartitionTxId tmp; tmp.set_partitionid(iter->partitionid()); @@ -1015,7 +1013,7 @@ void TopologyManager::GetLatestPartitionsTxId( } void TopologyManager::ListPartitionOfFs(FsIdType fsId, - std::list* list) { + std::list *list) { for (auto &partition : topology_->GetPartitionOfFs(fsId)) { list->emplace_back(partition.ToPartitionInfo()); } @@ -1060,9 +1058,10 @@ void TopologyManager::GetCopysetOfPartition( response->set_statuscode(TopoStatusCode::TOPO_OK); } -TopoStatusCode TopologyManager::GetCopysetMembers( - 
const PoolIdType poolId, const CopySetIdType copysetId, - std::set *addrs) { +TopoStatusCode +TopologyManager::GetCopysetMembers(const PoolIdType poolId, + const CopySetIdType copysetId, + std::set *addrs) { CopySetKey key(poolId, copysetId); CopySetInfo info; if (topology_->GetCopySet(key, &info)) { @@ -1086,9 +1085,9 @@ TopoStatusCode TopologyManager::GetCopysetMembers( return TopoStatusCode::TOPO_OK; } -void TopologyManager::GetCopysetInfo(const uint32_t& poolId, - const uint32_t& copysetId, - CopysetValue* copysetValue) { +void TopologyManager::GetCopysetInfo(const uint32_t &poolId, + const uint32_t ©setId, + CopysetValue *copysetValue) { // default is ok, when find error set to error code copysetValue->set_statuscode(TopoStatusCode::TOPO_OK); CopySetKey key(poolId, copysetId); @@ -1098,10 +1097,10 @@ void TopologyManager::GetCopysetInfo(const uint32_t& poolId, valueCopysetInfo->set_poolid(info.GetPoolId()); valueCopysetInfo->set_copysetid(info.GetId()); // set peers - for (auto const& msId : info.GetCopySetMembers()) { + for (auto const &msId : info.GetCopySetMembers()) { MetaServer ms; if (topology_->GetMetaServer(msId, &ms)) { - common::Peer* peer = valueCopysetInfo->add_peers(); + common::Peer *peer = valueCopysetInfo->add_peers(); peer->set_id(ms.GetId()); peer->set_address(BuildPeerIdWithIpPort(ms.GetInternalIp(), ms.GetInternalPort())); @@ -1133,12 +1132,12 @@ void TopologyManager::GetCopysetInfo(const uint32_t& poolId, valueCopysetInfo->set_allocated_leaderpeer(peer); // set partitioninfolist - for (auto const& i : info.GetPartitionIds()) { + for (auto const &i : info.GetPartitionIds()) { Partition tmp; if (!topology_->GetPartition(i, &tmp)) { - LOG(WARNING) << "poolId=" << poolId - << " copysetid=" << copysetId - << " has pattition error, partitionId=" << i; + LOG(WARNING) + << "poolId=" << poolId << " copysetid=" << copysetId + << " has pattition error, partitionId=" << i; copysetValue->set_statuscode( TopoStatusCode::TOPO_PARTITION_NOT_FOUND); } else 
{ @@ -1167,9 +1166,9 @@ void TopologyManager::GetCopysetInfo(const uint32_t& poolId, } } -void TopologyManager::GetCopysetsInfo(const GetCopysetsInfoRequest* request, - GetCopysetsInfoResponse* response) { - for (auto const& i : request->copysetkeys()) { +void TopologyManager::GetCopysetsInfo(const GetCopysetsInfoRequest *request, + GetCopysetsInfoResponse *response) { + for (auto const &i : request->copysetkeys()) { GetCopysetInfo(i.poolid(), i.copysetid(), response->add_copysetvalues()); } @@ -1185,10 +1184,10 @@ void TopologyManager::ListCopysetsInfo(ListCopysetInfoResponse *response) { valueCopysetInfo->set_poolid(i.GetPoolId()); valueCopysetInfo->set_copysetid(i.GetId()); // set peers - for (auto const& msId : i.GetCopySetMembers()) { + for (auto const &msId : i.GetCopySetMembers()) { MetaServer ms; if (topology_->GetMetaServer(msId, &ms)) { - common::Peer* peer = valueCopysetInfo->add_peers(); + common::Peer *peer = valueCopysetInfo->add_peers(); peer->set_id(ms.GetId()); peer->set_address(BuildPeerIdWithIpPort(ms.GetInternalIp(), ms.GetInternalPort())); @@ -1220,12 +1219,12 @@ void TopologyManager::ListCopysetsInfo(ListCopysetInfoResponse *response) { valueCopysetInfo->set_allocated_leaderpeer(peer); // set partitioninfolist - for (auto const& j : i.GetPartitionIds()) { + for (auto const &j : i.GetPartitionIds()) { Partition tmp; if (!topology_->GetPartition(j, &tmp)) { - LOG(WARNING) << "poolId=" << i.GetPoolId() - << " copysetid=" << i.GetId() - << " has pattition error, partitionId=" << j; + LOG(WARNING) + << "poolId=" << i.GetPoolId() << " copysetid=" << i.GetId() + << " has pattition error, partitionId=" << j; copysetValue->set_statuscode( TopoStatusCode::TOPO_PARTITION_NOT_FOUND); } else { @@ -1259,10 +1258,10 @@ void TopologyManager::GetTopology(ListTopologyResponse *response) { ListMetaserverOfCluster(response->mutable_metaservers()); } -void TopologyManager::ListZone(ListZoneResponse* response) { +void TopologyManager::ListZone(ListZoneResponse 
*response) { response->set_statuscode(TopoStatusCode::TOPO_OK); auto zoneIdVec = topology_->GetZoneInCluster(); - for (auto const& zoneId : zoneIdVec) { + for (auto const &zoneId : zoneIdVec) { Zone zone; if (topology_->GetZone(zoneId, &zone)) { auto zoneInfo = response->add_zoneinfos(); @@ -1278,10 +1277,10 @@ void TopologyManager::ListZone(ListZoneResponse* response) { } } -void TopologyManager::ListServer(ListServerResponse* response) { +void TopologyManager::ListServer(ListServerResponse *response) { response->set_statuscode(TopoStatusCode::TOPO_OK); auto serverIdVec = topology_->GetServerInCluster(); - for (auto const& serverId : serverIdVec) { + for (auto const &serverId : serverIdVec) { Server server; if (topology_->GetServer(serverId, &server)) { auto serverInfo = response->add_serverinfos(); @@ -1303,13 +1302,13 @@ void TopologyManager::ListServer(ListServerResponse* response) { } void TopologyManager::ListMetaserverOfCluster( - ListMetaServerResponse* response) { + ListMetaServerResponse *response) { response->set_statuscode(TopoStatusCode::TOPO_OK); auto metaserverIdList = topology_->GetMetaServerInCluster(); - for (auto const& id : metaserverIdList) { + for (auto const &id : metaserverIdList) { MetaServer ms; if (topology_->GetMetaServer(id, &ms)) { - MetaServerInfo* msInfo = response->add_metaserverinfos(); + MetaServerInfo *msInfo = response->add_metaserverinfos(); msInfo->set_metaserverid(ms.GetId()); msInfo->set_hostname(ms.GetHostName()); msInfo->set_internalip(ms.GetInternalIp()); @@ -1328,14 +1327,15 @@ void TopologyManager::ListMetaserverOfCluster( } } -TopoStatusCode TopologyManager::UpdatePartitionStatus( - PartitionIdType partitionId, PartitionStatus status) { +TopoStatusCode +TopologyManager::UpdatePartitionStatus(PartitionIdType partitionId, + PartitionStatus status) { return topology_->UpdatePartitionStatus(partitionId, status); } void TopologyManager::RegistMemcacheCluster( - const RegistMemcacheClusterRequest* request, - 
RegistMemcacheClusterResponse* response) { + const RegistMemcacheClusterRequest *request, + RegistMemcacheClusterResponse *response) { response->set_statuscode(TopoStatusCode::TOPO_OK); // register memcacheCluster as server WriteLockGuard lock(registMemcacheClusterMutex_); @@ -1345,7 +1345,7 @@ void TopologyManager::RegistMemcacheCluster( MemcacheCluster mCluster( 0, std::list(request->servers().begin(), request->servers().end())); - for (auto const& cluster : clusterList) { + for (auto const &cluster : clusterList) { mCluster.SetId(cluster.GetId()); if (cluster == mCluster) { // has registered memcache cluster @@ -1357,10 +1357,8 @@ void TopologyManager::RegistMemcacheCluster( // Guarantee the uniqueness of memcacheServer std::list serverRegisted = topology_->ListMemcacheServers(); std::list serverList; - for (auto const& server : request->servers()) { - auto cmp = [server](const MemcacheServer& ms) { - return ms == server; - }; + for (auto const &server : request->servers()) { + auto cmp = [server](const MemcacheServer &ms) { return ms == server; }; if (std::find_if(serverRegisted.begin(), serverRegisted.end(), cmp) != serverRegisted.end()) { LOG(ERROR) << "Regist MemcacheCluster failed! 
Server[" @@ -1388,11 +1386,11 @@ void TopologyManager::RegistMemcacheCluster( } void TopologyManager::ListMemcacheCluster( - ListMemcacheClusterResponse* response) { + ListMemcacheClusterResponse *response) { std::list clusterList = topology_->ListMemcacheClusters(); if (!clusterList.empty()) { response->set_statuscode(TopoStatusCode::TOPO_OK); - for (auto& cluster : clusterList) { + for (auto &cluster : clusterList) { (*response->add_memcacheclusters()) = std::move(cluster); } } else { @@ -1402,8 +1400,8 @@ void TopologyManager::ListMemcacheCluster( } void TopologyManager::AllocOrGetMemcacheCluster( - const AllocOrGetMemcacheClusterRequest* request, - AllocOrGetMemcacheClusterResponse* response) { + const AllocOrGetMemcacheClusterRequest *request, + AllocOrGetMemcacheClusterResponse *response) { auto statusCode = topology_->AllocOrGetMemcacheCluster( request->fsid(), response->mutable_cluster()); response->set_statuscode(statusCode); diff --git a/curvefs/src/mds/topology/topology_metric.cpp b/curvefs/src/mds/topology/topology_metric.cpp index 592d8d51b0..042c2896bd 100644 --- a/curvefs/src/mds/topology/topology_metric.cpp +++ b/curvefs/src/mds/topology/topology_metric.cpp @@ -40,8 +40,11 @@ std::map gFsMetrics; void TopologyMetricService::UpdateTopologyMetrics() { // process metaserver - std::vector metaservers = topo_->GetMetaServerInCluster( - [](const MetaServer &ms) { return true; }); + std::vector metaservers = + topo_->GetMetaServerInCluster([](const MetaServer &ms) { + (void)ms; + return true; + }); for (auto msId : metaservers) { auto it = gMetaServerMetrics.find(msId); @@ -103,10 +106,10 @@ void TopologyMetricService::UpdateTopologyMetrics() { auto fileType2InodeNum = pit->GetFileType2InodeNum(); auto itFsId2FileType2InodeNum = fsId2FileType2InodeNum.find(fsId); if (itFsId2FileType2InodeNum == fsId2FileType2InodeNum.end()) { - fsId2FileType2InodeNum.emplace( - fsId, std::move(fileType2InodeNum)); + fsId2FileType2InodeNum.emplace(fsId, + 
std::move(fileType2InodeNum)); } else { - for (auto const& fileType2Inode : fileType2InodeNum) { + for (auto const &fileType2Inode : fileType2InodeNum) { auto itFileType2InodeNum = itFsId2FileType2InodeNum->second.find( fileType2Inode.first); @@ -202,7 +205,7 @@ void TopologyMetricService::UpdateTopologyMetrics() { } // set fsId2FileType2InodeNum metric - for (auto const& fsId2FileType2InodeNumPair : fsId2FileType2InodeNum) { + for (auto const &fsId2FileType2InodeNumPair : fsId2FileType2InodeNum) { auto it = gFsMetrics.find(fsId2FileType2InodeNumPair.first); if (it == gFsMetrics.end()) { FsMetricPtr cptr(new FsMetric(fsId2FileType2InodeNumPair.first)); @@ -211,7 +214,7 @@ void TopologyMetricService::UpdateTopologyMetrics() { .first; } // set according to fstype - for (auto const& fileType2InodeNumPair : + for (auto const &fileType2InodeNumPair : fsId2FileType2InodeNumPair.second) { auto it2 = it->second->fileType2InodeNum_.find( fileType2InodeNumPair.first); // find file type diff --git a/curvefs/src/mds/topology/topology_service.cpp b/curvefs/src/mds/topology/topology_service.cpp index 6205b440e4..590253afa7 100644 --- a/curvefs/src/mds/topology/topology_service.cpp +++ b/curvefs/src/mds/topology/topology_service.cpp @@ -755,6 +755,8 @@ void TopologyServiceImpl::StatMetadataUsage( const ::curvefs::mds::topology::StatMetadataUsageRequest* request, ::curvefs::mds::topology::StatMetadataUsageResponse* response, ::google::protobuf::Closure* done) { + (void)controller; + (void)request; brpc::ClosureGuard guard(done); LOG(INFO) << "start to state metadata usage."; topologyManager_->GetMetaServersSpace(response->mutable_metadatausages()); diff --git a/curvefs/src/mds/topology/topology_storge_etcd.cpp b/curvefs/src/mds/topology/topology_storge_etcd.cpp index 2b3708bb36..b59eb23a09 100644 --- a/curvefs/src/mds/topology/topology_storge_etcd.cpp +++ b/curvefs/src/mds/topology/topology_storge_etcd.cpp @@ -490,8 +490,8 @@ bool TopologyStorageEtcd::UpdatePartitions( 
OpType::OpPut, const_cast(keys[i].data()), const_cast(values[i].data()), - keys[i].size(), - values[i].size() + static_cast(keys[i].size()), + static_cast(values[i].size()) }; ops.emplace_back(op); } diff --git a/curvefs/src/metaserver/copyset/meta_operator.cpp b/curvefs/src/metaserver/copyset/meta_operator.cpp index ef88f4a601..63fa531a19 100644 --- a/curvefs/src/metaserver/copyset/meta_operator.cpp +++ b/curvefs/src/metaserver/copyset/meta_operator.cpp @@ -37,8 +37,8 @@ #include "curvefs/src/metaserver/streaming_utils.h" #include "src/common/timeutility.h" -static bvar::LatencyRecorder g_concurrent_fast_apply_wait_latency( - "concurrent_fast_apply_wait"); +static bvar::LatencyRecorder + g_concurrent_fast_apply_wait_latency("concurrent_fast_apply_wait"); namespace curvefs { @@ -77,9 +77,7 @@ void MetaOperator::Propose() { } } -void MetaOperator::RedirectRequest() { - Redirect(); -} +void MetaOperator::RedirectRequest() { Redirect(); } bool MetaOperator::ProposeTask() { timerPropose.start(); @@ -115,67 +113,66 @@ void MetaOperator::FastApplyTask() { } bool GetInodeOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool ListDentryOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool BatchGetInodeAttrOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool BatchGetXAttrOperator::CanBypassPropose() const { - auto* req = static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool GetDentryOperator::CanBypassPropose() const { - auto* req = 
static_cast(request_); + auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } bool GetVolumeExtentOperator::CanBypassPropose() const { - const auto* req = static_cast(request_); + const auto *req = static_cast(request_); return req->has_appliedindex() && node_->GetAppliedIndex() >= req->appliedindex(); } -#define OPERATOR_ON_APPLY(TYPE) \ - void TYPE##Operator::OnApply(int64_t index, \ - google::protobuf::Closure* done, \ - uint64_t startTimeUs) { \ - brpc::ClosureGuard doneGuard(done); \ - uint64_t timeUs = TimeUtility::GetTimeofDayUs(); \ - node_->GetMetric()->WaitInQueueLatency( \ - OperatorType::TYPE, timeUs - startTimeUs); \ - auto status = node_->GetMetaStore()->TYPE( \ - static_cast(request_), \ - static_cast(response_)); \ - uint64_t executeTime = TimeUtility::GetTimeofDayUs() - timeUs; \ - node_->GetMetric()->ExecuteLatency( \ - OperatorType::TYPE, executeTime); \ - if (status == MetaStatusCode::OK) { \ - node_->UpdateAppliedIndex(index); \ - static_cast(response_)->set_appliedindex( \ - std::max(index, node_->GetAppliedIndex())); \ - node_->GetMetric()->OnOperatorComplete( \ - OperatorType::TYPE, \ - TimeUtility::GetTimeofDayUs() - startTimeUs, true); \ - } else { \ - node_->GetMetric()->OnOperatorComplete( \ - OperatorType::TYPE, \ - TimeUtility::GetTimeofDayUs() - startTimeUs, false); \ - } \ +#define OPERATOR_ON_APPLY(TYPE) \ + void TYPE##Operator::OnApply(int64_t index, \ + google::protobuf::Closure *done, \ + uint64_t startTimeUs) { \ + brpc::ClosureGuard doneGuard(done); \ + uint64_t timeUs = TimeUtility::GetTimeofDayUs(); \ + node_->GetMetric()->WaitInQueueLatency(OperatorType::TYPE, \ + timeUs - startTimeUs); \ + auto status = node_->GetMetaStore()->TYPE( \ + static_cast(request_), \ + static_cast(response_)); \ + uint64_t executeTime = TimeUtility::GetTimeofDayUs() - timeUs; \ + node_->GetMetric()->ExecuteLatency(OperatorType::TYPE, executeTime); \ + if (status == 
MetaStatusCode::OK) { \ + node_->UpdateAppliedIndex(index); \ + static_cast(response_)->set_appliedindex( \ + std::max(index, node_->GetAppliedIndex())); \ + node_->GetMetric()->OnOperatorComplete( \ + OperatorType::TYPE, \ + TimeUtility::GetTimeofDayUs() - startTimeUs, true); \ + } else { \ + node_->GetMetric()->OnOperatorComplete( \ + OperatorType::TYPE, \ + TimeUtility::GetTimeofDayUs() - startTimeUs, false); \ + } \ } OPERATOR_ON_APPLY(GetDentry); @@ -193,7 +190,7 @@ OPERATOR_ON_APPLY(CreateManageInode); OPERATOR_ON_APPLY(CreatePartition); OPERATOR_ON_APPLY(DeletePartition); OPERATOR_ON_APPLY(PrepareRenameTx); -OPERATOR_ON_APPLY(UpdateVolumeExtent);; +OPERATOR_ON_APPLY(UpdateVolumeExtent); #undef OPERATOR_ON_APPLY @@ -201,11 +198,11 @@ OPERATOR_ON_APPLY(UpdateVolumeExtent);; // so we redefine OnApply() and OnApplyFromLog() instead of using macro. // It may not be an elegant implementation, can you provide a better idea? void GetOrModifyS3ChunkInfoOperator::OnApply(int64_t index, - google::protobuf::Closure* done, + google::protobuf::Closure *done, uint64_t startTimeUs) { MetaStatusCode rc; - auto request = static_cast(request_); - auto response = static_cast(response_); + auto request = static_cast(request_); + auto response = static_cast(response_); auto metastore = node_->GetMetaStore(); std::shared_ptr connection; std::shared_ptr iterator; @@ -228,9 +225,8 @@ void GetOrModifyS3ChunkInfoOperator::OnApply(int64_t index, TimeUtility::GetTimeofDayUs() - startTimeUs, false); } - brpc::Controller* cntl = static_cast(cntl_); - if (rc != MetaStatusCode::OK || - !request->returns3chunkinfomap() || + brpc::Controller *cntl = static_cast(cntl_); + if (rc != MetaStatusCode::OK || !request->returns3chunkinfomap() || !request->supportstreaming()) { return; } @@ -251,12 +247,12 @@ void GetOrModifyS3ChunkInfoOperator::OnApply(int64_t index, } void GetVolumeExtentOperator::OnApply(int64_t index, - google::protobuf::Closure* done, + google::protobuf::Closure *done, uint64_t 
startTimeUs) { brpc::ClosureGuard doneGuard(done); - const auto* request = static_cast(request_); - auto* response = static_cast(response_); - auto* metaStore = node_->GetMetaStore(); + const auto *request = static_cast(request_); + auto *response = static_cast(response_); + auto *metaStore = node_->GetMetaStore(); auto st = metaStore->GetVolumeExtent(request, response); node_->GetMetric()->OnOperatorComplete( @@ -278,7 +274,7 @@ void GetVolumeExtentOperator::OnApply(int64_t index, response->clear_slices(); // accept client's streaming request - auto* cntl = static_cast(cntl_); + auto *cntl = static_cast(cntl_); auto streamingServer = metaStore->GetStreamServer(); auto connection = streamingServer->Accept(cntl); if (connection == nullptr) { @@ -298,15 +294,15 @@ void GetVolumeExtentOperator::OnApply(int64_t index, } } -#define OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ - void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ - std::unique_ptr selfGuard(this); \ - TYPE##Response response; \ - auto status = node_->GetMetaStore()->TYPE( \ - static_cast(request_), &response); \ - node_->GetMetric()->OnOperatorCompleteFromLog( \ - OperatorType::TYPE, TimeUtility::GetTimeofDayUs() - startTimeUs, \ - status == MetaStatusCode::OK); \ +#define OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ + void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ + std::unique_ptr selfGuard(this); \ + TYPE##Response response; \ + auto status = node_->GetMetaStore()->TYPE( \ + static_cast(request_), &response); \ + node_->GetMetric()->OnOperatorCompleteFromLog( \ + OperatorType::TYPE, TimeUtility::GetTimeofDayUs() - startTimeUs, \ + status == MetaStatusCode::OK); \ } OPERATOR_ON_APPLY_FROM_LOG(CreateDentry); @@ -328,7 +324,7 @@ void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(uint64_t startTimeUs) { GetOrModifyS3ChunkInfoRequest request; GetOrModifyS3ChunkInfoResponse response; std::shared_ptr iterator; - request = *static_cast(request_); + request = *static_cast(request_); 
request.set_returns3chunkinfomap(false); auto status = node_->GetMetaStore()->GetOrModifyS3ChunkInfo( &request, &response, &iterator); @@ -338,9 +334,10 @@ void GetOrModifyS3ChunkInfoOperator::OnApplyFromLog(uint64_t startTimeUs) { status == MetaStatusCode::OK); } -#define READONLY_OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ - void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ - std::unique_ptr selfGuard(this); \ +#define READONLY_OPERATOR_ON_APPLY_FROM_LOG(TYPE) \ + void TYPE##Operator::OnApplyFromLog(uint64_t startTimeUs) { \ + (void)startTimeUs; \ + std::unique_ptr selfGuard(this); \ } // below operator are readonly, so on apply from log do nothing @@ -353,10 +350,10 @@ READONLY_OPERATOR_ON_APPLY_FROM_LOG(GetVolumeExtent); #undef READONLY_OPERATOR_ON_APPLY_FROM_LOG -#define OPERATOR_REDIRECT(TYPE) \ - void TYPE##Operator::Redirect() { \ - static_cast(response_)->set_statuscode( \ - MetaStatusCode::REDIRECTED); \ +#define OPERATOR_REDIRECT(TYPE) \ + void TYPE##Operator::Redirect() { \ + static_cast(response_)->set_statuscode( \ + MetaStatusCode::REDIRECTED); \ } OPERATOR_REDIRECT(GetDentry); @@ -380,9 +377,9 @@ OPERATOR_REDIRECT(UpdateVolumeExtent); #undef OPERATOR_REDIRECT -#define OPERATOR_ON_FAILED(TYPE) \ - void TYPE##Operator::OnFailed(MetaStatusCode code) { \ - static_cast(response_)->set_statuscode(code); \ +#define OPERATOR_ON_FAILED(TYPE) \ + void TYPE##Operator::OnFailed(MetaStatusCode code) { \ + static_cast(response_)->set_statuscode(code); \ } OPERATOR_ON_FAILED(GetDentry); @@ -406,9 +403,9 @@ OPERATOR_ON_FAILED(UpdateVolumeExtent); #undef OPERATOR_ON_FAILED -#define OPERATOR_HASH_CODE(TYPE) \ - uint64_t TYPE##Operator::HashCode() const { \ - return static_cast(request_)->partitionid(); \ +#define OPERATOR_HASH_CODE(TYPE) \ + uint64_t TYPE##Operator::HashCode() const { \ + return static_cast(request_)->partitionid(); \ } OPERATOR_HASH_CODE(GetDentry); @@ -431,20 +428,20 @@ OPERATOR_HASH_CODE(UpdateVolumeExtent); #undef OPERATOR_HASH_CODE -#define 
PARTITION_OPERATOR_HASH_CODE(TYPE) \ - uint64_t TYPE##Operator::HashCode() const { \ - return static_cast(request_) \ - ->partition() \ - .partitionid(); \ +#define PARTITION_OPERATOR_HASH_CODE(TYPE) \ + uint64_t TYPE##Operator::HashCode() const { \ + return static_cast(request_) \ + ->partition() \ + .partitionid(); \ } PARTITION_OPERATOR_HASH_CODE(CreatePartition); #undef PARTITION_OPERATOR_HASH_CODE -#define OPERATOR_TYPE(TYPE) \ - OperatorType TYPE##Operator::GetOperatorType() const { \ - return OperatorType::TYPE; \ +#define OPERATOR_TYPE(TYPE) \ + OperatorType TYPE##Operator::GetOperatorType() const { \ + return OperatorType::TYPE; \ } OPERATOR_TYPE(GetDentry); diff --git a/curvefs/src/metaserver/dentry_storage.cpp b/curvefs/src/metaserver/dentry_storage.cpp index dae9d7fd7d..5f4cfb2255 100644 --- a/curvefs/src/metaserver/dentry_storage.cpp +++ b/curvefs/src/metaserver/dentry_storage.cpp @@ -77,7 +77,7 @@ void DentryVector::Insert(const Dentry& dentry) { } void DentryVector::Delete(const Dentry& dentry) { - for (size_t i = 0; i < vec_->dentrys_size(); i++) { + for (int i = 0; i < vec_->dentrys_size(); i++) { if (vec_->dentrys(i) == dentry) { vec_->mutable_dentrys()->DeleteSubrange(i, 1); nPendingDel_ += 1; diff --git a/curvefs/src/metaserver/metastore.cpp b/curvefs/src/metaserver/metastore.cpp index fdb8ff9131..381afd96eb 100644 --- a/curvefs/src/metaserver/metastore.cpp +++ b/curvefs/src/metaserver/metastore.cpp @@ -23,6 +23,7 @@ #include #include +#include #include #include // NOLINT @@ -63,12 +64,13 @@ using ::curvefs::metaserver::storage::RocksDBStorage; using ::curvefs::metaserver::storage::StorageOptions; namespace { -const char* const kMetaDataFilename = "metadata"; +const char *const kMetaDataFilename = "metadata"; bvar::LatencyRecorder g_storage_checkpoint_latency("storage_checkpoint"); } // namespace -std::unique_ptr MetaStoreImpl::Create( - copyset::CopysetNode* node, const StorageOptions& storageOptions) { +std::unique_ptr 
+MetaStoreImpl::Create(copyset::CopysetNode *node, + const StorageOptions &storageOptions) { auto store = absl::WrapUnique(new MetaStoreImpl(node, storageOptions)); auto succ = store->InitStorage(); if (succ) { @@ -79,13 +81,12 @@ std::unique_ptr MetaStoreImpl::Create( return nullptr; } -MetaStoreImpl::MetaStoreImpl(copyset::CopysetNode* node, - const StorageOptions& storageOptions) - : copysetNode_(node), - streamServer_(std::make_shared()), +MetaStoreImpl::MetaStoreImpl(copyset::CopysetNode *node, + const StorageOptions &storageOptions) + : copysetNode_(node), streamServer_(std::make_shared()), storageOptions_(storageOptions) {} -bool MetaStoreImpl::Load(const std::string& pathname) { +bool MetaStoreImpl::Load(const std::string &pathname) { // Load from raft snap file to memory WriteLockGuard writeLockGuard(rwLock_); MetaStoreFStream fstream(&partitionMap_, kvStorage_, @@ -113,12 +114,12 @@ bool MetaStoreImpl::Load(const std::string& pathname) { std::shared_ptr recycleCleaner = std::make_shared(GetPartition(partitionId)); RecycleManager::GetInstance().Add(partitionId, recycleCleaner, - copysetNode_); + copysetNode_); } } auto startCompacts = [this]() { - for (auto& part : partitionMap_) { + for (auto &part : partitionMap_) { part.second->StartS3Compact(); } }; @@ -139,9 +140,9 @@ bool MetaStoreImpl::Load(const std::string& pathname) { return true; } -void MetaStoreImpl::SaveBackground(const std::string& path, - DumpFileClosure* child, - OnSnapshotSaveDoneClosure* done) { +void MetaStoreImpl::SaveBackground(const std::string &path, + DumpFileClosure *child, + OnSnapshotSaveDoneClosure *done) { LOG(INFO) << "Save metadata to file background."; MetaStoreFStream fstream(&partitionMap_, kvStorage_, copysetNode_->GetPoolId(), @@ -157,8 +158,8 @@ void MetaStoreImpl::SaveBackground(const std::string& path, done->Run(); } -bool MetaStoreImpl::Save(const std::string& dir, - OnSnapshotSaveDoneClosure* done) { +bool MetaStoreImpl::Save(const std::string &dir, + 
OnSnapshotSaveDoneClosure *done) { brpc::ClosureGuard doneGuard(done); WriteLockGuard writeLockGuard(rwLock_); @@ -188,10 +189,10 @@ bool MetaStoreImpl::Save(const std::string& dir, // add files to snapshot writer // file is a relative path under the given directory - auto* writer = done->GetSnapshotWriter(); + auto *writer = done->GetSnapshotWriter(); writer->add_file(kMetaDataFilename); - for (const auto& f : files) { + for (const auto &f : files) { writer->add_file(f); } @@ -236,11 +237,12 @@ bool MetaStoreImpl::Destroy() { return true; } -MetaStatusCode MetaStoreImpl::CreatePartition( - const CreatePartitionRequest* request, CreatePartitionResponse* response) { +MetaStatusCode +MetaStoreImpl::CreatePartition(const CreatePartitionRequest *request, + CreatePartitionResponse *response) { WriteLockGuard writeLockGuard(rwLock_); MetaStatusCode status; - const auto& partition = request->partition(); + const auto &partition = request->partition(); auto it = partitionMap_.find(partition.partitionid()); if (it != partitionMap_.end()) { // keep idempotence @@ -255,8 +257,9 @@ MetaStatusCode MetaStoreImpl::CreatePartition( return MetaStatusCode::OK; } -MetaStatusCode MetaStoreImpl::DeletePartition( - const DeletePartitionRequest* request, DeletePartitionResponse* response) { +MetaStatusCode +MetaStoreImpl::DeletePartition(const DeletePartitionRequest *request, + DeletePartitionResponse *response) { WriteLockGuard writeLockGuard(rwLock_); uint32_t partitionId = request->partitionid(); auto it = partitionMap_.find(partitionId); @@ -301,13 +304,13 @@ MetaStatusCode MetaStoreImpl::DeletePartition( } bool MetaStoreImpl::GetPartitionInfoList( - std::list* partitionInfoList) { + std::list *partitionInfoList) { // when metastore is loading, it will hold the rwLock_ for a long time. 
// and heartbeat will stuck when try to GetPartitionInfoList if use // ReadLockGuard to get the rwLock_ int ret = rwLock_.TryRDLock(); if (ret == 0) { - for (const auto& it : partitionMap_) { + for (const auto &it : partitionMap_) { PartitionInfo partitionInfo = it.second->GetPartitionInfo(); partitionInfoList->push_back(std::move(partitionInfo)); } @@ -325,8 +328,8 @@ std::shared_ptr MetaStoreImpl::GetStreamServer() { } // dentry -MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest* request, - CreateDentryResponse* response) { +MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest *request, + CreateDentryResponse *response) { ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); if (partition == nullptr) { @@ -339,11 +342,11 @@ MetaStatusCode MetaStoreImpl::CreateDentry(const CreateDentryRequest* request, return status; } -MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest* request, - GetDentryResponse* response) { +MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest *request, + GetDentryResponse *response) { uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->parentinodeid(); - const auto& name = request->name(); + const auto &name = request->name(); auto txId = request->txid(); ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); @@ -368,8 +371,8 @@ MetaStatusCode MetaStoreImpl::GetDentry(const GetDentryRequest* request, return rc; } -MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest* request, - DeleteDentryResponse* response) { +MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest *request, + DeleteDentryResponse *response) { uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->parentinodeid(); std::string name = request->name(); @@ -395,8 +398,8 @@ MetaStatusCode MetaStoreImpl::DeleteDentry(const DeleteDentryRequest* request, return rc; } 
-MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest* request, - ListDentryResponse* response) { +MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest *request, + ListDentryResponse *response) { uint32_t fsId = request->fsid(); uint64_t parentInodeId = request->dirinodeid(); auto txId = request->txid(); @@ -432,8 +435,9 @@ MetaStatusCode MetaStoreImpl::ListDentry(const ListDentryRequest* request, return rc; } -MetaStatusCode MetaStoreImpl::PrepareRenameTx( - const PrepareRenameTxRequest* request, PrepareRenameTxResponse* response) { +MetaStatusCode +MetaStoreImpl::PrepareRenameTx(const PrepareRenameTxRequest *request, + PrepareRenameTxResponse *response) { ReadLockGuard readLockGuard(rwLock_); MetaStatusCode rc; auto partitionId = request->partitionid(); @@ -451,8 +455,8 @@ MetaStatusCode MetaStoreImpl::PrepareRenameTx( } // inode -MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest* request, - CreateInodeResponse* response) { +MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest *request, + CreateInodeResponse *response) { InodeParam param; param.fsId = request->fsid(); param.length = request->length(); @@ -464,7 +468,8 @@ MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest* request, param.rdev = request->rdev(); if (request->has_create()) { param.timestamp = absl::make_optional( - {request->create().sec(), request->create().nsec()}); + timespec{static_cast(request->create().sec()), + request->create().nsec()}); } param.symlink = ""; @@ -497,8 +502,9 @@ MetaStatusCode MetaStoreImpl::CreateInode(const CreateInodeRequest* request, return status; } -MetaStatusCode MetaStoreImpl::CreateRootInode( - const CreateRootInodeRequest* request, CreateRootInodeResponse* response) { +MetaStatusCode +MetaStoreImpl::CreateRootInode(const CreateRootInodeRequest *request, + CreateRootInodeResponse *response) { InodeParam param; param.fsId = request->fsid(); param.uid = request->uid(); @@ -510,7 +516,8 @@ 
MetaStatusCode MetaStoreImpl::CreateRootInode( param.parent = 0; if (request->has_create()) { param.timestamp = absl::make_optional( - {request->create().sec(), request->create().nsec()}); + timespec{static_cast(request->create().sec()), + request->create().nsec()}); } ReadLockGuard readLockGuard(rwLock_); @@ -532,9 +539,9 @@ MetaStatusCode MetaStoreImpl::CreateRootInode( return status; } -MetaStatusCode MetaStoreImpl::CreateManageInode( - const CreateManageInodeRequest* request, - CreateManageInodeResponse* response) { +MetaStatusCode +MetaStoreImpl::CreateManageInode(const CreateManageInodeRequest *request, + CreateManageInodeResponse *response) { InodeParam param; param.fsId = request->fsid(); param.uid = request->uid(); @@ -560,9 +567,8 @@ MetaStatusCode MetaStoreImpl::CreateManageInode( return status; } - MetaStatusCode status = - partition->CreateManageInode(param, request->managetype(), - response->mutable_inode()); + MetaStatusCode status = partition->CreateManageInode( + param, request->managetype(), response->mutable_inode()); response->set_statuscode(status); if (status != MetaStatusCode::OK) { LOG(ERROR) << "CreateManageInode fail, fsId = " << param.fsId @@ -585,8 +591,8 @@ MetaStatusCode MetaStoreImpl::CreateManageInode( return MetaStatusCode::OK; } -MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest* request, - GetInodeResponse* response) { +MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest *request, + GetInodeResponse *response) { uint32_t fsId = request->fsid(); uint64_t inodeId = request->inodeid(); @@ -598,7 +604,7 @@ MetaStatusCode MetaStoreImpl::GetInode(const GetInodeRequest* request, return status; } - Inode* inode = response->mutable_inode(); + Inode *inode = response->mutable_inode(); MetaStatusCode rc = partition->GetInode(fsId, inodeId, inode); // NOTE: the following two cases we should padding inode's s3chunkinfo: // (1): for RPC requests which unsupport streaming @@ -623,9 +629,9 @@ MetaStatusCode 
MetaStoreImpl::GetInode(const GetInodeRequest* request, return rc; } -MetaStatusCode MetaStoreImpl::BatchGetInodeAttr( - const BatchGetInodeAttrRequest* request, - BatchGetInodeAttrResponse* response) { +MetaStatusCode +MetaStoreImpl::BatchGetInodeAttr(const BatchGetInodeAttrRequest *request, + BatchGetInodeAttrResponse *response) { ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); if (partition == nullptr) { @@ -649,8 +655,8 @@ MetaStatusCode MetaStoreImpl::BatchGetInodeAttr( return status; } -MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest* request, - BatchGetXAttrResponse* response) { +MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest *request, + BatchGetXAttrResponse *response) { ReadLockGuard readLockGuard(rwLock_); std::shared_ptr partition = GetPartition(request->partitionid()); if (partition == nullptr) { @@ -674,8 +680,8 @@ MetaStatusCode MetaStoreImpl::BatchGetXAttr(const BatchGetXAttrRequest* request, return status; } -MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest* request, - DeleteInodeResponse* response) { +MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest *request, + DeleteInodeResponse *response) { uint32_t fsId = request->fsid(); uint64_t inodeId = request->inodeid(); @@ -692,8 +698,8 @@ MetaStatusCode MetaStoreImpl::DeleteInode(const DeleteInodeRequest* request, return status; } -MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest* request, - UpdateInodeResponse* response) { +MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest *request, + UpdateInodeResponse *response) { ReadLockGuard readLockGuard(rwLock_); VLOG(9) << "UpdateInode inode " << request->inodeid(); std::shared_ptr partition = GetPartition(request->partitionid()); @@ -709,9 +715,9 @@ MetaStatusCode MetaStoreImpl::UpdateInode(const UpdateInodeRequest* request, } MetaStatusCode MetaStoreImpl::GetOrModifyS3ChunkInfo( - const 
GetOrModifyS3ChunkInfoRequest* request, - GetOrModifyS3ChunkInfoResponse* response, - std::shared_ptr* iterator) { + const GetOrModifyS3ChunkInfoRequest *request, + GetOrModifyS3ChunkInfoResponse *response, + std::shared_ptr *iterator) { MetaStatusCode rc; ReadLockGuard readLockGuard(rwLock_); auto partition = GetPartition(request->partitionid()); @@ -736,9 +742,9 @@ MetaStatusCode MetaStoreImpl::GetOrModifyS3ChunkInfo( return rc; } -void MetaStoreImpl::PrepareStreamBuffer(butil::IOBuf* buffer, +void MetaStoreImpl::PrepareStreamBuffer(butil::IOBuf *buffer, uint64_t chunkIndex, - const std::string& value) { + const std::string &value) { buffer->clear(); buffer->append(std::to_string(chunkIndex)); buffer->append(":"); @@ -782,8 +788,9 @@ std::shared_ptr MetaStoreImpl::GetPartition(uint32_t partitionId) { return nullptr; } -MetaStatusCode MetaStoreImpl::GetVolumeExtent( - const GetVolumeExtentRequest* request, GetVolumeExtentResponse* response) { +MetaStatusCode +MetaStoreImpl::GetVolumeExtent(const GetVolumeExtentRequest *request, + GetVolumeExtentResponse *response) { ReadLockGuard guard(rwLock_); auto partition = GetPartition(request->partitionid()); if (!partition) { @@ -804,9 +811,9 @@ MetaStatusCode MetaStoreImpl::GetVolumeExtent( return st; } -MetaStatusCode MetaStoreImpl::UpdateVolumeExtent( - const UpdateVolumeExtentRequest* request, - UpdateVolumeExtentResponse* response) { +MetaStatusCode +MetaStoreImpl::UpdateVolumeExtent(const UpdateVolumeExtentRequest *request, + UpdateVolumeExtentResponse *response) { ReadLockGuard guard(rwLock_); auto partition = GetPartition(request->partitionid()); if (!partition) { diff --git a/curvefs/src/metaserver/metastore_fstream.cpp b/curvefs/src/metaserver/metastore_fstream.cpp index 7744eaae4c..edf0cb8c32 100644 --- a/curvefs/src/metaserver/metastore_fstream.cpp +++ b/curvefs/src/metaserver/metastore_fstream.cpp @@ -37,34 +37,32 @@ namespace curvefs { namespace metaserver { using ::curvefs::common::PartitionInfo; -using 
::curvefs::metaserver::Inode; using ::curvefs::metaserver::Dentry; +using ::curvefs::metaserver::Inode; +using ::curvefs::metaserver::storage::ContainerIterator; using ::curvefs::metaserver::storage::ENTRY_TYPE; -using ::curvefs::metaserver::storage::SaveToFile; -using ::curvefs::metaserver::storage::LoadFromFile; using ::curvefs::metaserver::storage::IteratorWrapper; -using ::curvefs::metaserver::storage::ContainerIterator; +using ::curvefs::metaserver::storage::LoadFromFile; +using ::curvefs::metaserver::storage::SaveToFile; using ContainerType = std::unordered_map; using STORAGE_TYPE = ::curvefs::metaserver::storage::KVStorage::STORAGE_TYPE; -using ChildrenType = ::curvefs::metaserver::storage::MergeIterator::ChildrenType; // NOLINT +using ChildrenType = + ::curvefs::metaserver::storage::MergeIterator::ChildrenType; // NOLINT using DumpFileClosure = ::curvefs::metaserver::storage::DumpFileClosure; using Key4S3ChunkInfoList = ::curvefs::metaserver::storage::Key4S3ChunkInfoList; using ::curvefs::metaserver::storage::Key4VolumeExtentSlice; -MetaStoreFStream::MetaStoreFStream(PartitionMap* partitionMap, +MetaStoreFStream::MetaStoreFStream(PartitionMap *partitionMap, std::shared_ptr kvStorage, - PoolId poolId, - CopysetId copysetId) - : partitionMap_(partitionMap), - kvStorage_(std::move(kvStorage)), - conv_(std::make_shared()), - poolId_(poolId), + PoolId poolId, CopysetId copysetId) + : partitionMap_(partitionMap), kvStorage_(std::move(kvStorage)), + conv_(std::make_shared()), poolId_(poolId), copysetId_(copysetId) {} -std::shared_ptr MetaStoreFStream::GetPartition( - uint32_t partitionId) { +std::shared_ptr +MetaStoreFStream::GetPartition(uint32_t partitionId) { auto iter = partitionMap_->find(partitionId); if (iter != partitionMap_->end()) { return iter->second; @@ -73,8 +71,9 @@ std::shared_ptr MetaStoreFStream::GetPartition( } bool MetaStoreFStream::LoadPartition(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string 
&key, + const std::string &value) { + (void)key; PartitionInfo partitionInfo; if (!conv_->ParseFromString(value, &partitionInfo)) { LOG(ERROR) << "Decode PartitionInfo failed"; @@ -97,9 +96,9 @@ bool MetaStoreFStream::LoadPartition(uint32_t partitionId, return true; } -bool MetaStoreFStream::LoadInode(uint32_t partitionId, - const std::string& key, - const std::string& value) { +bool MetaStoreFStream::LoadInode(uint32_t partitionId, const std::string &key, + const std::string &value) { + (void)key; auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -121,10 +120,10 @@ bool MetaStoreFStream::LoadInode(uint32_t partitionId, return true; } -bool MetaStoreFStream::LoadDentry(uint8_t version, - uint32_t partitionId, - const std::string& key, - const std::string& value) { +bool MetaStoreFStream::LoadDentry(uint8_t version, uint32_t partitionId, + const std::string &key, + const std::string &value) { + (void)key; auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -154,8 +153,9 @@ bool MetaStoreFStream::LoadDentry(uint8_t version, } bool MetaStoreFStream::LoadPendingTx(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string &key, + const std::string &value) { + (void)key; auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -176,8 +176,8 @@ bool MetaStoreFStream::LoadPendingTx(uint32_t partitionId, } bool MetaStoreFStream::LoadInodeS3ChunkInfoList(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string &key, + const std::string &value) { auto partition = GetPartition(partitionId); if (nullptr == partition) { LOG(ERROR) << "Partition not found, partitionId = " << partitionId; @@ -209,8 +209,8 @@ bool 
MetaStoreFStream::LoadInodeS3ChunkInfoList(uint32_t partitionId, } bool MetaStoreFStream::LoadVolumeExtentList(uint32_t partitionId, - const std::string& key, - const std::string& value) { + const std::string &key, + const std::string &value) { auto partition = GetPartition(partitionId); if (!partition) { LOG(ERROR) << "Partition not found, partitionId: " << partitionId; @@ -244,7 +244,7 @@ bool MetaStoreFStream::LoadVolumeExtentList(uint32_t partitionId, std::shared_ptr MetaStoreFStream::NewPartitionIterator() { std::string value; auto container = std::make_shared(); - for (const auto& item : *partitionMap_) { + for (const auto &item : *partitionMap_) { auto partitionId = item.first; auto partition = item.second; auto partitionInfo = partition->GetPartitionInfo(); @@ -256,36 +256,36 @@ std::shared_ptr MetaStoreFStream::NewPartitionIterator() { container->emplace(std::to_string(partitionId), value); } - auto iterator = std::make_shared>( - container); - return std::make_shared( - ENTRY_TYPE::PARTITION, 0, iterator); + auto iterator = + std::make_shared>(container); + return std::make_shared(ENTRY_TYPE::PARTITION, 0, + iterator); } -std::shared_ptr MetaStoreFStream::NewInodeIterator( - std::shared_ptr partition) { +std::shared_ptr +MetaStoreFStream::NewInodeIterator(std::shared_ptr partition) { auto partitionId = partition->GetPartitionId(); auto iterator = partition->GetAllInode(); if (iterator->Status() != 0) { return nullptr; } - return std::make_shared( - ENTRY_TYPE::INODE, partitionId, iterator); + return std::make_shared(ENTRY_TYPE::INODE, partitionId, + iterator); } -std::shared_ptr MetaStoreFStream::NewDentryIterator( - std::shared_ptr partition) { +std::shared_ptr +MetaStoreFStream::NewDentryIterator(std::shared_ptr partition) { auto partitionId = partition->GetPartitionId(); auto iterator = partition->GetAllDentry(); if (iterator->Status() != 0) { return nullptr; } - return std::make_shared( - ENTRY_TYPE::DENTRY, partitionId, iterator); + return 
std::make_shared(ENTRY_TYPE::DENTRY, partitionId, + iterator); } -std::shared_ptr MetaStoreFStream::NewPendingTxIterator( - std::shared_ptr partition) { +std::shared_ptr +MetaStoreFStream::NewPendingTxIterator(std::shared_ptr partition) { std::string value; PrepareRenameTxRequest pendingTx; auto container = std::make_shared(); @@ -297,10 +297,10 @@ std::shared_ptr MetaStoreFStream::NewPendingTxIterator( } auto partitionId = partition->GetPartitionId(); - auto iterator = std::make_shared>( - container); - return std::make_shared( - ENTRY_TYPE::PENDING_TX, partitionId, iterator); + auto iterator = + std::make_shared>(container); + return std::make_shared(ENTRY_TYPE::PENDING_TX, + partitionId, iterator); } std::shared_ptr MetaStoreFStream::NewInodeS3ChunkInfoListIterator( @@ -310,12 +310,12 @@ std::shared_ptr MetaStoreFStream::NewInodeS3ChunkInfoListIterator( if (iterator->Status() != 0) { return nullptr; } - return std::make_shared( - ENTRY_TYPE::S3_CHUNK_INFO_LIST, partitionId, iterator); + return std::make_shared(ENTRY_TYPE::S3_CHUNK_INFO_LIST, + partitionId, iterator); } -std::shared_ptr MetaStoreFStream::NewVolumeExtentListIterator( - Partition* partition) { +std::shared_ptr +MetaStoreFStream::NewVolumeExtentListIterator(Partition *partition) { auto partitionId = partition->GetPartitionId(); auto iterator = partition->GetAllVolumeExtentList(); if (iterator->Status() != 0) { @@ -326,7 +326,7 @@ std::shared_ptr MetaStoreFStream::NewVolumeExtentListIterator( partitionId, std::move(iterator)); } -bool MetaStoreFStream::Load(const std::string& pathname, uint8_t* version) { +bool MetaStoreFStream::Load(const std::string &pathname, uint8_t *version) { uint64_t totalPartition = 0; uint64_t totalInode = 0; uint64_t totalDentry = 0; @@ -334,32 +334,30 @@ bool MetaStoreFStream::Load(const std::string& pathname, uint8_t* version) { uint64_t totalVolumeExtent = 0; uint64_t totalPendingTx = 0; - auto callback = [&](uint8_t version, - ENTRY_TYPE entryType, - uint32_t 
partitionId, - const std::string& key, - const std::string& value) -> bool { + auto callback = [&](uint8_t version, ENTRY_TYPE entryType, + uint32_t partitionId, const std::string &key, + const std::string &value) -> bool { switch (entryType) { - case ENTRY_TYPE::PARTITION: - ++totalPartition; - return LoadPartition(partitionId, key, value); - case ENTRY_TYPE::INODE: - ++totalInode; - return LoadInode(partitionId, key, value); - case ENTRY_TYPE::DENTRY: - ++totalDentry; - return LoadDentry(version, partitionId, key, value); - case ENTRY_TYPE::PENDING_TX: - ++totalPendingTx; - return LoadPendingTx(partitionId, key, value); - case ENTRY_TYPE::S3_CHUNK_INFO_LIST: - ++totalS3ChunkInfoList; - return LoadInodeS3ChunkInfoList(partitionId, key, value); - case ENTRY_TYPE::VOLUME_EXTENT: - ++totalVolumeExtent; - return LoadVolumeExtentList(partitionId, key, value); - case ENTRY_TYPE::UNKNOWN: - break; + case ENTRY_TYPE::PARTITION: + ++totalPartition; + return LoadPartition(partitionId, key, value); + case ENTRY_TYPE::INODE: + ++totalInode; + return LoadInode(partitionId, key, value); + case ENTRY_TYPE::DENTRY: + ++totalDentry; + return LoadDentry(version, partitionId, key, value); + case ENTRY_TYPE::PENDING_TX: + ++totalPendingTx; + return LoadPendingTx(partitionId, key, value); + case ENTRY_TYPE::S3_CHUNK_INFO_LIST: + ++totalS3ChunkInfoList; + return LoadInodeS3ChunkInfoList(partitionId, key, value); + case ENTRY_TYPE::VOLUME_EXTENT: + ++totalVolumeExtent; + return LoadVolumeExtentList(partitionId, key, value); + case ENTRY_TYPE::UNKNOWN: + break; } LOG(ERROR) << "Load failed, unknown entry type"; @@ -388,16 +386,15 @@ bool MetaStoreFStream::Load(const std::string& pathname, uint8_t* version) { return ret; } -bool MetaStoreFStream::Save(const std::string& path, - DumpFileClosure* done) { +bool MetaStoreFStream::Save(const std::string &path, DumpFileClosure *done) { ChildrenType children; children.push_back(NewPartitionIterator()); - for (const auto& item : *partitionMap_) { 
+ for (const auto &item : *partitionMap_) { children.push_back(NewPendingTxIterator(item.second)); } - for (const auto& child : children) { + for (const auto &child : children) { if (nullptr == child) { if (done != nullptr) { done->Runned(); diff --git a/curvefs/src/metaserver/recycle_cleaner.cpp b/curvefs/src/metaserver/recycle_cleaner.cpp index 95341206f5..bc087a704a 100644 --- a/curvefs/src/metaserver/recycle_cleaner.cpp +++ b/curvefs/src/metaserver/recycle_cleaner.cpp @@ -63,7 +63,7 @@ bool RecycleCleaner::IsDirTimeOut(const std::string& dir) { struct tm tmDir; memset(&tmDir, 0, sizeof(tmDir)); - char* c = strptime(dir.c_str(), "%Y-%m-%d-%H", &tmDir); + (void)strptime(dir.c_str(), "%Y-%m-%d-%H", &tmDir); time_t dirTime = mktime(&tmDir); if (dirTime <= 0) { diff --git a/curvefs/src/metaserver/s3compact_manager.cpp b/curvefs/src/metaserver/s3compact_manager.cpp index c41a95d659..4c5bea7648 100644 --- a/curvefs/src/metaserver/s3compact_manager.cpp +++ b/curvefs/src/metaserver/s3compact_manager.cpp @@ -22,6 +22,7 @@ #include "curvefs/src/metaserver/s3compact_manager.h" +#include #include #include @@ -45,7 +46,7 @@ void S3AdapterManager::Init() { std::lock_guard lock(mtx_); if (inited_) return; used_.resize(size_); - for (int i = 0; i < size_; i++) { + for (uint64_t i = 0; i < size_; i++) { s3adapters_.emplace_back(new S3Adapter()); } for (auto& s3adapter : s3adapters_) { diff --git a/curvefs/src/metaserver/storage/dumpfile.cpp b/curvefs/src/metaserver/storage/dumpfile.cpp index 3465c1919e..dd2d03c4f0 100644 --- a/curvefs/src/metaserver/storage/dumpfile.cpp +++ b/curvefs/src/metaserver/storage/dumpfile.cpp @@ -178,7 +178,7 @@ DUMPFILE_ERROR DumpFile::Write(const char* buffer, if (ret < 0) { LOG(ERROR) << "Write file failed, retCode = " << ret; return DUMPFILE_ERROR::WRITE_FAILED; - } else if (ret != length) { + } else if (ret != static_cast(length)) { LOG(ERROR) << "Write file failed, expect write " << length << " bytes, actual write " << ret << " bytes"; return 
DUMPFILE_ERROR::WRITE_FAILED; @@ -192,7 +192,7 @@ DUMPFILE_ERROR DumpFile::Read(char* buffer, off_t offset, size_t length) { if (ret < 0) { LOG(ERROR) << "Read file failed, retCode = " << ret; return DUMPFILE_ERROR::READ_FAILED; - } else if (ret != length) { + } else if (ret != static_cast(length)) { LOG(ERROR) << "Read file failed, expect read " << length << " bytes, actual read " << ret << " bytes"; return DUMPFILE_ERROR::READ_FAILED; @@ -375,6 +375,7 @@ DUMPFILE_ERROR DumpFile::WaitSaveDone(pid_t childpid) { } void DumpFile::SignalHandler(int signo, siginfo_t* siginfo, void* ucontext) { + (void)ucontext; auto pid = (siginfo && siginfo->si_pid) ? siginfo->si_pid : -1; LOG(INFO) << "Signal " << signo << " received from " << pid; _exit(2); @@ -398,7 +399,6 @@ DUMPFILE_ERROR DumpFile::InitSignals() { DUMPFILE_ERROR DumpFile::CloseSockets() { std::vector names; - pid_t pid = getpid(); if (fs_->List("/proc/self/fd", &names) != 0) { return DUMPFILE_ERROR::LIST_FAILED; } diff --git a/curvefs/src/metaserver/storage/iterator.h b/curvefs/src/metaserver/storage/iterator.h index 0c591cf2e6..cd6e7393fe 100644 --- a/curvefs/src/metaserver/storage/iterator.h +++ b/curvefs/src/metaserver/storage/iterator.h @@ -50,11 +50,11 @@ class Iterator { virtual std::string Value() = 0; - virtual const ValueType* RawValue() const { return nullptr; } + virtual const ValueType *RawValue() const { return nullptr; } virtual int Status() = 0; - virtual bool ParseFromValue(ValueType* value) { return true; } + virtual bool ParseFromValue(ValueType * /*value*/) { return true; } virtual void DisablePrefixChecking() {} }; @@ -64,24 +64,21 @@ class MergeIterator : public Iterator { using ChildrenType = std::vector>; public: - explicit MergeIterator(const ChildrenType& children) - : current_(nullptr), children_(children) { - } + explicit MergeIterator(const ChildrenType &children) + : children_(children), current_(nullptr) {} uint64_t Size() override { uint64_t size = 0; - for (const auto& child : 
children_) { + for (const auto &child : children_) { size += child->Size(); } return size; } - bool Valid() override { - return (current_ != nullptr) && (Status() == 0); - } + bool Valid() override { return (current_ != nullptr) && (Status() == 0); } void SeekToFirst() override { - for (const auto& child : children_) { + for (const auto &child : children_) { child->SeekToFirst(); } FindCurrent(); @@ -92,16 +89,12 @@ class MergeIterator : public Iterator { FindCurrent(); } - std::string Key() override { - return current_->Key(); - } + std::string Key() override { return current_->Key(); } - std::string Value() override { - return current_->Value(); - } + std::string Value() override { return current_->Value(); } int Status() override { - for (const auto& child : children_) { + for (const auto &child : children_) { if (child->Status() != 0) { return child->Status(); } @@ -112,7 +105,7 @@ class MergeIterator : public Iterator { private: void FindCurrent() { current_ = nullptr; - for (const auto& child : children_) { + for (const auto &child : children_) { if (child->Valid()) { current_ = child; break; @@ -125,39 +118,24 @@ class MergeIterator : public Iterator { std::shared_ptr current_; }; -template -class ContainerIterator : public Iterator { +template class ContainerIterator : public Iterator { public: explicit ContainerIterator(std::shared_ptr container) : container_(container) {} - uint64_t Size() override { - return container_->size(); - } + uint64_t Size() override { return container_->size(); } - bool Valid() override { - return iter_ != container_->end(); - } + bool Valid() override { return iter_ != container_->end(); } - void SeekToFirst() override { - iter_ = container_->begin(); - } + void SeekToFirst() override { iter_ = container_->begin(); } - void Next() override { - iter_++; - } + void Next() override { iter_++; } - std::string Key() override { - return iter_->first; - } + std::string Key() override { return iter_->first; } - std::string Value() 
override { - return iter_->second; - } + std::string Value() override { return iter_->second; } - int Status() override { - return 0; - } + int Status() override { return 0; } protected: const std::shared_ptr container_; diff --git a/curvefs/src/metaserver/storage/memory_storage.cpp b/curvefs/src/metaserver/storage/memory_storage.cpp index 5979d8f977..feef149e1f 100644 --- a/curvefs/src/metaserver/storage/memory_storage.cpp +++ b/curvefs/src/metaserver/storage/memory_storage.cpp @@ -301,11 +301,14 @@ StorageOptions MemoryStorage::GetStorageOptions() const { bool MemoryStorage::Checkpoint(const std::string& dir, std::vector* files) { + (void)dir; + (void)files; LOG(WARNING) << "Not supported"; return false; } bool MemoryStorage::Recover(const std::string& dir) { + (void)dir; LOG(WARNING) << "Not supported"; return false; } diff --git a/curvefs/src/metaserver/storage/memory_storage.h b/curvefs/src/metaserver/storage/memory_storage.h index f5c18a6474..127d51da80 100644 --- a/curvefs/src/metaserver/storage/memory_storage.h +++ b/curvefs/src/metaserver/storage/memory_storage.h @@ -149,10 +149,10 @@ class MemoryStorageIterator : public Iterator { public: MemoryStorageIterator(std::shared_ptr container, const std::string& prefix) - : container_(container), - prefix_(prefix), + : prefix_(prefix), + status_(0), prefixChecking_(true), - status_(0) {} + container_(container) {} // NOTE: now we can't caclute the size for range operate uint64_t Size() override { diff --git a/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp b/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp index 61d5b80442..dbbae10828 100644 --- a/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp +++ b/curvefs/src/metaserver/storage/rocksdb_event_listener.cpp @@ -44,6 +44,7 @@ MetricEventListener::MetricEventListener() void MetricEventListener::OnFlushBegin(rocksdb::DB* db, const rocksdb::FlushJobInfo& /*info*/) { + (void)db; flushing_ << 1; rocksdbFlushStart = 
butil::cpuwide_time_us(); } @@ -51,6 +52,7 @@ void MetricEventListener::OnFlushBegin(rocksdb::DB* db, void MetricEventListener::OnFlushCompleted( rocksdb::DB* db, const rocksdb::FlushJobInfo& info) { + (void)db; flushing_ << -1; flushLatency_ << (butil::cpuwide_time_us() - rocksdbFlushStart); flushedBytes_ << info.table_properties.data_size; @@ -64,6 +66,7 @@ void MetricEventListener::OnMemTableSealed( void MetricEventListener::OnCompactionBegin( rocksdb::DB* db, const rocksdb::CompactionJobInfo& /*info*/) { + (void)db; compacting_ << 1; rocksdbCompactionStart = butil::cpuwide_time_us(); } @@ -71,6 +74,7 @@ void MetricEventListener::OnCompactionBegin( void MetricEventListener::OnCompactionCompleted( rocksdb::DB* db, const rocksdb::CompactionJobInfo& /*info*/) { + (void)db; compacting_ << -1; compactionLatency_ << (butil::cpuwide_time_us() - rocksdbCompactionStart); } diff --git a/curvefs/src/metaserver/storage/rocksdb_storage.h b/curvefs/src/metaserver/storage/rocksdb_storage.h index 5ab36cc5a1..e0023dd8e2 100644 --- a/curvefs/src/metaserver/storage/rocksdb_storage.h +++ b/curvefs/src/metaserver/storage/rocksdb_storage.h @@ -374,13 +374,13 @@ class RocksDBStorageIterator : public Iterator { } private: + RocksDBStorage* storage_; std::string prefix_; uint64_t size_; int status_; - bool ordered_; bool prefixChecking_; + bool ordered_; std::unique_ptr iter_; - RocksDBStorage* storage_; rocksdb::ReadOptions readOptions_; }; diff --git a/curvefs/src/metaserver/storage/storage_fstream.h b/curvefs/src/metaserver/storage/storage_fstream.h index 1b2d3551b5..c4f24aa28d 100644 --- a/curvefs/src/metaserver/storage/storage_fstream.h +++ b/curvefs/src/metaserver/storage/storage_fstream.h @@ -37,8 +37,8 @@ namespace curvefs { namespace metaserver { namespace storage { -using ::curve::common::StringToUl; using ::curve::common::SplitString; +using ::curve::common::StringToUl; using ::curvefs::common::PartitionInfo; enum class ENTRY_TYPE { @@ -63,7 +63,7 @@ static const std::vector 
pairs{ }; static std::string Type2Str(ENTRY_TYPE t) { - for (const auto& pair : pairs) { + for (const auto &pair : pairs) { if (pair.first == t) { return pair.second; } @@ -71,8 +71,8 @@ static std::string Type2Str(ENTRY_TYPE t) { return ""; } -static ENTRY_TYPE Str2Type(const std::string& s) { - for (const auto& pair : pairs) { +static ENTRY_TYPE Str2Type(const std::string &s) { + for (const auto &pair : pairs) { if (pair.second == s) { return pair.first; } @@ -80,13 +80,12 @@ static ENTRY_TYPE Str2Type(const std::string& s) { return ENTRY_TYPE::UNKNOWN; } -static std::string InternalKey(ENTRY_TYPE t, - uint32_t partitionId, - const std::string& ukey) { +static std::string InternalKey(ENTRY_TYPE t, uint32_t partitionId, + const std::string &ukey) { return absl::StrCat(Type2Str(t), partitionId, ":", ukey); } -static std::pair UserKey(const std::string& ikey) { +static std::pair UserKey(const std::string &ikey) { std::string prefix, ukey; std::vector items; SplitString(ikey, ":", &items); @@ -99,14 +98,14 @@ static std::pair UserKey(const std::string& ikey) { return std::make_pair(prefix, ukey); } -static std::pair Extract(const std::string& prefix) { +static std::pair Extract(const std::string &prefix) { if (prefix.size() == 0) { return std::make_pair(ENTRY_TYPE::UNKNOWN, 0); } std::vector items{ prefix.substr(0, 1), // eg: i - prefix.substr(1), // eg: 100 + prefix.substr(1), // eg: 100 }; ENTRY_TYPE entryType = Str2Type(items[0]); @@ -117,10 +116,9 @@ static std::pair Extract(const std::string& prefix) { return std::make_pair(entryType, partitionId); } -inline bool SaveToFile(const std::string& pathname, - std::shared_ptr iterator, - bool background, - DumpFileClosure* done = nullptr) { +inline bool SaveToFile(const std::string &pathname, + std::shared_ptr iterator, bool background, + DumpFileClosure *done = nullptr) { auto dumpfile = DumpFile(pathname); if (dumpfile.Open() != DUMPFILE_ERROR::OK) { LOG(ERROR) << "Open dumpfile failed"; @@ -144,15 +142,12 @@ inline 
bool SaveToFile(const std::string& pathname, return (rc == DUMPFILE_ERROR::OK) && (iterator->Status() == 0); } -template -inline bool InvokeCallback(uint8_t version, - ENTRY_TYPE entryType, - uint32_t partitionId, - const std::string& key, - const std::string& value, - Callback&& callback) { - bool succ = std::forward(callback)( - version, entryType, partitionId, key, value); +template +inline bool InvokeCallback(uint8_t version, ENTRY_TYPE entryType, + uint32_t partitionId, const std::string &key, + const std::string &value, Callback &&callback) { + bool succ = std::forward(callback)(version, entryType, + partitionId, key, value); if (!succ) { LOG(ERROR) << "Invoke callback for entry failed."; return false; @@ -160,16 +155,16 @@ inline bool InvokeCallback(uint8_t version, return true; } -#define CASE_TYPE_CALLBACK(TYPE) \ - case ENTRY_TYPE::TYPE: \ - if (!InvokeCallback(version, entryType, partitionId, \ - key, value, callback)) { \ - return false; \ - } \ +#define CASE_TYPE_CALLBACK(TYPE) \ + case ENTRY_TYPE::TYPE: \ + if (!InvokeCallback(version, entryType, partitionId, key, value, \ + callback)) { \ + return false; \ + } \ break template -inline bool LoadFromFile(const std::string& pathname, uint8_t* version, +inline bool LoadFromFile(const std::string &pathname, uint8_t *version, Callback callback) { auto dumpfile = DumpFile(pathname); if (dumpfile.Open() != DUMPFILE_ERROR::OK) { @@ -179,8 +174,8 @@ inline bool LoadFromFile(const std::string& pathname, uint8_t* version, auto iter = dumpfile.Load(); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - auto ikey = iter->Key(); // internal key - auto ukey = UserKey(ikey); // + auto ikey = iter->Key(); // internal key + auto ukey = UserKey(ikey); // auto pair = Extract(ukey.first); // prefix ENTRY_TYPE entryType = pair.first; @@ -195,9 +190,9 @@ inline bool LoadFromFile(const std::string& pathname, uint8_t* version, CASE_TYPE_CALLBACK(PENDING_TX); CASE_TYPE_CALLBACK(S3_CHUNK_INFO_LIST); 
CASE_TYPE_CALLBACK(VOLUME_EXTENT); - default: - LOG(ERROR) << "Unknown entry type, key = " << key; - return false; + default: + LOG(ERROR) << "Unknown entry type, key = " << key; + return false; } } @@ -209,45 +204,29 @@ inline bool LoadFromFile(const std::string& pathname, uint8_t* version, // contain entry type and partition id. class IteratorWrapper : public Iterator { public: - IteratorWrapper(ENTRY_TYPE entryType, - uint32_t partitionId, + IteratorWrapper(ENTRY_TYPE entryType, uint32_t partitionId, std::shared_ptr iterator) - : entryType_(entryType), - partitionId_(partitionId), + : entryType_(entryType), partitionId_(partitionId), iterator_(std::move(iterator)) {} - uint64_t Size() override { - return iterator_->Size(); - } + uint64_t Size() override { return iterator_->Size(); } - bool Valid() override { - return iterator_->Valid(); - } + bool Valid() override { return iterator_->Valid(); } - void SeekToFirst() override { - iterator_->SeekToFirst(); - } + void SeekToFirst() override { iterator_->SeekToFirst(); } - void Next() override { - iterator_->Next(); - } + void Next() override { iterator_->Next(); } std::string Key() override { auto key = iterator_->Key(); return InternalKey(entryType_, partitionId_, key); } - std::string Value() override { - return iterator_->Value(); - } + std::string Value() override { return iterator_->Value(); } - bool ParseFromValue(ValueType* value) override { - return true; - } + bool ParseFromValue(ValueType * /*value*/) override { return true; } - int Status() override { - return iterator_->Status(); - } + int Status() override { return iterator_->Status(); } protected: ENTRY_TYPE entryType_; diff --git a/curvefs/src/metaserver/transaction.cpp b/curvefs/src/metaserver/transaction.cpp index 22c637a91f..fb3659f96d 100644 --- a/curvefs/src/metaserver/transaction.cpp +++ b/curvefs/src/metaserver/transaction.cpp @@ -81,7 +81,7 @@ inline bool RenameTx::operator==(const RenameTx& rhs) { std::ostream& operator<<(std::ostream& os, 
const RenameTx& renameTx) { auto dentrys = renameTx.dentrys_; os << "txId = " << renameTx.txId_; - for (int i = 0; i < dentrys.size(); i++) { + for (size_t i = 0; i < dentrys.size(); i++) { os << ", dentry[" << i << "] = (" << dentrys[i].ShortDebugString() << ")"; } diff --git a/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp b/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp index d8e04c5f63..3ddc1d5a62 100644 --- a/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp +++ b/curvefs/src/tools/list/curvefs_copysetinfo_list.cpp @@ -49,11 +49,10 @@ int CopysetInfoListTool::Init() { return 0; } -void CopysetInfoListTool::AddUpdateFlags() { - AddUpdateFlagsFunc(SetMdsAddr); -} +void CopysetInfoListTool::AddUpdateFlags() { AddUpdateFlagsFunc(SetMdsAddr); } -bool CopysetInfoListTool::AfterSendRequestToHost(const std::string& host) { +bool CopysetInfoListTool::AfterSendRequestToHost(const std::string &host) { + (void)host; bool ret = true; if (controller_->Failed()) { errorOutput_ << "get all copysetInfo from [ " << FLAGS_mdsAddr @@ -62,11 +61,12 @@ bool CopysetInfoListTool::AfterSendRequestToHost(const std::string& host) { << std::endl; ret = false; } else if (show_) { - for (auto const& i : response_->copysetvalues()) { + for (auto const &i : response_->copysetvalues()) { std::cout << "copyset[" << copyset::GetCopysetKey(i.copysetinfo().poolid(), i.copysetinfo().copysetid()) - << "]:" << std::endl << i.DebugString() << std::endl; + << "]:" << std::endl + << i.DebugString() << std::endl; } std::cout << std::endl; } diff --git a/curvefs/src/tools/query/curvefs_copyset_query.cpp b/curvefs/src/tools/query/curvefs_copyset_query.cpp index 2c2947e298..ab7c308643 100644 --- a/curvefs/src/tools/query/curvefs_copyset_query.cpp +++ b/curvefs/src/tools/query/curvefs_copyset_query.cpp @@ -126,7 +126,7 @@ bool CopysetQueryTool::AfterSendRequestToHost(const std::string& host) { for (auto const& j : key2Status_[i]) { std::cout << j.ShortDebugString() << std::endl; } - if 
(key2Status_[i].size() != + if (static_cast(key2Status_[i].size()) != key2Info_[i][0].copysetinfo().peers().size()) { std::cerr << "copysetStatus not match the number of " "copysetInfo's peers!" diff --git a/curvefs/src/volume/block_device_aio.cpp b/curvefs/src/volume/block_device_aio.cpp index 922d0424af..8ad673beaf 100644 --- a/curvefs/src/volume/block_device_aio.cpp +++ b/curvefs/src/volume/block_device_aio.cpp @@ -41,29 +41,29 @@ using ::curve::common::is_aligned; namespace { -const char* ToString(LIBCURVE_OP op) { +const char *ToString(LIBCURVE_OP op) { switch (op) { - case LIBCURVE_OP_READ: - return "Read"; - case LIBCURVE_OP_WRITE: - return "Write"; - case LIBCURVE_OP_DISCARD: - return "Discard"; - default: - return "Unknown"; + case LIBCURVE_OP_READ: + return "Read"; + case LIBCURVE_OP_WRITE: + return "Write"; + case LIBCURVE_OP_DISCARD: + return "Discard"; + default: + return "Unknown"; } } -std::ostream& operator<<(std::ostream& os, CurveAioContext* aio) { +std::ostream &operator<<(std::ostream &os, CurveAioContext *aio) { os << "[off: " << aio->offset << ", len: " << aio->length << ", ret: " << aio->ret << ", type: " << ToString(aio->op) << "]"; return os; } -void AioReadCallBack(CurveAioContext* aio) { - AioRead* read = reinterpret_cast(reinterpret_cast(aio) - - offsetof(AioRead, aio)); +void AioReadCallBack(CurveAioContext *aio) { + AioRead *read = reinterpret_cast(reinterpret_cast(aio) - + offsetof(AioRead, aio)); { std::lock_guard lock(read->mtx); @@ -72,9 +72,9 @@ void AioReadCallBack(CurveAioContext* aio) { read->cond.notify_one(); } -void AioWriteCallBack(CurveAioContext* aio) { - AioWrite* write = reinterpret_cast(reinterpret_cast(aio) - - offsetof(AioWrite, aio)); +void AioWriteCallBack(CurveAioContext *aio) { + AioWrite *write = reinterpret_cast( + reinterpret_cast(aio) - offsetof(AioWrite, aio)); { std::lock_guard lock(write->mtx); @@ -83,19 +83,16 @@ void AioWriteCallBack(CurveAioContext* aio) { write->cond.notify_one(); } -void 
AioWritePaddingReadCallBack(CurveAioContext* aio) { - AioWrite::PaddingRead* padding = reinterpret_cast( - reinterpret_cast(aio) - offsetof(AioWrite::PaddingRead, aio)); +void AioWritePaddingReadCallBack(CurveAioContext *aio) { + AioWrite::PaddingRead *padding = reinterpret_cast( + reinterpret_cast(aio) - offsetof(AioWrite::PaddingRead, aio)); padding->base->OnPaddingReadComplete(aio); } } // namespace -AioRead::AioRead(off_t offset, - size_t length, - char* data, - FileClient* dev, +AioRead::AioRead(off_t offset, size_t length, char *data, FileClient *dev, int fd) : aio(), offset(offset), length(length), data(data), dev(dev), fd(fd) {} @@ -158,11 +155,8 @@ ssize_t AioRead::Wait() { return length; } -AioWrite::AioWrite(off_t offset, - size_t length, - const char* data, - FileClient* dev, - int fd) +AioWrite::AioWrite(off_t offset, size_t length, const char *data, + FileClient *dev, int fd) : offset(offset), length(length), data(data), dev(dev), fd(fd) {} void AioWrite::Issue() { @@ -173,7 +167,7 @@ void AioWrite::Issue() { aio.cb = AioWriteCallBack; aio.offset = offset; aio.length = length; - aio.buf = const_cast(data); + aio.buf = const_cast(data); int ret = dev->AioWrite(fd, &aio); if (ret < 0) { @@ -207,7 +201,8 @@ void AioWrite::Issue() { ++idx; } - if (offset + length > lastPaddingEnd && offset + length != alignedEnd) { + if (static_cast(offset + length) > lastPaddingEnd && + offset + length != alignedEnd) { off_t start = alignedEnd - IO_ALIGNED_BLOCK_SIZE; if (paddingStart && start == lastPaddingEnd) { aux->paddingReads[idx - 1].length += IO_ALIGNED_BLOCK_SIZE; @@ -226,7 +221,7 @@ void AioWrite::Issue() { aux->npadding.store(idx, std::memory_order_release); for (int i = 0; i < idx; ++i) { - auto& pad = aux->paddingReads[i]; + auto &pad = aux->paddingReads[i]; pad.aio.ret = -1; pad.aio.op = LIBCURVE_OP_READ; @@ -255,7 +250,7 @@ ssize_t AioWrite::Wait() { return length; } -void AioWrite::OnPaddingReadComplete(CurveAioContext* read) { +void 
AioWrite::OnPaddingReadComplete(CurveAioContext *read) { if (static_cast(read->ret) != static_cast(read->length)) { LOG(ERROR) << "AioRead error: " << read; aux->error.store(true, std::memory_order_release); diff --git a/curvefs/src/volume/block_device_client.cpp b/curvefs/src/volume/block_device_client.cpp index 001bf48aab..206282e3f4 100644 --- a/curvefs/src/volume/block_device_client.cpp +++ b/curvefs/src/volume/block_device_client.cpp @@ -22,6 +22,7 @@ #include "curvefs/src/volume/block_device_client.h" +#include #include #include @@ -52,10 +53,10 @@ BlockDeviceClientImpl::BlockDeviceClientImpl() : fd_(-1), fileClient_(std::make_shared()) {} BlockDeviceClientImpl::BlockDeviceClientImpl( - const std::shared_ptr& fileClient) + const std::shared_ptr &fileClient) : fd_(-1), fileClient_(fileClient) {} -bool BlockDeviceClientImpl::Init(const BlockDeviceClientOptions& options) { +bool BlockDeviceClientImpl::Init(const BlockDeviceClientOptions &options) { auto ret = fileClient_->Init(options.configPath); if (ret != LIBCURVE_ERROR::OK) { LOG(ERROR) << "Init file client error: " << ret; @@ -65,12 +66,10 @@ bool BlockDeviceClientImpl::Init(const BlockDeviceClientOptions& options) { return true; } -void BlockDeviceClientImpl::UnInit() { - fileClient_->UnInit(); -} +void BlockDeviceClientImpl::UnInit() { fileClient_->UnInit(); } -bool BlockDeviceClientImpl::Open(const std::string& filename, - const std::string& owner) { +bool BlockDeviceClientImpl::Open(const std::string &filename, + const std::string &owner) { UserInfo userInfo(owner); curve::client::OpenFlags flags; auto retCode = fileClient_->Open(filename, userInfo, flags); @@ -101,9 +100,9 @@ bool BlockDeviceClientImpl::Close() { return true; } -bool BlockDeviceClientImpl::Stat(const std::string& filename, - const std::string& owner, - BlockDeviceStat* statInfo) { +bool BlockDeviceClientImpl::Stat(const std::string &filename, + const std::string &owner, + BlockDeviceStat *statInfo) { FileStatInfo fileStatInfo; UserInfo 
userInfo(owner); auto retCode = fileClient_->StatFile(filename, userInfo, &fileStatInfo); @@ -122,7 +121,7 @@ bool BlockDeviceClientImpl::Stat(const std::string& filename, return true; } -ssize_t BlockDeviceClientImpl::Read(char* buf, off_t offset, size_t length) { +ssize_t BlockDeviceClientImpl::Read(char *buf, off_t offset, size_t length) { VLOG(9) << "read request, offset: " << offset << ", length: " << length; LatencyUpdater updater(&g_read_latency); @@ -138,7 +137,7 @@ ssize_t BlockDeviceClientImpl::Read(char* buf, off_t offset, size_t length) { return request.Wait(); } -ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { +ssize_t BlockDeviceClientImpl::Readv(const std::vector &iov) { if (iov.size() == 1) { VLOG(9) << "read block offset: " << iov[0].offset << ", length: " << iov[0].length; @@ -148,7 +147,7 @@ ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { std::vector> requests; requests.reserve(iov.size()); - for (const auto& io : iov) { + for (const auto &io : iov) { requests.push_back(absl::make_unique( io.offset, io.length, io.data, fileClient_.get(), fd_)); @@ -157,7 +156,7 @@ ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { bool error = false; ssize_t total = 0; - for (const auto& r : requests) { + for (const auto &r : requests) { auto nr = r->Wait(); if (nr < 0) { error = true; @@ -171,8 +170,7 @@ ssize_t BlockDeviceClientImpl::Readv(const std::vector& iov) { return error ? 
-1 : total; } -ssize_t BlockDeviceClientImpl::Write(const char* buf, - off_t offset, +ssize_t BlockDeviceClientImpl::Write(const char *buf, off_t offset, size_t length) { VLOG(9) << "write request, offset: " << offset << ", length: " << length; @@ -189,7 +187,7 @@ ssize_t BlockDeviceClientImpl::Write(const char* buf, return request.Wait(); } -ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { +ssize_t BlockDeviceClientImpl::Writev(const std::vector &iov) { if (iov.size() == 1) { return Write(iov[0].data, iov[0].offset, iov[0].length); } @@ -197,7 +195,7 @@ ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { std::vector> requests; requests.reserve(iov.size()); - for (const auto& io : iov) { + for (const auto &io : iov) { requests.push_back(absl::make_unique( io.offset, io.length, io.data, fileClient_.get(), fd_)); @@ -206,7 +204,7 @@ ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { bool error = false; ssize_t total = 0; - for (const auto& r : requests) { + for (const auto &r : requests) { auto nr = r->Wait(); if (nr < 0) { error = true; @@ -220,12 +218,11 @@ ssize_t BlockDeviceClientImpl::Writev(const std::vector& iov) { return error ? 
-1 : total; } -bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, - off_t writeStart, +bool BlockDeviceClientImpl::WritePadding(char *writeBuffer, off_t writeStart, off_t writeEnd, off_t offset, // actual offset size_t length) { // actual length - std::vector> readvec; // Align reads + std::vector> readvec; // Align reads off_t readEnd = 0; // Padding leading @@ -235,7 +232,8 @@ bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, } // Padding trailing - if (offset + length > readEnd && offset + length != writeEnd) { + if (static_cast(offset + length) > readEnd && + static_cast(offset + length) != writeEnd) { off_t readStart = writeEnd - IO_ALIGNED_BLOCK_SIZE; if (readvec.size() == 1 && readStart == readEnd) { readvec[0].second = IO_ALIGNED_BLOCK_SIZE * 2; @@ -244,10 +242,10 @@ bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, } } - for (const auto& item : readvec) { + for (const auto &item : readvec) { auto retCode = AlignRead(writeBuffer + item.first - writeStart, item.first, item.second); - if (retCode != item.second) { + if (retCode != static_cast(item.second)) { return false; } } @@ -255,14 +253,13 @@ bool BlockDeviceClientImpl::WritePadding(char* writeBuffer, return true; } -ssize_t BlockDeviceClientImpl::AlignRead(char* buf, - off_t offset, +ssize_t BlockDeviceClientImpl::AlignRead(char *buf, off_t offset, size_t length) { auto ret = fileClient_->Read(fd_, buf, offset, length); if (ret < 0) { LOG(ERROR) << "Read file failed, retCode = " << ret; return -1; - } else if (ret != length) { + } else if (static_cast(ret) != length) { LOG(ERROR) << "Read file failed, expect read " << length << " bytes, actual read " << ret << " bytes"; return -1; @@ -271,14 +268,13 @@ ssize_t BlockDeviceClientImpl::AlignRead(char* buf, return length; } -ssize_t BlockDeviceClientImpl::AlignWrite(const char* buf, - off_t offset, +ssize_t BlockDeviceClientImpl::AlignWrite(const char *buf, off_t offset, size_t length) { auto ret = fileClient_->Write(fd_, buf, 
offset, length); if (ret < 0) { LOG(ERROR) << "Write file failed, retCode = " << ret; return -1; - } else if (ret != length) { + } else if (static_cast(ret) != length) { LOG(ERROR) << "Write file failed, expect write " << length << " bytes, actual write " << ret << " bytes"; return -1; @@ -288,15 +284,14 @@ ssize_t BlockDeviceClientImpl::AlignWrite(const char* buf, } bool BlockDeviceClientImpl::ConvertFileStatus(int fileStatus, - BlockDeviceStatus* bdStatus) { - static const std::map fileStatusMap { - { 0, BlockDeviceStatus::CREATED }, - { 1, BlockDeviceStatus::DELETING }, - { 2, BlockDeviceStatus::CLONING }, - { 3, BlockDeviceStatus::CLONE_META_INSTALLED }, - { 4, BlockDeviceStatus::CLONED }, - { 5, BlockDeviceStatus::BEING_CLONED } - }; + BlockDeviceStatus *bdStatus) { + static const std::map fileStatusMap{ + {0, BlockDeviceStatus::CREATED}, + {1, BlockDeviceStatus::DELETING}, + {2, BlockDeviceStatus::CLONING}, + {3, BlockDeviceStatus::CLONE_META_INSTALLED}, + {4, BlockDeviceStatus::CLONED}, + {5, BlockDeviceStatus::BEING_CLONED}}; auto iter = fileStatusMap.find(fileStatus); if (iter == fileStatusMap.end()) { diff --git a/curvefs/test/client/chunk_cache_manager_test.cpp b/curvefs/test/client/chunk_cache_manager_test.cpp index 0862bd2d87..f9110140d9 100644 --- a/curvefs/test/client/chunk_cache_manager_test.cpp +++ b/curvefs/test/client/chunk_cache_manager_test.cpp @@ -77,7 +77,6 @@ class ChunkCacheManagerTest : public testing::Test { TEST_F(ChunkCacheManagerTest, test_write_new_data) { uint64_t offset = 0; uint64_t len = 1024; - int length = len; char *buf = new char[len]; chunkCacheManager_->WriteNewDataCache(s3ClientAdaptor_, offset, len, buf); diff --git a/curvefs/test/client/client_s3_adaptor_test.cpp b/curvefs/test/client/client_s3_adaptor_test.cpp index 3c13248af5..aeebc631c6 100644 --- a/curvefs/test/client/client_s3_adaptor_test.cpp +++ b/curvefs/test/client/client_s3_adaptor_test.cpp @@ -123,7 +123,7 @@ TEST_F(ClientS3AdaptorTest, write_success) { 
uint64_t inodeId = 1; uint64_t offset = 0; uint64_t length = 1024; - char buf[length] = {0}; + char *buf = new char[length]; memset(buf, 'a', length); auto fileCache = std::make_shared(); EXPECT_CALL(*mockFsCacheManager_, FindOrCreateFileCacheManager(_, _)) @@ -135,32 +135,35 @@ TEST_F(ClientS3AdaptorTest, write_success) { EXPECT_CALL(*mockFsCacheManager_, MemCacheRatio()).WillOnce(Return(10)); EXPECT_CALL(*fileCache, Write(_, _, _)).WillOnce(Return(length)); ASSERT_EQ(length, s3ClientAdaptor_->Write(inodeId, offset, length, buf)); + delete[] buf; } TEST_F(ClientS3AdaptorTest, read_success) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t length = 1024; - char buf[length] = {0}; + char *buf = new char[length]; memset(buf, 'a', length); auto fileCache = std::make_shared(); EXPECT_CALL(*mockFsCacheManager_, FindOrCreateFileCacheManager(_, _)) .WillOnce(Return(fileCache)); EXPECT_CALL(*fileCache, Read(_, _, _, _)).WillOnce(Return(length)); ASSERT_EQ(length, s3ClientAdaptor_->Read(inodeId, offset, length, buf)); + delete[] buf; } TEST_F(ClientS3AdaptorTest, read_fail) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t length = 1024; - char buf[length] = {0}; + char *buf = new char[length]; memset(buf, 'a', length); auto fileCache = std::make_shared(); EXPECT_CALL(*mockFsCacheManager_, FindOrCreateFileCacheManager(_, _)) .WillOnce(Return(fileCache)); EXPECT_CALL(*fileCache, Read(_, _, _, _)).WillOnce(Return(-1)); ASSERT_EQ(-1, s3ClientAdaptor_->Read(inodeId, offset, length, buf)); + delete[] buf; } TEST_F(ClientS3AdaptorTest, truncate_small) { diff --git a/curvefs/test/client/client_s3_test.cpp b/curvefs/test/client/client_s3_test.cpp index 4acbf89037..fc59fef990 100644 --- a/curvefs/test/client/client_s3_test.cpp +++ b/curvefs/test/client/client_s3_test.cpp @@ -111,7 +111,6 @@ TEST_F(ClientS3Test, uploadync) { TEST_F(ClientS3Test, downloadAsync) { const std::string obj("test"); - uint64_t offset = 0; uint64_t len = 1024; char* buf = new char[len]; diff --git 
a/curvefs/test/client/file_cache_manager_test.cpp b/curvefs/test/client/file_cache_manager_test.cpp index 13091d1a1d..d96c6b5b4b 100644 --- a/curvefs/test/client/file_cache_manager_test.cpp +++ b/curvefs/test/client/file_cache_manager_test.cpp @@ -142,7 +142,7 @@ TEST_F(FileCacheManagerTest, test_flush_fail) { TEST_F(FileCacheManagerTest, test_new_write) { uint64_t offset = 0; uint64_t len = 5 * 1024 * 1024; - char buf[len] = {0}; + char *buf = new char[len]; memset(buf, 'a', len); EXPECT_CALL(*mockChunkCacheManager_, FindWriteableDataCache(_, _, _, _)) @@ -154,13 +154,15 @@ TEST_F(FileCacheManagerTest, test_new_write) { fileCacheManager_->SetChunkCacheManagerForTest(0, mockChunkCacheManager_); fileCacheManager_->SetChunkCacheManagerForTest(1, mockChunkCacheManager_); ASSERT_EQ(len, fileCacheManager_->Write(offset, len, buf)); + delete[] buf; } TEST_F(FileCacheManagerTest, test_old_write) { uint64_t offset = 0; uint64_t len = 1024; - char buf[len] = {0}; + char *buf = new char[len]; + memset(buf, 0, len); auto dataCache = std::make_shared( s3ClientAdaptor_, nullptr, offset, 0, nullptr, nullptr); EXPECT_CALL(*dataCache, Write(_, _, _, _)).WillOnce(Return()); @@ -170,14 +172,16 @@ TEST_F(FileCacheManagerTest, test_old_write) { fileCacheManager_->SetChunkCacheManagerForTest(0, mockChunkCacheManager_); ASSERT_EQ(len, fileCacheManager_->Write(offset, len, buf)); fileCacheManager_->ReleaseCache(); + delete[] buf; } TEST_F(FileCacheManagerTest, test_read_cache) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t len = 5 * 1024 * 1024; - char buf[len] = {0}; + char *buf = new char[len]; ReadRequest request; + memset(buf, 0, len); std::vector requests; std::vector emptyRequests; requests.emplace_back(request); @@ -191,14 +195,16 @@ TEST_F(FileCacheManagerTest, test_read_cache) { fileCacheManager_->SetChunkCacheManagerForTest(1, mockChunkCacheManager_); ASSERT_EQ(len, fileCacheManager_->Read(inodeId, offset, len, buf)); + delete[] buf; } TEST_F(FileCacheManagerTest, 
test_read_getinode_fail) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t len = 1024; - char buf[len] = {0}; + char *buf = new char[len]; + memset(buf, 0, len); ReadRequest request; std::vector requests; request.index = 0; @@ -214,13 +220,13 @@ TEST_F(FileCacheManagerTest, test_read_getinode_fail) { EXPECT_CALL(*mockInodeManager_, GetInode(_, _)) .WillOnce(Return(CURVEFS_ERROR::NOTEXIST)); ASSERT_EQ(-1, fileCacheManager_->Read(inodeId, offset, len, buf)); + delete[] buf; } TEST_F(FileCacheManagerTest, test_read_s3) { uint64_t inodeId = 1; uint64_t offset = 0; uint64_t len = 1024; - int length = len; char *buf = new char[len]; char *tmpbuf = new char[len]; diff --git a/curvefs/test/client/mock_client_s3_cache_manager.h b/curvefs/test/client/mock_client_s3_cache_manager.h index d6586eb346..ddd2b03ad6 100644 --- a/curvefs/test/client/mock_client_s3_cache_manager.h +++ b/curvefs/test/client/mock_client_s3_cache_manager.h @@ -26,6 +26,7 @@ #include #include #include +#include #include "curvefs/src/client/s3/client_s3_cache_manager.h" namespace curvefs { diff --git a/curvefs/test/client/rpcclient/metaserver_client_test.cpp b/curvefs/test/client/rpcclient/metaserver_client_test.cpp index d332c03d4e..aaff7bd1b7 100644 --- a/curvefs/test/client/rpcclient/metaserver_client_test.cpp +++ b/curvefs/test/client/rpcclient/metaserver_client_test.cpp @@ -345,12 +345,8 @@ TEST_F(MetaServerClientImplTest, test_CreateDentry_rpc_error) { d.set_txid(10); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -379,12 +375,8 @@ TEST_F(MetaServerClientImplTest, test_CreateDentry_create_dentry_ok) { d.set_txid(10); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID 
= 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -414,12 +406,8 @@ TEST_F(MetaServerClientImplTest, test_CreateDentry_copyset_not_exist) { d.set_txid(10); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -460,12 +448,8 @@ TEST_F(MetaServerClientImplTest, d.set_txid(10); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::CreateDentryResponse response; @@ -492,12 +476,8 @@ TEST_F(MetaServerClientImplTest, test_DeleteDentry) { std::string name = "test"; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::DeleteDentryResponse response; @@ -614,12 +594,8 @@ TEST_F(MetaServerClientImplTest, test_GetInode) { uint64_t inodeid = 2; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::Inode out; out.set_inodeid(inodeid); @@ -725,12 +701,8 @@ TEST_F(MetaServerClientImplTest, test_UpdateInodeAttr) { inode.set_symlink("test9"); // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::Inode out; @@ -950,12 +922,8 @@ TEST_F(MetaServerClientImplTest, 
test_CreateInode) { inode.symlink = "test9"; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::Inode out; out.set_inodeid(100); @@ -1062,12 +1030,8 @@ TEST_F(MetaServerClientImplTest, test_DeleteInode) { uint64_t inodeid = 1; // out - MetaserverID metaServerID = 1; butil::EndPoint target; butil::str2endpoint(addr_.c_str(), &target); - LogicPoolID poolID = 1; - CopysetID copysetID = 100; - uint32_t partitionID = 200; uint64_t applyIndex = 10; curvefs::metaserver::DeleteInodeResponse response; diff --git a/curvefs/test/client/test_disk_cache_manager.cpp b/curvefs/test/client/test_disk_cache_manager.cpp index a0daa0fb57..1613d5d36f 100644 --- a/curvefs/test/client/test_disk_cache_manager.cpp +++ b/curvefs/test/client/test_disk_cache_manager.cpp @@ -196,12 +196,12 @@ TEST_F(TestDiskCacheManager, IsCached) { bool ret = diskCacheManager_->IsCached(fileName); ASSERT_EQ(false, ret); - diskCacheManager_->AddCache(fileName, false); + diskCacheManager_->AddCache(fileName); diskCacheManager_->AddCache(fileName2); ret = diskCacheManager_->IsCached(fileName2); ASSERT_EQ(true, ret); - diskCacheManager_->AddCache(fileName, false); + diskCacheManager_->AddCache(fileName); diskCacheManager_->AddCache(fileName2); ret = diskCacheManager_->IsCached(fileName); ASSERT_EQ(true, ret); @@ -236,11 +236,6 @@ TEST_F(TestDiskCacheManager, IsDiskCacheFull) { int ret = diskCacheManager_->IsDiskCacheFull(); ASSERT_EQ(true, ret); - struct statfs stat; - stat.f_frsize = 1; - stat.f_blocks = 1; - stat.f_bfree = 0; - stat.f_bavail = 0; ret = diskCacheManager_->IsDiskCacheFull(); ASSERT_EQ(true, ret); } @@ -285,7 +280,7 @@ TEST_F(TestDiskCacheManager, TrimRun_1) { diskCacheManager_->InitMetrics("test"); EXPECT_CALL(*wrapper, statfs(NotNull(), NotNull())) .WillRepeatedly(Return(-1)); - int ret = 
diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -318,7 +313,7 @@ TEST_F(TestDiskCacheManager, TrimCache_2) { diskCacheManager_->Init(client_, option); diskCacheManager_->InitMetrics("test"); diskCacheManager_->AddCache("test"); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -354,7 +349,7 @@ TEST_F(TestDiskCacheManager, TrimCache_4) { diskCacheManager_->Init(client_, option); diskCacheManager_->InitMetrics("test"); diskCacheManager_->AddCache("test"); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -391,7 +386,7 @@ TEST_F(TestDiskCacheManager, TrimCache_5) { diskCacheManager_->Init(client_, option); diskCacheManager_->InitMetrics("test"); diskCacheManager_->AddCache("test"); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); sleep(6); diskCacheManager_->UmountDiskCache(); } @@ -432,7 +427,7 @@ TEST_F(TestDiskCacheManager, TrimCache_noexceed) { .Times(2) .WillOnce(Return(-1)) .WillOnce(DoAll(SetArgPointee<1>(rf), Return(0))); - int ret = diskCacheManager_->TrimRun(); + (void)diskCacheManager_->TrimRun(); diskCacheManager_->InitMetrics("test"); sleep(6); diskCacheManager_->UmountDiskCache(); diff --git a/curvefs/test/client/test_disk_cache_write.cpp b/curvefs/test/client/test_disk_cache_write.cpp index 0b7e15a5be..765cc15542 100644 --- a/curvefs/test/client/test_disk_cache_write.cpp +++ b/curvefs/test/client/test_disk_cache_write.cpp @@ -193,7 +193,6 @@ TEST_F(TestDiskCacheWrite, ReadFile) { } TEST_F(TestDiskCacheWrite, UploadFile) { - uint64_t length = 10; EXPECT_CALL(*wrapper_, stat(NotNull(), NotNull())) .WillOnce(Return(-1)); std::string fileName = "test"; @@ -460,7 +459,7 @@ TEST_F(TestDiskCacheWrite, AsyncUploadRun) { })); diskCacheWrite_->AsyncUploadEnqueue("test"); 
diskCacheWrite_->AsyncUploadEnqueue("test"); - int ret = diskCacheWrite_->AsyncUploadRun(); + (void)diskCacheWrite_->AsyncUploadRun(); sleep(1); diskCacheWrite_->AsyncUploadEnqueue("test"); std::string t1 = "test"; diff --git a/curvefs/test/client/test_fuse_s3_client.cpp b/curvefs/test/client/test_fuse_s3_client.cpp index 3fa444bc9c..ae5fb641aa 100644 --- a/curvefs/test/client/test_fuse_s3_client.cpp +++ b/curvefs/test/client/test_fuse_s3_client.cpp @@ -209,7 +209,6 @@ TEST_F(TestFuseS3Client, test_Init_with_KVCache) { // GetInode failed; bad fd TEST_F(TestFuseS3Client, warmUp_inodeBadFd) { sleep(1); - fuse_ino_t parent = 1; std::string name = "test"; fuse_ino_t inodeid = 2; @@ -863,9 +862,9 @@ TEST_F(TestFuseS3Client, warmUp_FetchChildDentry_suc_ListDentry) { TEST_F(TestFuseS3Client, FuseInit_when_fs_exist) { MountOption mOpts; memset(&mOpts, 0, sizeof(mOpts)); - mOpts.fsName = "s3fs"; - mOpts.mountPoint = "host1:/test"; - mOpts.fsType = "s3"; + mOpts.fsName = const_cast("s3fs"); + mOpts.mountPoint = const_cast("host1:/test"); + mOpts.fsType = const_cast("s3"); std::string fsName = mOpts.fsName; FsInfo fsInfoExp; @@ -886,9 +885,9 @@ TEST_F(TestFuseS3Client, FuseInit_when_fs_exist) { TEST_F(TestFuseS3Client, FuseOpDestroy) { MountOption mOpts; memset(&mOpts, 0, sizeof(mOpts)); - mOpts.fsName = "s3fs"; - mOpts.mountPoint = "host1:/test"; - mOpts.fsType = "s3"; + mOpts.fsName = const_cast("s3fs"); + mOpts.mountPoint = const_cast("host1:/test"); + mOpts.fsType = const_cast("s3"); std::string fsName = mOpts.fsName; @@ -899,7 +898,7 @@ TEST_F(TestFuseS3Client, FuseOpDestroy) { } TEST_F(TestFuseS3Client, FuseOpWriteSmallSize) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char *buf = "xxx"; size_t size = 4; @@ -929,7 +928,7 @@ TEST_F(TestFuseS3Client, FuseOpWriteSmallSize) { } TEST_F(TestFuseS3Client, FuseOpWriteFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char *buf = "xxx"; size_t size = 4; @@ -959,7 +958,7 
@@ TEST_F(TestFuseS3Client, FuseOpWriteFailed) { } TEST_F(TestFuseS3Client, FuseOpReadOverRange) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 4; off_t off = 5000; @@ -985,7 +984,7 @@ TEST_F(TestFuseS3Client, FuseOpReadOverRange) { } TEST_F(TestFuseS3Client, FuseOpReadFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 4; off_t off = 0; @@ -1016,9 +1015,9 @@ TEST_F(TestFuseS3Client, FuseOpReadFailed) { } TEST_F(TestFuseS3Client, FuseOpFsync) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; - struct fuse_file_info *fi; + struct fuse_file_info *fi = nullptr; Inode inode; inode.set_inodeid(ino); @@ -1046,9 +1045,9 @@ TEST_F(TestFuseS3Client, FuseOpFsync) { } TEST_F(TestFuseS3Client, FuseOpFlush) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; - struct fuse_file_info *fi; + struct fuse_file_info *fi = nullptr; Inode inode; inode.set_inodeid(ino); inode.set_length(0); @@ -1086,7 +1085,7 @@ TEST_F(TestFuseS3Client, FuseOpFlush) { TEST_F(TestFuseS3Client, FuseOpGetXattr_NotSummaryInfo) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "security.selinux"; size_t size = 100; @@ -1098,7 +1097,7 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_NotSummaryInfo) { TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char rname[] = "curve.dir.rfbytes"; const char name[] = "curve.dir.fbytes"; @@ -1200,7 +1199,7 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir) { TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir_Failed) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char rname[] = "curve.dir.rfbytes"; const char name[] = "curve.dir.fbytes"; @@ -1328,7 +1327,7 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_NotEnableSumInDir_Failed) { TEST_F(TestFuseS3Client, FuseOpGetXattr_EnableSumInDir) { 
client_->SetEnableSumInDir(true); // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "curve.dir.rentries"; size_t size = 100; @@ -1396,7 +1395,7 @@ TEST_F(TestFuseS3Client, FuseOpGetXattr_EnableSumInDir) { TEST_F(TestFuseS3Client, FuseOpGetXattr_EnableSumInDir_Failed) { client_->SetEnableSumInDir(true); // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "curve.dir.entries"; const char rname[] = "curve.dir.rentries"; @@ -1604,7 +1603,7 @@ TEST_F(TestFuseS3Client, FuseOpCreate_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpWrite_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char* buf = "xxx"; size_t size = 4; @@ -1629,8 +1628,6 @@ TEST_F(TestFuseS3Client, FuseOpWrite_EnableSummary) { parentInode.mutable_xattr()->insert({XATTRENTRIES, "1"}); parentInode.mutable_xattr()->insert({XATTRFBYTES, "0"}); - uint64_t parentId = 1; - auto parentInodeWrapper = std::make_shared( parentInode, metaClient_); EXPECT_CALL(*inodeManager_, ShipToFlush(_)) @@ -1660,7 +1657,7 @@ TEST_F(TestFuseS3Client, FuseOpWrite_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpLink_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; fuse_ino_t newparent = 2; const char* newname = "xxxx"; @@ -1705,7 +1702,7 @@ TEST_F(TestFuseS3Client, FuseOpLink_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpUnlink_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -1788,7 +1785,7 @@ TEST_F(TestFuseS3Client, FuseOpUnlink_EnableSummary) { TEST_F(TestFuseS3Client, FuseOpOpen_Trunc_EnableSummary) { client_->SetEnableSumInDir(true); - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; fi.flags = O_TRUNC | O_WRONLY; @@ -1816,8 +1813,6 @@ 
TEST_F(TestFuseS3Client, FuseOpOpen_Trunc_EnableSummary) { auto parentInodeWrapper = std::make_shared( parentInode, metaClient_); - uint64_t parentId = 1; - EXPECT_CALL(*inodeManager_, GetInode(_, _)) .WillOnce( DoAll(SetArgReferee<1>(inodeWrapper), Return(CURVEFS_ERROR::OK))) @@ -1846,9 +1841,8 @@ TEST_F(TestFuseS3Client, FuseOpListXattr) { std::memset(buf, 0, 256); size_t size = 0; - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; - struct fuse_file_info fi; InodeAttr inode; inode.set_inodeid(ino); inode.set_length(4096); @@ -1907,7 +1901,7 @@ TEST_F(TestFuseS3Client, FuseOpListXattr) { TEST_F(TestFuseS3Client, FuseOpSetXattr_TooLong) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "security.selinux"; size_t size = 64 * 1024 + 1; @@ -1921,7 +1915,7 @@ TEST_F(TestFuseS3Client, FuseOpSetXattr_TooLong) { TEST_F(TestFuseS3Client, FuseOpSetXattr) { // in - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char name[] = "security.selinux"; size_t size = 100; diff --git a/curvefs/test/client/test_fuse_volume_client.cpp b/curvefs/test/client/test_fuse_volume_client.cpp index a5f215f6bc..25cb576941 100644 --- a/curvefs/test/client/test_fuse_volume_client.cpp +++ b/curvefs/test/client/test_fuse_volume_client.cpp @@ -215,7 +215,7 @@ TEST_F(TestFuseVolumeClient, FuseOpDestroy) { } TEST_F(TestFuseVolumeClient, FuseOpLookup) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "test"; @@ -241,7 +241,7 @@ TEST_F(TestFuseVolumeClient, FuseOpLookup) { } TEST_F(TestFuseVolumeClient, FuseOpLookupFail) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "test"; @@ -269,7 +269,7 @@ TEST_F(TestFuseVolumeClient, FuseOpLookupFail) { } TEST_F(TestFuseVolumeClient, FuseOpLookupNameTooLong) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "aaaaaaaaaaaaaaaaaaaaa"; @@ -336,7 +336,7 @@ 
TEST_F(TestFuseVolumeClient, FuseOpRead) { } TEST_F(TestFuseVolumeClient, FuseOpOpen) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; fi.flags = 0; @@ -357,7 +357,7 @@ TEST_F(TestFuseVolumeClient, FuseOpOpen) { } TEST_F(TestFuseVolumeClient, FuseOpOpenFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; fi.flags = 0; @@ -428,8 +428,6 @@ TEST_F(TestFuseVolumeClient, FuseOpMkDir) { fuse_ino_t parent = 1; const char *name = "xxx"; mode_t mode = 1; - struct fuse_file_info fi; - fi.flags = 0; fuse_ino_t ino = 2; Inode inode; @@ -512,7 +510,7 @@ TEST_F(TestFuseVolumeClient, FuseOpCreateNameTooLong) { } TEST_F(TestFuseVolumeClient, FuseOpUnlink) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -573,7 +571,7 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlink) { } TEST_F(TestFuseVolumeClient, FuseOpRmDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -637,7 +635,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRmDir) { } TEST_F(TestFuseVolumeClient, FuseOpUnlinkFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "xxx"; uint32_t nlink = 100; @@ -713,7 +711,7 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlinkFailed) { } TEST_F(TestFuseVolumeClient, FuseOpUnlinkNameTooLong) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "aaaaaaaaaaaaaaaaaaaaa"; @@ -722,7 +720,7 @@ TEST_F(TestFuseVolumeClient, FuseOpUnlinkNameTooLong) { } TEST_F(TestFuseVolumeClient, FuseOpOpenDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; @@ -742,7 +740,7 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenDir) { } TEST_F(TestFuseVolumeClient, FuseOpOpenDirFaild) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; @@ 
-762,7 +760,7 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenDirFaild) { } TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 100; off_t off = 0; @@ -808,7 +806,7 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDir) { } TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDirFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; size_t size = 100; off_t off = 0; @@ -851,7 +849,7 @@ TEST_F(TestFuseVolumeClient, FuseOpOpenAndFuseOpReadDirFailed) { } TEST_F(TestFuseVolumeClient, FuseOpRenameBasic) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "A"; fuse_ino_t newparent = 3; @@ -977,7 +975,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameBasic) { } TEST_F(TestFuseVolumeClient, FuseOpRenameOverwrite) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "A"; fuse_ino_t newparent = 3; @@ -1119,7 +1117,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameOverwrite) { } TEST_F(TestFuseVolumeClient, FuseOpRenameOverwriteDir) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name = "A"; fuse_ino_t newparent = 3; @@ -1162,7 +1160,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameOverwriteDir) { } TEST_F(TestFuseVolumeClient, FuseOpRenameNameTooLong) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t parent = 1; std::string name1 = "aaaaaaaaaaaaaaaaaaaaa"; std::string name2 = "xxx"; @@ -1184,7 +1182,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameNameTooLong) { } TEST_F(TestFuseVolumeClient, FuseOpRenameParallel) { - fuse_req_t req; + fuse_req_t req = nullptr; uint64_t txId = 0; auto dentry = GenDentry(1, 1, "A", 0, 10, FILE); dentry.set_type(FsFileType::TYPE_DIRECTORY); @@ -1308,7 +1306,7 @@ TEST_F(TestFuseVolumeClient, FuseOpRenameParallel) { } TEST_F(TestFuseVolumeClient, FuseOpGetAttr) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct 
fuse_file_info fi; memset(&fi, 0, sizeof(fi)); @@ -1327,7 +1325,7 @@ TEST_F(TestFuseVolumeClient, FuseOpGetAttr) { } TEST_F(TestFuseVolumeClient, FuseOpGetAttrFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); @@ -1348,7 +1346,7 @@ TEST_F(TestFuseVolumeClient, FuseOpGetAttrFailed) { TEST_F(TestFuseVolumeClient, FuseOpGetAttrEnableCto) { curvefs::client::common::FLAGS_enableCto = true; - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); @@ -1372,7 +1370,7 @@ TEST_F(TestFuseVolumeClient, FuseOpGetAttrEnableCto) { } TEST_F(TestFuseVolumeClient, FuseOpSetAttr) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct stat attr; int to_set; @@ -1418,7 +1416,7 @@ TEST_F(TestFuseVolumeClient, FuseOpSetAttr) { } TEST_F(TestFuseVolumeClient, FuseOpSetAttrFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct stat attr; int to_set; @@ -1566,7 +1564,7 @@ TEST_F(TestFuseVolumeClient, FuseOpSymlinkNameTooLong) { } TEST_F(TestFuseVolumeClient, FuseOpLink) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; fuse_ino_t newparent = 2; const char *newname = "xxxx"; @@ -1617,7 +1615,7 @@ TEST_F(TestFuseVolumeClient, FuseOpLink) { } TEST_F(TestFuseVolumeClient, FuseOpLinkFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; fuse_ino_t newparent = 2; const char *newname = "xxxx"; @@ -1686,7 +1684,7 @@ TEST_F(TestFuseVolumeClient, FuseOpLinkFailed) { } TEST_F(TestFuseVolumeClient, FuseOpReadLink) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; const char *link = "/a/b/xxx"; @@ -1709,7 +1707,7 @@ TEST_F(TestFuseVolumeClient, FuseOpReadLink) { } TEST_F(TestFuseVolumeClient, FuseOpReadLinkFailed) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; EXPECT_CALL(*inodeManager_, GetInodeAttr(ino, _)) @@ -1721,7 +1719,7 @@ 
TEST_F(TestFuseVolumeClient, FuseOpReadLinkFailed) { } TEST_F(TestFuseVolumeClient, FuseOpRelease) { - fuse_req_t req; + fuse_req_t req = nullptr; fuse_ino_t ino = 1; struct fuse_file_info fi; memset(&fi, 0, sizeof(fi)); diff --git a/curvefs/test/mds/fs_manager_test2.cpp b/curvefs/test/mds/fs_manager_test2.cpp index 127da10517..e6a3bfebad 100644 --- a/curvefs/test/mds/fs_manager_test2.cpp +++ b/curvefs/test/mds/fs_manager_test2.cpp @@ -159,7 +159,6 @@ TEST_F(FsManagerTest2, CreateFoundConflictFsNameAndNotIdenticalToPreviousOne) { std::string fsname = "hello"; FSType type = FSType::TYPE_S3; uint64_t blocksize = 4 * 1024; - bool enableSumInDir = false; FsDetail detail; auto* s3Info = detail.mutable_s3info(); s3Info->set_ak("hello"); @@ -247,7 +246,6 @@ TEST_F(FsManagerTest2, CreateFoundUnCompleteOperation) { std::string fsname = "hello"; FSType type = FSType::TYPE_S3; uint64_t blocksize = 4 * 1024; - bool enableSumInDir = false; FsDetail detail; auto* s3Info = detail.mutable_s3info(); s3Info->set_ak("hello"); @@ -330,7 +328,6 @@ TEST_F(FsManagerTest2, createHybridFs) { std::string fsname = "hello"; FSType type = FSType::TYPE_HYBRID; uint64_t blocksize = 4 * 1024; - bool enableSumInDir = false; FsDetail detail; auto* s3Info = detail.mutable_s3info(); s3Info->set_ak("hello"); diff --git a/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp b/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp index b82ee520f1..951832a3b3 100644 --- a/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp +++ b/curvefs/test/mds/heartbeat/copyset_conf_generator_test.cpp @@ -29,13 +29,13 @@ #include "curvefs/test/mds/mock/mock_coordinator.h" #include "curvefs/test/mds/mock/mock_topology.h" +using ::curvefs::mds::MockCoordinator; +using ::curvefs::mds::topology::CopySetIdType; using ::curvefs::mds::topology::MockIdGenerator; using ::curvefs::mds::topology::MockStorage; using ::curvefs::mds::topology::MockTokenGenerator; using ::curvefs::mds::topology::MockTopology; -using 
::curvefs::mds::MockCoordinator; using ::curvefs::mds::topology::TopoStatusCode; -using ::curvefs::mds::topology::CopySetIdType; using ::curvefs::mds::topology::UNINITIALIZE_ID; using ::testing::_; using ::testing::DoAll; @@ -72,15 +72,14 @@ class TestCopysetConfGenerator : public ::testing::Test { }; TEST_F(TestCopysetConfGenerator, get_copyset_fail) { - MetaServerIdType reportId; + MetaServerIdType reportId = 1; PoolIdType poolId = 1; CopySetIdType copysetId = 2; ::curvefs::mds::topology::CopySetInfo reportCopySetInfo(poolId, copysetId); ::curvefs::mds::heartbeat::ConfigChangeInfo configChInfo; ::curvefs::mds::heartbeat::CopySetConf copysetConf; - EXPECT_CALL(*topology_, GetCopySet(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology_, GetCopySet(_, _)).WillOnce(Return(false)); bool ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, configChInfo, ©setConf); @@ -206,8 +205,8 @@ TEST_F(TestCopysetConfGenerator, get_report_copyset_follower2) { recordCopySetInfo.SetEpoch(3); EXPECT_CALL(*topology_, GetCopySet(_, _)) .WillOnce(DoAll(SetArgPointee<1>(recordCopySetInfo), Return(true))); - ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, - configChInfo, ©setConf); + ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, configChInfo, + ©setConf); ASSERT_FALSE(ret); } @@ -342,8 +341,7 @@ TEST_F(TestCopysetConfGenerator, get_report_copyset_follower7) { EXPECT_CALL(*coordinator_, MetaserverGoingToAdd(_, _)) .WillOnce(Return(false)); - EXPECT_CALL(*topology_, GetMetaServer(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*topology_, GetMetaServer(_, _)).WillOnce(Return(false)); sleep(1); bool ret = generator_->GenCopysetConf(reportId, reportCopySetInfo, diff --git a/curvefs/test/mds/mds_test.cpp b/curvefs/test/mds/mds_test.cpp index 6ab8c72c07..33939323ea 100644 --- a/curvefs/test/mds/mds_test.cpp +++ b/curvefs/test/mds/mds_test.cpp @@ -54,8 +54,8 @@ using ::curve::kvstorage::EtcdClientImp; namespace curvefs { namespace mds { -const char* 
kEtcdAddr = "127.0.0.1:20032"; -const char* kMdsListenAddr = "127.0.0.1:20035"; +const char *kEtcdAddr = "127.0.0.1:20032"; +const char *kMdsListenAddr = "127.0.0.1:20035"; class MdsTest : public ::testing::Test { protected: @@ -63,9 +63,7 @@ class MdsTest : public ::testing::Test { void TearDown() override {} - static void ClearEnv() { - system("rm -rf curve_fs_test_mds.etcd"); - } + static void ClearEnv() { system("rm -rf curve_fs_test_mds.etcd"); } static void StartEtcd() { etcdPid_ = fork(); @@ -88,7 +86,8 @@ class MdsTest : public ::testing::Test { } auto client = std::make_shared(); - EtcdConf conf{const_cast(kEtcdAddr), strlen(kEtcdAddr), 1000}; + EtcdConf conf{const_cast(kEtcdAddr), + static_cast(strlen(kEtcdAddr)), 1000}; uint64_t now = curve::common::TimeUtility::GetTimeofDaySec(); bool initSucc = false; while (curve::common::TimeUtility::GetTimeofDaySec() - now <= 50) { @@ -124,7 +123,7 @@ class MdsTest : public ::testing::Test { pid_t MdsTest::etcdPid_ = 0; void GetChunkIds(std::shared_ptr conf, - int numChunkIds, vector* data) { + int numChunkIds, vector *data) { brpc::Channel channel; std::string allocateServer(kMdsListenAddr); if (channel.Init(allocateServer.c_str(), NULL) != 0) { @@ -134,7 +133,7 @@ void GetChunkIds(std::shared_ptr conf, return; } - brpc::Controller* cntl = new brpc::Controller(); + brpc::Controller *cntl = new brpc::Controller(); AllocateS3ChunkRequest request; AllocateS3ChunkResponse response; curvefs::mds::MdsService_Stub stub(&channel); diff --git a/curvefs/test/mds/schedule/recoverScheduler_test.cpp b/curvefs/test/mds/schedule/recoverScheduler_test.cpp index be780392b3..d48c6a9ee1 100644 --- a/curvefs/test/mds/schedule/recoverScheduler_test.cpp +++ b/curvefs/test/mds/schedule/recoverScheduler_test.cpp @@ -192,7 +192,6 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { MetaServerIdType id1 = 1; MetaServerIdType id2 = 2; MetaServerIdType id3 = 3; - MetaServerIdType id4 = 4; Operator op; 
EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInPool(_)) .WillRepeatedly(Return(90)); diff --git a/curvefs/test/mds/topology/test_topology_manager.cpp b/curvefs/test/mds/topology/test_topology_manager.cpp index 0c6e1e6b29..a7794b7011 100644 --- a/curvefs/test/mds/topology/test_topology_manager.cpp +++ b/curvefs/test/mds/topology/test_topology_manager.cpp @@ -652,7 +652,6 @@ TEST_F(TestTopologyManager, test_RegistServer_PoolNotFound) { } TEST_F(TestTopologyManager, test_RegistServer_ZoneNotFound) { - ServerIdType id = 0x31; PoolIdType poolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPool(poolId, "pool1"); @@ -674,7 +673,6 @@ TEST_F(TestTopologyManager, test_RegistServer_ZoneNotFound) { } TEST_F(TestTopologyManager, test_RegistServer_AllocateIdFail) { - ServerIdType id = 0x31; PoolIdType poolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPool(poolId, "pool"); @@ -1110,7 +1108,6 @@ TEST_F(TestTopologyManager, test_CreateZone_success) { TEST_F(TestTopologyManager, test_CreateZone_AllocateIdFail) { PoolIdType poolId = 0x11; - ZoneIdType zoneId = 0x21; PrepareAddPool(poolId, "poolname1"); CreateZoneRequest request; @@ -1930,8 +1927,6 @@ TEST_F(TestTopologyManager, TEST_F(TestTopologyManager, test_CreatePartitionWithOutAvailableCopyset_HaveNoAvailableMetaserver) { PoolIdType poolId = 0x11; - CopySetIdType copysetId = 0x51; - PartitionIdType partitionId = 0x61; Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -1969,8 +1964,6 @@ TEST_F(TestTopologyManager, TEST_F(TestTopologyManager, test_CreatePartitionWithOutAvailableCopyset_MetaServerSpaceIsFull) { PoolIdType poolId = 0x11; - CopySetIdType copysetId = 0x51; - PartitionIdType partitionId = 0x61; Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -2081,8 +2074,6 @@ TEST_F(TestTopologyManager, TEST_F(TestTopologyManager, test_CreatePartitionWithOutAvailableCopyset_HaveOfflineMetaserver1) { PoolIdType poolId = 0x11; - CopySetIdType copysetId = 0x51; - PartitionIdType partitionId = 0x61; 
Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -2681,8 +2672,6 @@ TEST_F(TestTopologyManager, test_ListPartitionEmpty_Success) { PoolIdType poolId = 0x11; CopySetIdType copysetId = 0x51; PartitionIdType pId1 = 0x61; - PartitionIdType pId2 = 0x62; - PartitionIdType pId3 = 0x63; Pool::RedundanceAndPlaceMentPolicy policy; policy.replicaNum = 3; @@ -2818,7 +2807,6 @@ TEST_F(TestTopologyManager, test_GetCopysetOfPartition_CopysetNotFound) { } TEST_F(TestTopologyManager, test_GetCopysetMembers_Success) { - FsIdType fsId = 0x01; PoolIdType poolId = 0x11; CopySetIdType copysetId = 0x51; @@ -2893,8 +2881,6 @@ TEST_F(TestTopologyManager, test_RegistMemcacheCluster_AllocateIdFail) { server.set_port(1); *request.add_servers() = server; - MemcacheClusterIdType mcCId(1); - EXPECT_CALL(*idGenerator_, GenMemCacheClusterId()) .WillOnce(Return(UNINITIALIZE_ID)); diff --git a/curvefs/test/metaserver/inode_manager_test.cpp b/curvefs/test/metaserver/inode_manager_test.cpp index 9faa65e68f..e1808c45bd 100644 --- a/curvefs/test/metaserver/inode_manager_test.cpp +++ b/curvefs/test/metaserver/inode_manager_test.cpp @@ -63,7 +63,6 @@ auto localfs = curve::fs::Ext4FileSystemImpl::getInstance(); class InodeManagerTest : public ::testing::Test { protected: void SetUp() override { - auto tablename = "partition:1"; dataDir_ = RandomStoragePath(); StorageOptions options; options.dataDir = dataDir_; @@ -415,7 +414,6 @@ TEST_F(InodeManagerTest, GetOrModifyS3ChunkInfo) { TEST_F(InodeManagerTest, UpdateInode) { // create inode - uint32_t fsId = 1; uint64_t ino = 2; Inode inode; diff --git a/curvefs/test/metaserver/metastore_test.cpp b/curvefs/test/metaserver/metastore_test.cpp index e1083af2c5..fbb41016b6 100644 --- a/curvefs/test/metaserver/metastore_test.cpp +++ b/curvefs/test/metaserver/metastore_test.cpp @@ -1645,7 +1645,7 @@ TEST_F(MetastoreTest, GetInodeWithPaddingS3Meta) { request.set_mode(777); request.set_type(FsFileType::TYPE_FILE); - auto rc = 
metastore.CreateInode(&request, &response); + (void)metastore.CreateInode(&request, &response); ASSERT_EQ(response.statuscode(), MetaStatusCode::OK); inodeId = response.inode().inodeid(); } diff --git a/curvefs/test/metaserver/recycle_cleaner_test.cpp b/curvefs/test/metaserver/recycle_cleaner_test.cpp index dce73d392e..ab8cdc1f3b 100644 --- a/curvefs/test/metaserver/recycle_cleaner_test.cpp +++ b/curvefs/test/metaserver/recycle_cleaner_test.cpp @@ -154,7 +154,7 @@ TEST_F(RecycleCleanerTest, time_func_test) { struct tm tmDir; memset(&tmDir, 0, sizeof(tmDir)); - char* c = strptime(now, "%Y-%m-%d-%H", &tmDir); + (void)strptime(now, "%Y-%m-%d-%H", &tmDir); time_t dirTime = mktime(&tmDir); LOG(INFO) << "befor, time = " << timeStamp; diff --git a/curvefs/test/metaserver/s3compact/s3compact_test.cpp b/curvefs/test/metaserver/s3compact/s3compact_test.cpp index 20823090e7..f0b7fd1896 100644 --- a/curvefs/test/metaserver/s3compact/s3compact_test.cpp +++ b/curvefs/test/metaserver/s3compact/s3compact_test.cpp @@ -259,7 +259,7 @@ TEST_F(S3CompactTest, test_GetNeedCompact) { ref->set_offset(i + 64 * j); ref->set_len(1); } - s3chunkinfoMap.insert({j, l}); + s3chunkinfoMap.insert({static_cast(j), l}); } ASSERT_EQ(impl_->GetNeedCompact(s3chunkinfoMap, 64 * 19 + 30, 64).size(), opts_.maxChunksPerCompact); diff --git a/curvefs/test/metaserver/storage/storage_test.cpp b/curvefs/test/metaserver/storage/storage_test.cpp index 34a82bd924..d47f41a1ee 100644 --- a/curvefs/test/metaserver/storage/storage_test.cpp +++ b/curvefs/test/metaserver/storage/storage_test.cpp @@ -845,7 +845,6 @@ void TestMixOperator(std::shared_ptr kvStorage) { void TestTransaction(std::shared_ptr kvStorage) { Status s; - size_t size; Dentry value; std::shared_ptr iterator; std::shared_ptr txn; diff --git a/nebd/src/common/stringstatus.cpp b/nebd/src/common/stringstatus.cpp index ab18f72c20..12e9806708 100644 --- a/nebd/src/common/stringstatus.cpp +++ b/nebd/src/common/stringstatus.cpp @@ -42,7 +42,7 @@ void 
StringStatus::Update() { int count = 0; for (auto &item : kvs_) { count += 1; - if (count == kvs_.size()) { + if (count == static_cast(kvs_.size())) { jsonStr += "\"" + item.first + "\"" + ":" + "\"" + item.second + "\""; } else { diff --git a/nebd/src/part1/libnebd.cpp b/nebd/src/part1/libnebd.cpp index 06898883cc..d9cb15d071 100644 --- a/nebd/src/part1/libnebd.cpp +++ b/nebd/src/part1/libnebd.cpp @@ -80,11 +80,19 @@ int nebd_lib_close(int fd) { } int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length) { + (void)fd; + (void)buf; + (void)offset; + (void)length; // not support sync read return -1; } int nebd_lib_pwrite(int fd, const void* buf, off_t offset, size_t length) { + (void)fd; + (void)buf; + (void)offset; + (void)length; // not support sync write return -1; } @@ -102,6 +110,7 @@ int nebd_lib_aio_pwrite(int fd, NebdClientAioContext* context) { } int nebd_lib_sync(int fd) { + (void)fd; return 0; } diff --git a/nebd/src/part1/nebd_client.cpp b/nebd/src/part1/nebd_client.cpp index 734fee9480..a8d942a5b7 100644 --- a/nebd/src/part1/nebd_client.cpp +++ b/nebd/src/part1/nebd_client.cpp @@ -243,6 +243,7 @@ int NebdClient::Extend(int fd, int64_t newsize) { auto task = [&](brpc::Controller* cntl, brpc::Channel* channel, bool* rpcFailed) -> int64_t { + (void)channel; nebd::client::NebdFileService_Stub stub(&channel_); nebd::client::ResizeRequest request; nebd::client::ResizeResponse response; @@ -359,7 +360,9 @@ int NebdClient::AioRead(int fd, NebdClientAioContext* aioctx) { return 0; } -static void EmptyDeleter(void* m) {} +static void EmptyDeleter(void* m) { + (void)m; +} int NebdClient::AioWrite(int fd, NebdClientAioContext* aioctx) { auto task = [this, fd, aioctx]() { @@ -622,6 +625,7 @@ void NebdClient::InitLogger(const LogOption& logOption) { int NebdClient::ExecAsyncRpcTask(void* meta, bthread::TaskIterator& iter) { // NOLINT + (void)meta; if (iter.is_queue_stopped()) { return 0; } diff --git a/nebd/src/part2/file_service.cpp 
b/nebd/src/part2/file_service.cpp index 5a7f24bee1..984a638bfc 100644 --- a/nebd/src/part2/file_service.cpp +++ b/nebd/src/part2/file_service.cpp @@ -118,6 +118,7 @@ void NebdFileServiceImpl::OpenFile( const nebd::client::OpenFileRequest* request, nebd::client::OpenFileResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -282,6 +283,7 @@ void NebdFileServiceImpl::GetInfo( const nebd::client::GetInfoRequest* request, nebd::client::GetInfoResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -306,6 +308,7 @@ void NebdFileServiceImpl::CloseFile( const nebd::client::CloseFileRequest* request, nebd::client::CloseFileResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -326,6 +329,7 @@ void NebdFileServiceImpl::ResizeFile( const nebd::client::ResizeRequest* request, nebd::client::ResizeResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); @@ -345,6 +349,7 @@ void NebdFileServiceImpl::InvalidateCache( const nebd::client::InvalidateCacheRequest* request, nebd::client::InvalidateCacheResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); response->set_retcode(RetCode::kNoOK); diff --git a/nebd/src/part2/heartbeat_service.cpp b/nebd/src/part2/heartbeat_service.cpp index 3b564bd80b..00e897975c 100644 --- a/nebd/src/part2/heartbeat_service.cpp +++ b/nebd/src/part2/heartbeat_service.cpp @@ -34,6 +34,7 @@ void NebdHeartbeatServiceImpl::KeepAlive( const nebd::client::HeartbeatRequest* request, nebd::client::HeartbeatResponse* response, google::protobuf::Closure* done) { + (void)cntl_base; brpc::ClosureGuard doneGuard(done); bool ok = true; 
uint64_t curTime = TimeUtility::GetTimeofDayMs(); diff --git a/nebd/src/part2/metafile_manager.cpp b/nebd/src/part2/metafile_manager.cpp index 0dd133b78f..288fde68ce 100644 --- a/nebd/src/part2/metafile_manager.cpp +++ b/nebd/src/part2/metafile_manager.cpp @@ -118,7 +118,7 @@ int NebdMetaFileManager::AtomicWriteFile(const Json::Value& root) { int writeSize = wrapper_->pwrite(fd, jsonString.c_str(), jsonString.size(), 0); wrapper_->close(fd); - if (writeSize != jsonString.size()) { + if (writeSize != static_cast(jsonString.size())) { LOG(ERROR) << "Write tmp file " << tmpFilePath << " fail"; return -1; } @@ -206,7 +206,6 @@ int NebdMetaFileParser::Parse(Json::Value root, for (const auto& volume : volumes) { std::string fileName; - int fd; NebdFileMeta meta; if (volume[kFileName].isNull()) { diff --git a/nebd/src/part2/request_executor_curve.cpp b/nebd/src/part2/request_executor_curve.cpp index 37f1afae37..670d05879c 100644 --- a/nebd/src/part2/request_executor_curve.cpp +++ b/nebd/src/part2/request_executor_curve.cpp @@ -270,6 +270,7 @@ int CurveRequestExecutor::AioWrite( int CurveRequestExecutor::Flush( NebdFileInstance* fd, NebdServerAioContext* aioctx) { + (void)fd; aioctx->ret = 0; aioctx->cb(aioctx); diff --git a/nebd/test/common/rw_lock_test.cpp b/nebd/test/common/rw_lock_test.cpp index aca4c3f84e..59c5b22787 100644 --- a/nebd/test/common/rw_lock_test.cpp +++ b/nebd/test/common/rw_lock_test.cpp @@ -78,7 +78,6 @@ TEST(RWLockTest, basic_test) { auto readFunc = [&] { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); - auto j = writeCnt + i; } }; { @@ -147,7 +146,6 @@ TEST(BthreadRWLockTest, basic_test) { auto readFunc = [&] { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); - auto j = writeCnt + i; } }; { diff --git a/nebd/test/utils/config_generator.h b/nebd/test/utils/config_generator.h index 4b9cef788e..450e78ffb9 100644 --- a/nebd/test/utils/config_generator.h +++ b/nebd/test/utils/config_generator.h @@ 
-33,7 +33,6 @@ namespace nebd { namespace common { static const char* kNebdClientConfigPath = "nebd/etc/nebd/nebd-client.conf"; -static const char* kNebdServerConfigPath = "nebd/etc/nebd/nebd-server.conf"; class NebdClientConfigGenerator { public: diff --git a/src/chunkserver/braft_cli_service.cpp b/src/chunkserver/braft_cli_service.cpp index 8c7d9ec7dc..6b21d87c44 100755 --- a/src/chunkserver/braft_cli_service.cpp +++ b/src/chunkserver/braft_cli_service.cpp @@ -207,6 +207,7 @@ void BRaftCliServiceImpl::transfer_leader( const TransferLeaderRequest *request, TransferLeaderResponse *response, ::google::protobuf::Closure *done) { + (void)response; brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; diff --git a/src/chunkserver/braft_cli_service2.cpp b/src/chunkserver/braft_cli_service2.cpp index 3fb8f569fc..0d13ac3176 100755 --- a/src/chunkserver/braft_cli_service2.cpp +++ b/src/chunkserver/braft_cli_service2.cpp @@ -161,6 +161,7 @@ static void change_peers_returned(brpc::Controller* cntl, scoped_refptr /*node*/, ::google::protobuf::Closure* done, const butil::Status& st) { + (void)request; brpc::ClosureGuard done_guard(done); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); @@ -275,6 +276,7 @@ void BRaftCliServiceImpl2::TransferLeader( const TransferLeaderRequest2 *request, TransferLeaderResponse2 *response, ::google::protobuf::Closure *done) { + (void)response; brpc::Controller *cntl = (brpc::Controller *) controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; @@ -305,6 +307,7 @@ void BRaftCliServiceImpl2::ResetPeer(RpcController* controller, const ResetPeerRequest2* request, ResetPeerResponse2* response, Closure* done) { + (void)response; brpc::Controller* cntl = (brpc::Controller*)controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; @@ -342,6 +345,7 @@ static void snapshot_returned(brpc::Controller* cntl, scoped_refptr node, 
::google::protobuf::Closure* done, const butil::Status& st) { + (void)node; brpc::ClosureGuard done_guard(done); if (!st.ok()) { cntl->SetFailed(st.error_code(), "%s", st.error_cstr()); @@ -352,6 +356,7 @@ void BRaftCliServiceImpl2::Snapshot(RpcController* controller, const SnapshotRequest2* request, SnapshotResponse2* response, Closure* done) { + (void)response; brpc::Controller* cntl = (brpc::Controller*)controller; brpc::ClosureGuard done_guard(done); scoped_refptr node; @@ -374,6 +379,8 @@ void BRaftCliServiceImpl2::SnapshotAll(RpcController* controller, const SnapshotAllRequest* request, SnapshotAllResponse* response, Closure* done) { + (void)request; + (void)response; brpc::Controller* cntl = (brpc::Controller*)controller; brpc::ClosureGuard done_guard(done); braft::NodeManager *const nm = braft::NodeManager::GetInstance(); diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp index fd8a33a6ca..aaa7e721ec 100755 --- a/src/chunkserver/chunk_service.cpp +++ b/src/chunkserver/chunk_service.cpp @@ -211,6 +211,8 @@ void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, const CreateS3CloneChunkRequest* request, CreateS3CloneChunkResponse* response, Closure* done) { + (void)controller; + (void)request; brpc::ClosureGuard doneGuard(done); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(INFO) << "Invalid request, serverSide Not implement yet"; @@ -238,7 +240,6 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, } // 判断request参数是否合法 - auto maxSize = copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -425,6 +426,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, const GetChunkInfoRequest *request, GetChunkInfoResponse *response, Closure *done) { + (void)controller; 
ChunkServiceClosure* closure = new (std::nothrow) ChunkServiceClosure(inflightThrottle_, nullptr, @@ -494,6 +496,7 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, const GetChunkHashRequest *request, GetChunkHashResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); // 判断request参数是否合法 @@ -553,6 +556,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, const UpdateEpochRequest *request, UpdateEpochResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); bool success = epochMap_->UpdateEpoch(request->fileid(), request->epoch()); if (success) { diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index f47ee9978b..339ecbbe66 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -31,14 +31,12 @@ namespace curve { namespace chunkserver { IOMetric::IOMetric() - : rps_(&reqNum_, 1) - , iops_(&ioNum_, 1) - , eps_(&errorNum_, 1) - , bps_(&ioBytes_, 1) {} + : rps_(&reqNum_, 1), iops_(&ioNum_, 1), eps_(&errorNum_, 1), + bps_(&ioBytes_, 1) {} IOMetric::~IOMetric() {} -int IOMetric::Init(const std::string& prefix) { +int IOMetric::Init(const std::string &prefix) { // 暴露所有的metric if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; @@ -83,9 +81,7 @@ int IOMetric::Init(const std::string& prefix) { return 0; } -void IOMetric::OnRequest() { - reqNum_ << 1; -} +void IOMetric::OnRequest() { reqNum_ << 1; } void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { if (!hasError) { @@ -99,7 +95,7 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { } -int CSIOMetric::Init(const std::string& prefix) { +int CSIOMetric::Init(const std::string &prefix) { // 初始化io统计项metric std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; @@ -154,10 +150,8 @@ void CSIOMetric::OnRequest(CSIOMetricType 
type) { } } -void CSIOMetric::OnResponse(CSIOMetricType type, - size_t size, - int64_t latUs, - bool hasError) { +void CSIOMetric::OnResponse(CSIOMetricType type, size_t size, int64_t latUs, + bool hasError) { IOMetricPtr ioMetric = GetIOMetric(type); if (ioMetric != nullptr) { ioMetric->OnResponse(size, latUs, hasError); @@ -167,43 +161,42 @@ void CSIOMetric::OnResponse(CSIOMetricType type, IOMetricPtr CSIOMetric::GetIOMetric(CSIOMetricType type) { IOMetricPtr result = nullptr; switch (type) { - case CSIOMetricType::READ_CHUNK: - result = readMetric_; - break; - case CSIOMetricType::WRITE_CHUNK: - result = writeMetric_; - break; - case CSIOMetricType::RECOVER_CHUNK: - result = recoverMetric_; - break; - case CSIOMetricType::PASTE_CHUNK: - result = pasteMetric_; - break; - case CSIOMetricType::DOWNLOAD: - result = downloadMetric_; - break; - default: - result = nullptr; - break; + case CSIOMetricType::READ_CHUNK: + result = readMetric_; + break; + case CSIOMetricType::WRITE_CHUNK: + result = writeMetric_; + break; + case CSIOMetricType::RECOVER_CHUNK: + result = recoverMetric_; + break; + case CSIOMetricType::PASTE_CHUNK: + result = pasteMetric_; + break; + case CSIOMetricType::DOWNLOAD: + result = downloadMetric_; + break; + default: + result = nullptr; + break; } return result; } -int CSCopysetMetric::Init(const LogicPoolID& logicPoolId, - const CopysetID& copysetId) { +int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, + const CopysetID ©setId) { logicPoolId_ = logicPoolId; copysetId_ = copysetId; int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { - LOG(ERROR) << "Init Copyset (" - << logicPoolId << "," << copysetId << ")" + LOG(ERROR) << "Init Copyset (" << logicPoolId << "," << copysetId << ")" << " metric failed."; return -1; } return 0; } -void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { +void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { std::string chunkCountPrefix = Prefix() + "_chunk_count"; std::string 
snapshotCountPrefix = Prefix() + "snapshot_count"; std::string cloneChunkCountPrefix = Prefix() + "_clonechunk_count"; @@ -216,26 +209,21 @@ void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { } void CSCopysetMetric::MonitorCurveSegmentLogStorage( - CurveSegmentLogStorage* logStorage) { + CurveSegmentLogStorage *logStorage) { std::string walSegmentCountPrefix = Prefix() + "_walsegment_count"; walSegmentCount_ = std::make_shared>( walSegmentCountPrefix, GetLogStorageWalSegmentCountFunc, logStorage); } ChunkServerMetric::ChunkServerMetric() - : hasInited_(false) - , leaderCount_(nullptr) - , chunkLeft_(nullptr) - , walSegmentLeft_(nullptr) - , chunkTrashed_(nullptr) - , chunkCount_(nullptr) - , snapshotCount_(nullptr) - , cloneChunkCount_(nullptr) - , walSegmentCount_(nullptr) {} - -ChunkServerMetric* ChunkServerMetric::self_ = nullptr; - -ChunkServerMetric* ChunkServerMetric::GetInstance() { + : hasInited_(false), leaderCount_(nullptr), chunkLeft_(nullptr), + walSegmentLeft_(nullptr), chunkTrashed_(nullptr), chunkCount_(nullptr), + walSegmentCount_(nullptr), snapshotCount_(nullptr), + cloneChunkCount_(nullptr) {} + +ChunkServerMetric *ChunkServerMetric::self_ = nullptr; + +ChunkServerMetric *ChunkServerMetric::GetInstance() { // chunkserver metric 在chunkserver启动时初始化创建 // 因此创建的时候不会存在竞争,不需要锁保护 if (self_ == nullptr) { @@ -244,7 +232,7 @@ ChunkServerMetric* ChunkServerMetric::GetInstance() { return self_; } -int ChunkServerMetric::Init(const ChunkServerMetricOptions& option) { +int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { if (hasInited_) { LOG(WARNING) << "chunkserver metric has inited."; return 0; @@ -305,8 +293,8 @@ int ChunkServerMetric::Fini() { return 0; } -int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId) { +int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId) { if (!option_.collectMetric) { return 0; } @@ -314,8 +302,8 @@ 
int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, GroupId groupId = ToGroupId(logicPoolId, copysetId); bool exist = copysetMetricMap_.Exist(groupId); if (exist) { - LOG(ERROR) << "Create Copyset (" - << logicPoolId << "," << copysetId << ")" + LOG(ERROR) << "Create Copyset (" << logicPoolId << "," << copysetId + << ")" << " metric failed : is already exists."; return -1; } @@ -323,8 +311,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, CopysetMetricPtr copysetMetric = std::make_shared(); int ret = copysetMetric->Init(logicPoolId, copysetId); if (ret < 0) { - LOG(ERROR) << "Create Copyset (" - << logicPoolId << "," << copysetId << ")" + LOG(ERROR) << "Create Copyset (" << logicPoolId << "," << copysetId + << ")" << " metric failed : init failed."; return -1; } @@ -333,8 +321,9 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, return 0; } -CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( - const LogicPoolID& logicPoolId, const CopysetID& copysetId) { +CopysetMetricPtr +ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId) { if (!option_.collectMetric) { return nullptr; } @@ -343,8 +332,8 @@ CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( return copysetMetricMap_.Get(groupId); } -int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId) { +int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); // 这里先保存copyset metric,等remove后再去释放 // 防止在读写锁里面去操作metric,导致死锁 @@ -353,8 +342,8 @@ int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, return 0; } -void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, +void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, + const CopysetID ©setId, CSIOMetricType type) { if 
(!option_.collectMetric) { return; @@ -367,12 +356,10 @@ void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, ioMetrics_.OnRequest(type); } -void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, - CSIOMetricType type, - size_t size, - int64_t latUs, - bool hasError) { +void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, + const CopysetID ©setId, + CSIOMetricType type, size_t size, + int64_t latUs, bool hasError) { if (!option_.collectMetric) { return; } @@ -384,7 +371,7 @@ void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { if (!option_.collectMetric) { return; } @@ -394,7 +381,7 @@ void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); } -void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { +void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { if (!option_.collectMetric) { return; } @@ -404,7 +391,7 @@ void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool); } -void ChunkServerMetric::MonitorTrash(Trash* trash) { +void ChunkServerMetric::MonitorTrash(Trash *trash) { if (!option_.collectMetric) { return; } @@ -430,7 +417,7 @@ void ChunkServerMetric::DecreaseLeaderCount() { *leaderCount_ << -1; } -void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { +void ChunkServerMetric::ExposeConfigMetric(common::Configuration *conf) { if (!option_.collectMetric) { return; } @@ -441,4 +428,3 @@ void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { } // namespace chunkserver } // namespace curve - diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index 
097700103b..d4354d196f 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -36,11 +36,11 @@ #include "src/common/configuration.h" #include "src/chunkserver/datastore/file_pool.h" -using curve::common::Uncopyable; -using curve::common::RWLock; +using curve::common::Configuration; using curve::common::ReadLockGuard; +using curve::common::RWLock; +using curve::common::Uncopyable; using curve::common::WriteLockGuard; -using curve::common::Configuration; namespace curve { namespace chunkserver { @@ -54,8 +54,7 @@ class Trash; template using PassiveStatusPtr = std::shared_ptr>; -template -using AdderPtr = std::shared_ptr>; +template using AdderPtr = std::shared_ptr>; // 使用LatencyRecorder的实现来统计读写请求的size情况 // 可以统计分位值、最大值、中位数、平均值等情况 @@ -72,7 +71,7 @@ class IOMetric { * @param prefix: 用于bvar曝光时使用的前缀 * @return 成功返回0,失败返回-1 */ - int Init(const std::string& prefix); + int Init(const std::string &prefix); /** * IO请求到来时统计requestNum */ @@ -88,25 +87,25 @@ class IOMetric { public: // io请求的数量 - bvar::Adder reqNum_; + bvar::Adder reqNum_; // 成功io的数量 - bvar::Adder ioNum_; + bvar::Adder ioNum_; // 失败的io个数 - bvar::Adder errorNum_; + bvar::Adder errorNum_; // 所有io的数据量 - bvar::Adder ioBytes_; + bvar::Adder ioBytes_; // io的延时情况(分位值、最大值、中位数、平均值) - bvar::LatencyRecorder latencyRecorder_; + bvar::LatencyRecorder latencyRecorder_; // io大小的情况(分位值、最大值、中位数、平均值) - IOSizeRecorder sizeRecorder_; + IOSizeRecorder sizeRecorder_; // 最近1秒请求的IO数量 - bvar::PerSecond> rps_; + bvar::PerSecond> rps_; // 最近1秒的iops - bvar::PerSecond> iops_; + bvar::PerSecond> iops_; // 最近1秒的出错IO数量 - bvar::PerSecond> eps_; + bvar::PerSecond> eps_; // 最近1秒的数据量 - bvar::PerSecond> bps_; + bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -121,11 +120,8 @@ enum class CSIOMetricType { class CSIOMetric { public: CSIOMetric() - : readMetric_(nullptr) - , writeMetric_(nullptr) - , recoverMetric_(nullptr) - , pasteMetric_(nullptr) - , downloadMetric_(nullptr) {} + : 
readMetric_(nullptr), writeMetric_(nullptr), recoverMetric_(nullptr), + pasteMetric_(nullptr), downloadMetric_(nullptr) {} ~CSIOMetric() {} @@ -143,9 +139,7 @@ class CSIOMetric { * @param latUS: 此次io的延时 * @param hasError: 此次io是否有错误产生 */ - void OnResponse(CSIOMetricType type, - size_t size, - int64_t latUs, + void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** @@ -159,7 +153,7 @@ class CSIOMetric { * 初始化各项op的metric统计项 * @return 成功返回0,失败返回-1 */ - int Init(const std::string& prefix); + int Init(const std::string &prefix); /** * 释放各项op的metric资源 */ @@ -181,12 +175,9 @@ class CSIOMetric { class CSCopysetMetric { public: CSCopysetMetric() - : logicPoolId_(0) - , copysetId_(0) - , chunkCount_(nullptr) - , snapshotCount_(nullptr) - , cloneChunkCount_(nullptr) - , walSegmentCount_(nullptr) {} + : logicPoolId_(0), copysetId_(0), chunkCount_(nullptr), + walSegmentCount_(nullptr), snapshotCount_(nullptr), + cloneChunkCount_(nullptr) {} ~CSCopysetMetric() {} @@ -196,27 +187,25 @@ class CSCopysetMetric { * @param copysetId: copyset的id * @return 成功返回0,失败返回-1 */ - int Init(const LogicPoolID& logicPoolId, const CopysetID& copysetId); + int Init(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** * 监控DataStore指标,主要包括chunk的数量、快照的数量等 * @param datastore: 该copyset下的datastore指针 */ - void MonitorDataStore(CSDataStore* datastore); + void MonitorDataStore(CSDataStore *datastore); /** * @brief: Monitor log storage's metric, like the number of WAL segment file * @param logStorage: The pointer to CurveSegmentLogStorage */ - void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage* logStorage); + void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); /** * 执行请求前记录metric * @param type: 请求对应的metric类型 */ - void OnRequest(CSIOMetricType type) { - ioMetrics_.OnRequest(type); - } + void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** * 执行请求后记录metric @@ -226,9 +215,7 @@ class CSCopysetMetric { * @param latUS: 此次io的延时 * 
@param hasError: 此次io是否有错误产生 */ - void OnResponse(CSIOMetricType type, - size_t size, - int64_t latUs, + void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { ioMetrics_.OnResponse(type, size, latUs, hasError); } @@ -272,10 +259,8 @@ class CSCopysetMetric { private: inline std::string Prefix() { - return "copyset_" - + std::to_string(logicPoolId_) - + "_" - + std::to_string(copysetId_); + return "copyset_" + std::to_string(logicPoolId_) + "_" + + std::to_string(copysetId_); } private: @@ -375,7 +360,7 @@ class ChunkServerMetric : public Uncopyable { * @pa)ram option: 初始化配置项 * @return 成功返回0,失败返回-1 */ - int Init(const ChunkServerMetricOptions& option); + int Init(const ChunkServerMetricOptions &option); /** * 释放metric资源 @@ -389,8 +374,7 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: 此次io操作所在的copysetid * @param type: 请求类型 */ - void OnRequest(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, + void OnRequest(const LogicPoolID &logicPoolId, const CopysetID ©setId, CSIOMetricType type); /** @@ -403,11 +387,8 @@ class ChunkServerMetric : public Uncopyable { * @param latUS: 此次io的延时 * @param hasError: 此次io是否有错误产生 */ - void OnResponse(const LogicPoolID& logicPoolId, - const CopysetID& copysetId, - CSIOMetricType type, - size_t size, - int64_t latUs, + void OnResponse(const LogicPoolID &logicPoolId, const CopysetID ©setId, + CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** @@ -417,8 +398,8 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: copyset的id * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 */ - int CreateCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId); + int CreateCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId); /** * 获取指定copyset的metric @@ -426,8 +407,8 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: copyset的id * @return 成功返回指定的copyset metric,失败返回nullptr */ - CopysetMetricPtr GetCopysetMetric(const 
LogicPoolID& logicPoolId, - const CopysetID& copysetId); + CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId); /** * 删除指定copyset的metric @@ -435,26 +416,26 @@ class ChunkServerMetric : public Uncopyable { * @param copysetId: copyset的id * @return 成功返回0,失败返回-1 */ - int RemoveCopysetMetric(const LogicPoolID& logicPoolId, - const CopysetID& copysetId); + int RemoveCopysetMetric(const LogicPoolID &logicPoolId, + const CopysetID ©setId); /** * 监视chunk分配池,主要监视池中chunk的数量 * @param chunkFilePool: chunkfilePool的对象指针 */ - void MonitorChunkFilePool(FilePool* chunkFilePool); + void MonitorChunkFilePool(FilePool *chunkFilePool); /** * 监视wal segment分配池,主要监视池中segment的数量 * @param walFilePool: walfilePool的对象指针 */ - void MonitorWalFilePool(FilePool* walFilePool); + void MonitorWalFilePool(FilePool *walFilePool); /** * 监视回收站 * @param trash: trash的对象指针 */ - void MonitorTrash(Trash* trash); + void MonitorTrash(Trash *trash); /** * 增加 leader count 计数 @@ -470,7 +451,7 @@ class ChunkServerMetric : public Uncopyable { * 更新配置项数据 * @param conf: 配置内容 */ - void ExposeConfigMetric(common::Configuration* conf); + void ExposeConfigMetric(common::Configuration *conf); /** * 获取指定类型的IOMetric @@ -481,13 +462,9 @@ class ChunkServerMetric : public Uncopyable { return ioMetrics_.GetIOMetric(type); } - CopysetMetricMap* GetCopysetMetricMap() { - return ©setMetricMap_; - } + CopysetMetricMap *GetCopysetMetricMap() { return ©setMetricMap_; } - uint32_t GetCopysetCount() { - return copysetMetricMap_.Size(); - } + uint32_t GetCopysetCount() { return copysetMetricMap_.Size(); } uint32_t GetLeaderCount() const { if (leaderCount_ == nullptr) @@ -570,7 +547,7 @@ class ChunkServerMetric : public Uncopyable { // chunkserver上的IO类型的metric统计 CSIOMetric ioMetrics_; // 用于单例模式的自指指针 - static ChunkServerMetric* self_; + static ChunkServerMetric *self_; }; } // namespace chunkserver diff --git a/src/chunkserver/cli.cpp b/src/chunkserver/cli.cpp index 6e7056c4e8..71c7baea02 100644 --- 
a/src/chunkserver/cli.cpp +++ b/src/chunkserver/cli.cpp @@ -206,6 +206,8 @@ butil::Status Snapshot(const LogicPoolID &logicPoolId, const CopysetID ©setId, const PeerId &peer, const braft::cli::CliOptions &options) { + (void)logicPoolId; + (void)copysetId; brpc::Channel channel; if (channel.Init(peer.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", diff --git a/src/chunkserver/clone_copyer.cpp b/src/chunkserver/clone_copyer.cpp index 858a257939..bc449fd2c8 100644 --- a/src/chunkserver/clone_copyer.cpp +++ b/src/chunkserver/clone_copyer.cpp @@ -62,7 +62,7 @@ void OriginCopyer::DeleteExpiredCurveCache(void* arg) { while (taskCopyer->curveOpenTime_.size() > 0) { CurveOpenTimestamp oldestCache = *taskCopyer->curveOpenTime_.begin(); if (now.tv_sec - oldestCache.lastUsedSec < - taskCopyer->curveFileTimeoutSec_) { + static_cast(taskCopyer->curveFileTimeoutSec_)) { break; } @@ -186,6 +186,7 @@ void OriginCopyer::DownloadFromS3(const string& objectName, GetObjectAsyncCallBack cb = [=] (const S3Adapter* adapter, const std::shared_ptr& context) { + (void)adapter; brpc::ClosureGuard doneGuard(done); if (context->retCode != 0) { done->SetFailed(); diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index 0058f449a6..a9bedbae7b 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp @@ -47,9 +47,9 @@ DownloadClosure::DownloadClosure(std::shared_ptr readRequest, Closure* done) : isFailed_(false) , beginTime_(TimeUtility::GetTimeofDayUs()) - , readRequest_(readRequest) - , cloneCore_(cloneCore) , downloadCtx_(downloadCtx) + , cloneCore_(cloneCore) + , readRequest_(readRequest) , done_(done) { // 记录初始metric if (readRequest_ != nullptr) { @@ -354,7 +354,6 @@ int CloneCore::ReadThenMerge(std::shared_ptr readRequest, const butil::IOBuf* cloneData, char* chunkData) { const ChunkRequest* request = readRequest->request_; - ChunkID id = readRequest->ChunkId(); std::shared_ptr dataStore = readRequest->datastore_; 
off_t offset = request->offset(); diff --git a/src/chunkserver/concurrent_apply/concurrent_apply.h b/src/chunkserver/concurrent_apply/concurrent_apply.h index c11e3f76a4..af167c3e9a 100644 --- a/src/chunkserver/concurrent_apply/concurrent_apply.h +++ b/src/chunkserver/concurrent_apply/concurrent_apply.h @@ -61,8 +61,8 @@ class CURVE_CACHELINE_ALIGNMENT ConcurrentApplyModule { public: ConcurrentApplyModule(): start_(false), rconcurrentsize_(0), - wconcurrentsize_(0), rqueuedepth_(0), + wconcurrentsize_(0), wqueuedepth_(0), cond_(0) {} diff --git a/src/chunkserver/conf_epoch_file.cpp b/src/chunkserver/conf_epoch_file.cpp index 237668378f..6a39c6ce3e 100644 --- a/src/chunkserver/conf_epoch_file.cpp +++ b/src/chunkserver/conf_epoch_file.cpp @@ -34,10 +34,8 @@ namespace chunkserver { const uint32_t kConfEpochFileMaxSize = 4096; const uint64_t kConfEpochFileMagic = 0x6225929368674119; -int ConfEpochFile::Load(const std::string &path, - LogicPoolID *logicPoolID, - CopysetID *copysetID, - uint64_t *epoch) { +int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, + CopysetID *copysetID, uint64_t *epoch) { int fd = fs_->Open(path.c_str(), O_RDWR); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -86,16 +84,13 @@ int ConfEpochFile::Load(const std::string &path, LOG(INFO) << "Load conf epoch " << path << " success. " << "logicPoolID: " << *logicPoolID - << ", copysetID: " << *copysetID - << ", epoch: " << *epoch; + << ", copysetID: " << *copysetID << ", epoch: " << *epoch; return 0; } -int ConfEpochFile::Save(const std::string &path, - const LogicPoolID logicPoolID, - const CopysetID copysetID, - const uint64_t epoch) { +int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, + const CopysetID copysetID, const uint64_t epoch) { // 1. 
转换成conf message ConfEpoch confEpoch; confEpoch.set_logicpoolid(logicPoolID); @@ -113,7 +108,8 @@ int ConfEpochFile::Save(const std::string &path, opt.enum_option = json2pb::OUTPUT_ENUM_BY_NUMBER; if (!json2pb::ProtoMessageToJson(confEpoch, &out, opt, &err)) { - LOG(ERROR) << "Failed to encode conf epoch," << " error: " << err; + LOG(ERROR) << "Failed to encode conf epoch," + << " error: " << err; return -1; } @@ -127,7 +123,8 @@ int ConfEpochFile::Save(const std::string &path, } // 3. write文件 - if (out.size() != fs_->Write(fd, out.c_str(), 0, out.size())) { + if (static_cast(out.size()) != + fs_->Write(fd, out.c_str(), 0, out.size())) { LOG(ERROR) << "SaveConfEpoch write failed, path: " << path << ", errno: " << errno << ", error message: " << strerror(errno); @@ -150,22 +147,18 @@ int ConfEpochFile::Save(const std::string &path, uint32_t ConfEpochFile::ConfEpochCrc(const ConfEpoch &confEpoch) { uint32_t crc32c = 0; - uint32_t logicPoolId = confEpoch.logicpoolid(); - uint32_t copysetId = confEpoch.copysetid(); - uint64_t epoch = confEpoch.epoch(); - uint64_t magic = kConfEpochFileMagic; - - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(&logicPoolId), - sizeof(logicPoolId)); - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(©setId), + uint32_t logicPoolId = confEpoch.logicpoolid(); + uint32_t copysetId = confEpoch.copysetid(); + uint64_t epoch = confEpoch.epoch(); + uint64_t magic = kConfEpochFileMagic; + + crc32c = curve::common::CRC32( + crc32c, reinterpret_cast(&logicPoolId), sizeof(logicPoolId)); + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(©setId), sizeof(copysetId)); - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(&epoch), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&epoch), sizeof(epoch)); - crc32c = curve::common::CRC32(crc32c, - reinterpret_cast(&magic), + crc32c = curve::common::CRC32(crc32c, reinterpret_cast(&magic), sizeof(magic)); return crc32c; diff --git a/src/chunkserver/copyset_node.cpp 
b/src/chunkserver/copyset_node.cpp index 1945ed9ba4..b9a6d44192 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp @@ -75,10 +75,10 @@ CopysetNode::CopysetNode(const LogicPoolID &logicPoolId, chunkDataRpath_(), appliedIndex_(0), leaderTerm_(-1), + configChange_(std::make_shared()), + lastSnapshotIndex_(0), scaning_(false), lastScanSec_(0), - lastSnapshotIndex_(0), - configChange_(std::make_shared()), enableOdsyncWhenOpenChunkFile_(false), isSyncing_(false), checkSyncingIntervalMs_(500) { @@ -504,6 +504,7 @@ void CopysetNode::on_leader_start(int64_t term) { } void CopysetNode::on_leader_stop(const butil::Status &status) { + (void)status; leaderTerm_.store(-1, std::memory_order_release); ChunkServerMetric::GetInstance()->DecreaseLeaderCount(); LOG(INFO) << "Copyset: " << GroupIdString() diff --git a/src/chunkserver/copyset_service.cpp b/src/chunkserver/copyset_service.cpp index e29f8c8ab8..e09516c0ad 100755 --- a/src/chunkserver/copyset_service.cpp +++ b/src/chunkserver/copyset_service.cpp @@ -84,6 +84,7 @@ void CopysetServiceImpl::CreateCopysetNode2(RpcController *controller, const CopysetRequest2 *request, CopysetResponse2 *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); Copyset copyset; @@ -138,6 +139,7 @@ void CopysetServiceImpl::DeleteBrokenCopyset(RpcController* controller, const CopysetRequest* request, CopysetResponse* response, Closure* done) { + (void)controller; LOG(INFO) << "Receive delete broken copyset request"; brpc::ClosureGuard doneGuard(done); @@ -163,6 +165,7 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, const CopysetStatusRequest *request, CopysetStatusResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); LOG(INFO) << "Received GetCopysetStatus request: " diff --git a/src/chunkserver/datastore/chunkserver_chunkfile.cpp b/src/chunkserver/datastore/chunkserver_chunkfile.cpp index 78d0ae2119..b4d21cdb7f 100644 --- 
a/src/chunkserver/datastore/chunkserver_chunkfile.cpp +++ b/src/chunkserver/datastore/chunkserver_chunkfile.cpp @@ -300,6 +300,7 @@ CSErrorCode CSChunkFile::Write(SequenceNum sn, off_t offset, size_t length, uint32_t* cost) { + (void)cost; WriteLockGuard writeGuard(rwLock_); if (!CheckOffsetAndLength( offset, length, isCloneChunk_ ? pageSize_ : FLAGS_minIoAlignment)) { diff --git a/src/chunkserver/datastore/chunkserver_datastore.cpp b/src/chunkserver/datastore/chunkserver_datastore.cpp index 3eed89d747..6d624e6a87 100644 --- a/src/chunkserver/datastore/chunkserver_datastore.cpp +++ b/src/chunkserver/datastore/chunkserver_datastore.cpp @@ -39,8 +39,8 @@ CSDataStore::CSDataStore(std::shared_ptr lfs, const DataStoreOptions& options) : chunkSize_(options.chunkSize), pageSize_(options.pageSize), - baseDir_(options.baseDir), locationLimit_(options.locationLimit), + baseDir_(options.baseDir), chunkFilePool_(chunkFilePool), lfs_(lfs), enableOdsyncWhenOpenChunkFile_(options.enableOdsyncWhenOpenChunkFile) { @@ -147,6 +147,7 @@ CSErrorCode CSDataStore::ReadChunk(ChunkID id, char * buf, off_t offset, size_t length) { + (void)sn; auto chunkFile = metaCache_.Get(id); if (chunkFile == nullptr) { return CSErrorCode::ChunkNotExistError; @@ -163,6 +164,7 @@ CSErrorCode CSDataStore::ReadChunk(ChunkID id, CSErrorCode CSDataStore::ReadChunkMetaPage(ChunkID id, SequenceNum sn, char * buf) { + (void)sn; auto chunkFile = metaCache_.Get(id); if (chunkFile == nullptr) { return CSErrorCode::ChunkNotExistError; diff --git a/src/chunkserver/datastore/file_pool.cpp b/src/chunkserver/datastore/file_pool.cpp index be56d87125..d5d5e78d8e 100644 --- a/src/chunkserver/datastore/file_pool.cpp +++ b/src/chunkserver/datastore/file_pool.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -44,10 +45,10 @@ using curve::common::kFilePoolMaigic; namespace curve { namespace chunkserver { -const char* FilePoolHelper::kFileSize = "chunkSize"; -const char* 
FilePoolHelper::kMetaPageSize = "metaPageSize"; -const char* FilePoolHelper::kFilePoolPath = "chunkfilepool_path"; -const char* FilePoolHelper::kCRC = "crc"; +const char *FilePoolHelper::kFileSize = "chunkSize"; +const char *FilePoolHelper::kMetaPageSize = "metaPageSize"; +const char *FilePoolHelper::kFilePoolPath = "chunkfilepool_path"; +const char *FilePoolHelper::kCRC = "crc"; const uint32_t FilePoolHelper::kPersistSize = 4096; const std::string FilePool::kCleanChunkSuffix_ = ".clean"; // NOLINT const std::chrono::milliseconds FilePool::kSuccessSleepMsec_(10); @@ -55,8 +56,8 @@ const std::chrono::milliseconds FilePool::kFailSleepMsec_(500); int FilePoolHelper::PersistEnCodeMetaInfo( std::shared_ptr fsptr, uint32_t chunkSize, - uint32_t metaPageSize, const std::string& filePoolPath, - const std::string& persistPath) { + uint32_t metaPageSize, const std::string &filePoolPath, + const std::string &persistPath) { Json::Value root; root[kFileSize] = chunkSize; root[kMetaPageSize] = metaPageSize; @@ -64,7 +65,7 @@ int FilePoolHelper::PersistEnCodeMetaInfo( uint32_t crcsize = sizeof(kFilePoolMaigic) + sizeof(chunkSize) + sizeof(metaPageSize) + filePoolPath.size(); - char* crcbuf = new char[crcsize]; + char *crcbuf = new char[crcsize]; ::memcpy(crcbuf, kFilePoolMaigic, sizeof(kFilePoolMaigic)); ::memcpy(crcbuf + sizeof(kFilePoolMaigic), &chunkSize, sizeof(uint32_t)); @@ -85,7 +86,7 @@ int FilePoolHelper::PersistEnCodeMetaInfo( LOG(INFO) << root.toStyledString().c_str(); - char* writeBuffer = new char[kPersistSize]; + char *writeBuffer = new char[kPersistSize]; memset(writeBuffer, 0, kPersistSize); memcpy(writeBuffer, root.toStyledString().c_str(), root.toStyledString().size()); @@ -105,9 +106,9 @@ int FilePoolHelper::PersistEnCodeMetaInfo( } int FilePoolHelper::DecodeMetaInfoFromMetaFile( - std::shared_ptr fsptr, const std::string& metaFilePath, - uint32_t metaFileSize, uint32_t* chunksize, uint32_t* metapagesize, - std::string* chunkfilePath) { + std::shared_ptr fsptr, 
const std::string &metaFilePath, + uint32_t metaFileSize, uint32_t *chunksize, uint32_t *metapagesize, + std::string *chunkfilePath) { int fd = fsptr->Open(metaFilePath, O_RDWR); if (fd < 0) { LOG(ERROR) << "meta file open failed, " << metaFilePath; @@ -117,7 +118,7 @@ int FilePoolHelper::DecodeMetaInfoFromMetaFile( std::unique_ptr readvalid(new char[metaFileSize]); memset(readvalid.get(), 0, metaFileSize); int ret = fsptr->Read(fd, readvalid.get(), 0, metaFileSize); - if (ret != metaFileSize) { + if (ret != static_cast(metaFileSize)) { fsptr->Close(fd); LOG(ERROR) << "meta file read failed, " << metaFilePath; return -1; @@ -128,10 +129,14 @@ int FilePoolHelper::DecodeMetaInfoFromMetaFile( uint32_t crcvalue = 0; bool parse = false; do { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(readvalid.get(), value)) { - LOG(ERROR) << "chunkfile meta file got error!"; + char *str = readvalid.get(); + JSONCPP_STRING errormsg; + if (!reader->parse(str, str + strlen(str), &value, &errormsg)) { + LOG(ERROR) << "chunkfile meta file got error!" + << " error: " << errormsg; break; } @@ -210,7 +215,7 @@ FilePool::FilePool(std::shared_ptr fsptr) memset(writeBuffer_.get(), 0, poolOpt_.bytesPerWrite); } -bool FilePool::Initialize(const FilePoolOptions& cfopt) { +bool FilePool::Initialize(const FilePoolOptions &cfopt) { poolOpt_ = cfopt; if (poolOpt_.getFileFromPool) { if (!CheckValid()) { @@ -261,7 +266,7 @@ bool FilePool::CleanChunk(uint64_t chunkid, bool onlyMarked) { } int fd = ret; - auto defer = [&](...){ fsptr_->Close(fd); }; + auto defer = [&](...) 
{ fsptr_->Close(fd); }; std::shared_ptr _(nullptr, defer); uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; @@ -276,10 +281,11 @@ bool FilePool::CleanChunk(uint64_t chunkid, bool onlyMarked) { uint64_t nwrite = 0; uint64_t ntotal = chunklen; uint32_t bytesPerWrite = poolOpt_.bytesPerWrite; - char* buffer = writeBuffer_.get(); + char *buffer = writeBuffer_.get(); while (nwrite < ntotal) { - nbytes = fsptr_->Write(fd, buffer, nwrite, + nbytes = fsptr_->Write( + fd, buffer, nwrite, std::min(ntotal - nwrite, (uint64_t)bytesPerWrite)); if (nbytes < 0) { LOG(ERROR) << "Write file failed: " << chunkpath; @@ -305,8 +311,8 @@ bool FilePool::CleanChunk(uint64_t chunkid, bool onlyMarked) { } bool FilePool::CleaningChunk() { - auto popBack = [this](std::vector* chunks, - uint64_t* chunksLeft) -> uint64_t { + auto popBack = [this](std::vector *chunks, + uint64_t *chunksLeft) -> uint64_t { std::unique_lock lk(mtx_); if (chunks->empty()) { return 0; @@ -319,8 +325,8 @@ bool FilePool::CleaningChunk() { return chunkid; }; - auto pushBack = [this](std::vector* chunks, - uint64_t chunkid, uint64_t* chunksLeft) { + auto pushBack = [this](std::vector *chunks, uint64_t chunkid, + uint64_t *chunksLeft) { std::unique_lock lk(mtx_); chunks->push_back(chunkid); (*chunksLeft)++; @@ -374,9 +380,9 @@ bool FilePool::StopCleaning() { return true; } -bool FilePool::GetChunk(bool needClean, uint64_t* chunkid, bool* isCleaned) { - auto pop = [&](std::vector* chunks, - uint64_t* chunksLeft, bool isCleanChunks) -> bool { +bool FilePool::GetChunk(bool needClean, uint64_t *chunkid, bool *isCleaned) { + auto pop = [&](std::vector *chunks, uint64_t *chunksLeft, + bool isCleanChunks) -> bool { std::unique_lock lk(mtx_); if (chunks->empty()) { return false; @@ -391,14 +397,14 @@ bool FilePool::GetChunk(bool needClean, uint64_t* chunkid, bool* isCleaned) { }; if (!needClean) { - return pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false) - || pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true); 
+ return pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false) || + pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true); } // Need clean chunk *isCleaned = false; - bool ret = pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true) - || pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false); + bool ret = pop(&cleanChunks_, ¤tState_.cleanChunksLeft, true) || + pop(&dirtyChunks_, ¤tState_.dirtyChunksLeft, false); if (true == ret && false == *isCleaned && CleanChunk(*chunkid, true)) { *isCleaned = true; @@ -407,8 +413,7 @@ bool FilePool::GetChunk(bool needClean, uint64_t* chunkid, bool* isCleaned) { return *isCleaned; } -int FilePool::GetFile(const std::string& targetpath, - const char* metapage, +int FilePool::GetFile(const std::string &targetpath, const char *metapage, bool needClean) { int ret = -1; int retry = 0; @@ -470,7 +475,7 @@ int FilePool::GetFile(const std::string& targetpath, return ret; } -int FilePool::AllocateChunk(const std::string& chunkpath) { +int FilePool::AllocateChunk(const std::string &chunkpath) { uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; int ret = fsptr_->Open(chunkpath.c_str(), O_RDWR | O_CREAT); @@ -487,7 +492,7 @@ int FilePool::AllocateChunk(const std::string& chunkpath) { return -1; } - char* data = new (std::nothrow) char[chunklen]; + char *data = new (std::nothrow) char[chunklen]; memset(data, 0, chunklen); ret = fsptr_->Write(fd, data, 0, chunklen); @@ -513,7 +518,7 @@ int FilePool::AllocateChunk(const std::string& chunkpath) { return ret; } -bool FilePool::WriteMetaPage(const std::string& sourcepath, const char* page) { +bool FilePool::WriteMetaPage(const std::string &sourcepath, const char *page) { int fd = -1; int ret = -1; @@ -526,7 +531,7 @@ bool FilePool::WriteMetaPage(const std::string& sourcepath, const char* page) { fd = ret; ret = fsptr_->Write(fd, page, 0, poolOpt_.metaPageSize); - if (ret != poolOpt_.metaPageSize) { + if (ret != static_cast(poolOpt_.metaPageSize)) { fsptr_->Close(fd); LOG(ERROR) << "write metapage 
failed, " << sourcepath.c_str(); return false; @@ -547,7 +552,7 @@ bool FilePool::WriteMetaPage(const std::string& sourcepath, const char* page) { return true; } -int FilePool::RecycleFile(const std::string& chunkpath) { +int FilePool::RecycleFile(const std::string &chunkpath) { if (!poolOpt_.getFileFromPool) { int ret = fsptr_->Delete(chunkpath.c_str()); if (ret < 0) { @@ -574,7 +579,7 @@ int FilePool::RecycleFile(const std::string& chunkpath) { return fsptr_->Delete(chunkpath.c_str()); } - if (info.st_size != chunklen) { + if (info.st_size != static_cast(chunklen)) { LOG(ERROR) << "file size illegal, " << chunkpath.c_str() << ", delete file dirctly" << ", standard size = " << chunklen @@ -634,7 +639,7 @@ bool FilePool::ScanInternal() { size_t suffixLen = kCleanChunkSuffix_.size(); uint64_t chunklen = poolOpt_.fileSize + poolOpt_.metaPageSize; - for (auto& iter : tmpvec) { + for (auto &iter : tmpvec) { bool isCleaned = false; std::string chunkNum = iter; if (::curve::common::StringEndsWith(iter, kCleanChunkSuffix_)) { @@ -642,10 +647,9 @@ bool FilePool::ScanInternal() { chunkNum = iter.substr(0, iter.size() - suffixLen); } - auto it = std::find_if(chunkNum.begin(), chunkNum.end(), - [](unsigned char c) { - return !std::isdigit(c); - }); + auto it = + std::find_if(chunkNum.begin(), chunkNum.end(), + [](unsigned char c) { return !std::isdigit(c); }); if (it != chunkNum.end()) { LOG(ERROR) << "file name illegal! 
[" << iter << "]"; return false; @@ -664,7 +668,7 @@ bool FilePool::ScanInternal() { struct stat info; int ret = fsptr_->Fstat(fd, &info); - if (ret != 0 || info.st_size != chunklen) { + if (ret != 0 || info.st_size != static_cast(chunklen)) { LOG(ERROR) << "file size illegal, " << filepath.c_str() << ", standard size = " << chunklen << ", current size = " << info.st_size; @@ -690,8 +694,8 @@ bool FilePool::ScanInternal() { currentmaxfilenum_.store(maxnum + 1); currentState_.dirtyChunksLeft = dirtyChunks_.size(); currentState_.cleanChunksLeft = cleanChunks_.size(); - currentState_.preallocatedChunksLeft = currentState_.dirtyChunksLeft - + currentState_.cleanChunksLeft; + currentState_.preallocatedChunksLeft = + currentState_.dirtyChunksLeft + currentState_.cleanChunksLeft; LOG(INFO) << "scan done, pool size = " << currentState_.preallocatedChunksLeft; @@ -703,9 +707,7 @@ size_t FilePool::Size() { return currentState_.preallocatedChunksLeft; } -FilePoolState_t FilePool::GetState() { - return currentState_; -} +FilePoolState_t FilePool::GetState() { return currentState_; } } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/heartbeat_helper.cpp b/src/chunkserver/heartbeat_helper.cpp index f960578ca5..02a2fc65c9 100644 --- a/src/chunkserver/heartbeat_helper.cpp +++ b/src/chunkserver/heartbeat_helper.cpp @@ -89,6 +89,7 @@ bool HeartbeatHelper::CopySetConfValid( bool HeartbeatHelper::NeedPurge(const butil::EndPoint &csEp, const CopySetConf &conf, const CopysetNodePtr ©set) { + (void)copyset; // CLDCFS-1004 bug-fix: mds下发epoch为0, 配置为空的copyset if (0 == conf.epoch() && conf.peers().empty()) { LOG(INFO) << "Clean copyset " diff --git a/src/chunkserver/op_request.cpp b/src/chunkserver/op_request.cpp index 90734d0937..5409092020 100755 --- a/src/chunkserver/op_request.cpp +++ b/src/chunkserver/op_request.cpp @@ -208,6 +208,7 @@ void DeleteChunkRequest::OnApply(uint64_t index, void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, const 
ChunkRequest &request, const butil::IOBuf &data) { + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request auto ret = datastore->DeleteChunk(request.chunkid(), request.sn()); @@ -363,6 +364,9 @@ void ReadChunkRequest::OnApply(uint64_t index, void ReadChunkRequest::OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) { + (void)datastore; + (void)request; + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request // read什么都不用做 } @@ -571,6 +575,9 @@ void ReadSnapshotRequest::OnApply(uint64_t index, void ReadSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) { + (void)datastore; + (void)request; + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request // read什么都不用做 } @@ -607,6 +614,7 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT const ChunkRequest &request, const butil::IOBuf &data) { + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request auto ret = datastore->DeleteSnapshotChunkOrCorrectSn( request.chunkid(), request.correctedsn()); @@ -669,6 +677,7 @@ void CreateCloneChunkRequest::OnApply(uint64_t index, void CreateCloneChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT const ChunkRequest &request, const butil::IOBuf &data) { + (void)data; // NOTE: 处理过程中优先使用参数传入的datastore/request auto ret = datastore->CreateCloneChunk(request.chunkid(), request.sn(), @@ -806,6 +815,7 @@ void ScanChunkRequest::OnApply(uint64_t index, void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT const ChunkRequest &request, const butil::IOBuf &data) { + (void)data; uint32_t crc = 0; size_t size = request.size(); std::unique_ptr readBuffer(new(std::nothrow)char[size]); diff --git a/src/chunkserver/raftlog/curve_segment.cpp b/src/chunkserver/raftlog/curve_segment.cpp index 59a7c65e85..0e144971b8 100644 --- a/src/chunkserver/raftlog/curve_segment.cpp +++ 
b/src/chunkserver/raftlog/curve_segment.cpp @@ -80,7 +80,7 @@ int CurveSegment::create() { return -1; } res = ::lseek(_fd, _meta_page_size, SEEK_SET); - if (res != _meta_page_size) { + if (res != static_cast(_meta_page_size)) { LOG(ERROR) << "lseek fail! error: " << strerror(errno); return -1; } @@ -231,7 +231,7 @@ int CurveSegment::load(braft::ConfigurationManager* configuration_manager) { int CurveSegment::_load_meta() { char* metaPage = new char[_meta_page_size]; int res = ::pread(_fd, metaPage, _meta_page_size, 0); - if (res != _meta_page_size) { + if (res != static_cast(_meta_page_size)) { delete metaPage; return -1; } @@ -437,7 +437,7 @@ int CurveSegment::append(const braft::LogEntry* entry) { data.copy_to(write_buf + kEntryHeaderSize, real_length); int ret = ::pwrite(_direct_fd, write_buf, to_write, _meta.bytes); free(write_buf); - if (ret != to_write) { + if (ret != static_cast(to_write)) { LOG(ERROR) << "Fail to write directly to fd=" << _direct_fd << ", buf=" << write_buf << ", size=" << to_write << ", offset=" << _meta.bytes << ", error=" << berror(); @@ -486,7 +486,7 @@ int CurveSegment::_update_meta_page() { ret = ::pwrite(_fd, metaPage, _meta_page_size, 0); } free(metaPage); - if (ret != _meta_page_size) { + if (ret != static_cast(_meta_page_size)) { LOG(ERROR) << "Fail to write meta page into fd=" << (FLAGS_enableWalDirectWrite ? 
_direct_fd : _fd) << ", path: " << _path << berror(); @@ -642,7 +642,6 @@ int CurveSegment::sync(bool will_sync) { } int CurveSegment::unlink() { - int ret = 0; std::string path(_path); if (_is_open) { butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, diff --git a/src/chunkserver/raftlog/curve_segment_log_storage.cpp b/src/chunkserver/raftlog/curve_segment_log_storage.cpp index c9552e5c3f..6182577e6e 100644 --- a/src/chunkserver/raftlog/curve_segment_log_storage.cpp +++ b/src/chunkserver/raftlog/curve_segment_log_storage.cpp @@ -424,6 +424,7 @@ int CurveSegmentLogStorage::append_entry(const braft::LogEntry* entry) { int CurveSegmentLogStorage::append_entries( const std::vector& entries, braft::IOMetric* metric) { + (void)metric; if (entries.empty()) { return 0; } diff --git a/src/chunkserver/raftlog/curve_segment_log_storage.h b/src/chunkserver/raftlog/curve_segment_log_storage.h index 0d970577a6..bb7ff46839 100644 --- a/src/chunkserver/raftlog/curve_segment_log_storage.h +++ b/src/chunkserver/raftlog/curve_segment_log_storage.h @@ -38,8 +38,8 @@ // Zhangyi Chen(chenzhangyi01@baidu.com) // Xiong,Kai(xiongkai@baidu.com) -#ifndef SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ -#define SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ +#ifndef SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ +#define SRC_CHUNKSERVER_RAFTLOG_CURVE_SEGMENT_LOG_STORAGE_H_ #include #include @@ -64,25 +64,23 @@ class CurveSegmentLogStorage; struct LogStorageOptions { std::shared_ptr walFilePool; - std::function monitorMetricCb; + std::function monitorMetricCb; LogStorageOptions() = default; - LogStorageOptions(std::shared_ptr walFilePool, - std::function monitorMetricCb) - : walFilePool(walFilePool), monitorMetricCb(monitorMetricCb) { - } + LogStorageOptions( + std::shared_ptr walFilePool, + std::function monitorMetricCb) + : walFilePool(walFilePool), monitorMetricCb(monitorMetricCb) {} }; struct LogStorageStatus { explicit LogStorageStatus(uint32_t 
walSegmentFileCount) - : walSegmentFileCount(walSegmentFileCount) { - } + : walSegmentFileCount(walSegmentFileCount) {} uint32_t walSegmentFileCount; }; -LogStorageOptions StoreOptForCurveSegmentLogStorage( - LogStorageOptions options); +LogStorageOptions StoreOptForCurveSegmentLogStorage(LogStorageOptions options); void RegisterCurveSegmentLogStorageOrDie(); @@ -96,31 +94,23 @@ void RegisterCurveSegmentLogStorageOrDie(); // log_inprogress_0001001: open segment class CurveSegmentLogStorage : public braft::LogStorage { public: - typedef std::map > SegmentMap; + typedef std::map> SegmentMap; - explicit CurveSegmentLogStorage(const std::string& path, - bool enable_sync = true, + explicit CurveSegmentLogStorage( + const std::string &path, bool enable_sync = true, std::shared_ptr walFilePool = nullptr) - : _path(path) - , _first_log_index(1) - , _last_log_index(0) - , _checksum_type(0) - , _enable_sync(enable_sync) - , _walFilePool(walFilePool) - {} + : _path(path), _first_log_index(1), _last_log_index(0), + _walFilePool(walFilePool), _checksum_type(0), + _enable_sync(enable_sync) {} CurveSegmentLogStorage() - : _first_log_index(1) - , _last_log_index(0) - , _checksum_type(0) - , _enable_sync(true) - , _walFilePool(nullptr) - {} + : _first_log_index(1), _last_log_index(0), _walFilePool(nullptr), + _checksum_type(0), _enable_sync(true) {} virtual ~CurveSegmentLogStorage() {} // init logstorage, check consistency and integrity - virtual int init(braft::ConfigurationManager* configuration_manager); + virtual int init(braft::ConfigurationManager *configuration_manager); // first log index in log virtual int64_t first_log_index() { @@ -131,18 +121,17 @@ class CurveSegmentLogStorage : public braft::LogStorage { virtual int64_t last_log_index(); // get logentry by index - virtual braft::LogEntry* get_entry(const int64_t index); + virtual braft::LogEntry *get_entry(const int64_t index); // get logentry's term by index virtual int64_t get_term(const int64_t index); // append 
entry to log - int append_entry(const braft::LogEntry* entry); + int append_entry(const braft::LogEntry *entry); // append entries to log and update IOMetric, return success append number - virtual int append_entries( - const std::vector& entries, - braft::IOMetric* metric); + virtual int append_entries(const std::vector &entries, + braft::IOMetric *metric); // delete logs from storage's head, [1, first_index_kept) will be discarded virtual int truncate_prefix(const int64_t first_index_kept); @@ -153,13 +142,11 @@ class CurveSegmentLogStorage : public braft::LogStorage { virtual int reset(const int64_t next_log_index); - LogStorage* new_instance(const std::string& uri) const; + LogStorage *new_instance(const std::string &uri) const; - SegmentMap& segments() { - return _segments; - } + SegmentMap &segments() { return _segments; } - void list_files(std::vector* seg_files); + void list_files(std::vector *seg_files); void sync(); @@ -170,15 +157,13 @@ class CurveSegmentLogStorage : public braft::LogStorage { int save_meta(const int64_t log_index); int load_meta(); int list_segments(bool is_empty); - int load_segments(braft::ConfigurationManager* configuration_manager); - int get_segment(int64_t log_index, scoped_refptr* ptr); - void pop_segments( - int64_t first_index_kept, - std::vector >* poped); - void pop_segments_from_back( - const int64_t first_index_kept, - std::vector >* popped, - scoped_refptr* last_segment); + int load_segments(braft::ConfigurationManager *configuration_manager); + int get_segment(int64_t log_index, scoped_refptr *ptr); + void pop_segments(int64_t first_index_kept, + std::vector> *poped); + void pop_segments_from_back(const int64_t first_index_kept, + std::vector> *popped, + scoped_refptr *last_segment); std::string _path; diff --git a/src/chunkserver/raftsnapshot/curve_file_service.h b/src/chunkserver/raftsnapshot/curve_file_service.h index 6f88bb6adb..3c8e503e8f 100644 --- a/src/chunkserver/raftsnapshot/curve_file_service.h +++ 
b/src/chunkserver/raftsnapshot/curve_file_service.h @@ -70,7 +70,7 @@ class BAIDU_CACHELINE_ALIGNMENT CurveFileService : public braft::FileService { void set_snapshot_attachment(SnapshotAttachment *snapshot_attachment); void clear_snapshot_attachment() { BAIDU_SCOPED_LOCK(_mutex); - auto ret = _snapshot_attachment.release(); + (void)_snapshot_attachment.release(); } private: diff --git a/src/chunkserver/register.cpp b/src/chunkserver/register.cpp index 616ef0ac95..b764878db5 100644 --- a/src/chunkserver/register.cpp +++ b/src/chunkserver/register.cpp @@ -53,8 +53,8 @@ Register::Register(const RegisterOptions &ops) { } int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, - ChunkServerMetadata *metadata, - const std::shared_ptr &epochMap) { + ChunkServerMetadata *metadata, + const std::shared_ptr &epochMap) { ::curve::mds::topology::ChunkServerRegistRequest req; ::curve::mds::topology::ChunkServerRegistResponse resp; req.set_disktype(ops_.chunkserverDiskType); @@ -65,7 +65,7 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, } req.set_port(ops_.chunkserverPort); uint64_t chunkPoolSize = ops_.chunkFilepool->Size() * - ops_.chunkFilepool->GetFilePoolOpt().fileSize; + ops_.chunkFilepool->GetFilePoolOpt().fileSize; req.set_chunkfilepoolsize(chunkPoolSize); if (ops_.chunkFilepool->GetFilePoolOpt().getFileFromPool) { req.set_usechunkfilepoolaswalpool(ops_.useChunkFilePoolAsWalPool); @@ -107,8 +107,7 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, break; } else { LOG(ERROR) << ops_.chunkserverInternalIp << ":" - << ops_.chunkserverPort - << " Fail to register to MDS " + << ops_.chunkserverPort << " Fail to register to MDS " << mdsEps_[inServiceIndex_] << ", cntl errorCode: " << cntl.ErrorCode() << "," << " cntl error: " << cntl.ErrorText() << "," @@ -131,8 +130,8 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, } if (resp.epochmap_size() != 0) { - for (auto it = resp.epochmap().begin(); 
- it != resp.epochmap().end(); it++) { + for (auto it = resp.epochmap().begin(); it != resp.epochmap().end(); + it++) { epochMap->UpdateEpoch(it->first, it->second); } } @@ -173,8 +172,8 @@ int Register::PersistChunkServerMeta(const ChunkServerMetadata &metadata) { return -1; } - if (ops_.fs->Write( - fd, metaStr.c_str(), 0, metaStr.size()) < metaStr.size()) { + if (ops_.fs->Write(fd, metaStr.c_str(), 0, metaStr.size()) < + static_cast(metaStr.size())) { LOG(ERROR) << "Failed to write chunkserver metadata file"; return -1; } diff --git a/src/chunkserver/scan_service.cpp b/src/chunkserver/scan_service.cpp index 89a876643e..bc3d2789ce 100644 --- a/src/chunkserver/scan_service.cpp +++ b/src/chunkserver/scan_service.cpp @@ -29,6 +29,7 @@ void ScanServiceImpl::FollowScanMap(RpcController *controller, const FollowScanMapRequest *request, FollowScanMapResponse *response, Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); scanManager_->DealFollowerScanMap(*request, response); } diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 678ac212a4..2941261240 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -185,7 +185,7 @@ bool Trash::IsCopysetInTrash(const std::string &dirName) { // 目录是十进制形式 // 例如:2860448220024 (poolId: 666, copysetId: 888) uint64_t groupId; - int n = dirName.find("."); + auto n = dirName.find("."); if (n == std::string::npos) { return false; } @@ -259,6 +259,7 @@ bool Trash::RecycleChunksAndWALInDir( bool Trash::RecycleChunkfile( const std::string &filepath, const std::string &filename) { + (void)filename; LockGuard lg(mtx_); if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath @@ -272,6 +273,7 @@ bool Trash::RecycleChunkfile( bool Trash::RecycleWAL( const std::string &filepath, const std::string &filename) { + (void)filename; LockGuard lg(mtx_); if (walPool_ != nullptr && 0 != walPool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed 
recycle WAL " << filepath diff --git a/src/client/chunkserver_broadcaster.h b/src/client/chunkserver_broadcaster.h index d0f4969261..e7813754fa 100644 --- a/src/client/chunkserver_broadcaster.h +++ b/src/client/chunkserver_broadcaster.h @@ -24,6 +24,7 @@ #define SRC_CLIENT_CHUNKSERVER_BROADCASTER_H_ #include +#include #include "include/client/libcurve_define.h" #include "src/client/client_common.h" diff --git a/src/client/chunkserver_client.cpp b/src/client/chunkserver_client.cpp index 77d72a09bd..052c592746 100644 --- a/src/client/chunkserver_client.cpp +++ b/src/client/chunkserver_client.cpp @@ -25,6 +25,7 @@ #include #include #include +#include using curve::chunkserver::ChunkService_Stub; using curve::chunkserver::CHUNK_OP_STATUS; diff --git a/src/client/file_instance.cpp b/src/client/file_instance.cpp index 882287e619..b49bf3ed31 100644 --- a/src/client/file_instance.cpp +++ b/src/client/file_instance.cpp @@ -39,18 +39,14 @@ using curve::common::TimeUtility; using curve::mds::SessionStatus; FileInstance::FileInstance() - : finfo_(), - fileopt_(), - mdsclient_(nullptr), - leaseExecutor_(), - iomanager4file_(), - readonly_(false) {} - -bool FileInstance::Initialize(const std::string& filename, + : finfo_(), fileopt_(), mdsclient_(nullptr), leaseExecutor_(), + iomanager4file_(), readonly_(false) {} + +bool FileInstance::Initialize(const std::string &filename, std::shared_ptr mdsclient, - const UserInfo_t& userinfo, - const OpenFlags& openflags, - const FileServiceOption& fileservicopt, + const UserInfo_t &userinfo, + const OpenFlags &openflags, + const FileServiceOption &fileservicopt, bool readonly) { readonly_ = readonly; fileopt_ = fileservicopt; @@ -105,39 +101,37 @@ void FileInstance::UnInitialize() { mdsclient_.reset(); } -int FileInstance::Read(char* buf, off_t offset, size_t length) { - DLOG_EVERY_SECOND(INFO) << "begin Read "<< finfo_.fullPathName - << ", offset = " << offset - << ", len = " << length; +int FileInstance::Read(char *buf, off_t offset, 
size_t length) { + DLOG_EVERY_SECOND(INFO) << "begin Read " << finfo_.fullPathName + << ", offset = " << offset << ", len = " << length; return iomanager4file_.Read(buf, offset, length, mdsclient_.get()); } -int FileInstance::Write(const char* buf, off_t offset, size_t len) { +int FileInstance::Write(const char *buf, off_t offset, size_t len) { if (readonly_) { DVLOG(9) << "open with read only, do not support write!"; return -1; } DLOG_EVERY_SECOND(INFO) << "begin write " << finfo_.fullPathName - << ", offset = " << offset - << ", len = " << len; + << ", offset = " << offset << ", len = " << len; return iomanager4file_.Write(buf, offset, len, mdsclient_.get()); } -int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) { - DLOG_EVERY_SECOND(INFO) << "begin AioRead " << finfo_.fullPathName - << ", offset = " << aioctx->offset - << ", len = " << aioctx->length; +int FileInstance::AioRead(CurveAioContext *aioctx, UserDataType dataType) { + DLOG_EVERY_SECOND(INFO) + << "begin AioRead " << finfo_.fullPathName + << ", offset = " << aioctx->offset << ", len = " << aioctx->length; return iomanager4file_.AioRead(aioctx, mdsclient_.get(), dataType); } -int FileInstance::AioWrite(CurveAioContext* aioctx, UserDataType dataType) { +int FileInstance::AioWrite(CurveAioContext *aioctx, UserDataType dataType) { if (readonly_) { DVLOG(9) << "open with read only, do not support write!"; return -1; } - DLOG_EVERY_SECOND(INFO) << "begin AioWrite " << finfo_.fullPathName - << ", offset = " << aioctx->offset - << ", len = " << aioctx->length; + DLOG_EVERY_SECOND(INFO) + << "begin AioWrite " << finfo_.fullPathName + << ", offset = " << aioctx->offset << ", len = " << aioctx->length; return iomanager4file_.AioWrite(aioctx, mdsclient_.get(), dataType); } @@ -150,7 +144,7 @@ int FileInstance::Discard(off_t offset, size_t length) { return -1; } -int FileInstance::AioDiscard(CurveAioContext* aioctx) { +int FileInstance::AioDiscard(CurveAioContext *aioctx) { if (!readonly_) { 
return iomanager4file_.AioDiscard(aioctx, mdsclient_.get()); } @@ -167,15 +161,16 @@ int FileInstance::AioDiscard(CurveAioContext* aioctx) { // 这时候当前还没有成功打开,所以还没有存储该session信息,所以无法通过refresh // 再去打开,所以这时候需要获取mds一侧session lease时长,然后在client这一侧 // 等待一段时间再去Open,如果依然失败,就向上层返回失败。 -int FileInstance::Open(const std::string& filename, - const UserInfo& userinfo, - std::string* sessionId) { - LeaseSession_t lease; +int FileInstance::Open(const std::string &filename, const UserInfo &userinfo, + std::string *sessionId) { + (void)userinfo; + + LeaseSession_t lease; int ret = LIBCURVE_ERROR::FAILED; FileEpoch_t fEpoch; - ret = mdsclient_->OpenFile(filename, finfo_.userinfo, - &finfo_, &fEpoch, &lease); + ret = mdsclient_->OpenFile(filename, finfo_.userinfo, &finfo_, &fEpoch, + &lease); if (ret == LIBCURVE_ERROR::OK) { iomanager4file_.UpdateFileThrottleParams(finfo_.throttleParams); ret = leaseExecutor_->Start(finfo_, lease) ? LIBCURVE_ERROR::OK @@ -188,17 +183,17 @@ int FileInstance::Open(const std::string& filename, return -ret; } -int FileInstance::ReOpen(const std::string& filename, - const std::string& sessionId, - const UserInfo& userInfo, - std::string* newSessionId) { +int FileInstance::ReOpen(const std::string &filename, + const std::string &sessionId, const UserInfo &userInfo, + std::string *newSessionId) { + (void)sessionId; return Open(filename, userInfo, newSessionId); } -int FileInstance::GetFileInfo(const std::string& filename, - FInfo_t* fi, FileEpoch_t *fEpoch) { - LIBCURVE_ERROR ret = mdsclient_->GetFileInfo(filename, finfo_.userinfo, - fi, fEpoch); +int FileInstance::GetFileInfo(const std::string &filename, FInfo_t *fi, + FileEpoch_t *fEpoch) { + LIBCURVE_ERROR ret = + mdsclient_->GetFileInfo(filename, finfo_.userinfo, fi, fEpoch); return -ret; } @@ -215,14 +210,14 @@ int FileInstance::Close() { return -ret; } -FileInstance* FileInstance::NewInitedFileInstance( - const FileServiceOption& fileServiceOption, - std::shared_ptr mdsClient, - const std::string& 
filename, - const UserInfo& userInfo, - const OpenFlags& openflags, // TODO(all): maybe we can put userinfo and readonly into openflags // NOLINT +FileInstance *FileInstance::NewInitedFileInstance( + const FileServiceOption &fileServiceOption, + std::shared_ptr mdsClient, const std::string &filename, + const UserInfo &userInfo, + const OpenFlags &openflags, // TODO(all): maybe we can put userinfo and + // readonly into openflags // NOLINT bool readonly) { - FileInstance* instance = new (std::nothrow) FileInstance(); + FileInstance *instance = new (std::nothrow) FileInstance(); if (instance == nullptr) { LOG(ERROR) << "Create FileInstance failed, filename: " << filename; return nullptr; @@ -242,12 +237,12 @@ FileInstance* FileInstance::NewInitedFileInstance( return instance; } -FileInstance* FileInstance::Open4Readonly(const FileServiceOption& opt, +FileInstance *FileInstance::Open4Readonly(const FileServiceOption &opt, std::shared_ptr mdsclient, - const std::string& filename, - const UserInfo& userInfo, - const OpenFlags& openflags) { - FileInstance* instance = FileInstance::NewInitedFileInstance( + const std::string &filename, + const UserInfo &userInfo, + const OpenFlags &openflags) { + FileInstance *instance = FileInstance::NewInitedFileInstance( opt, std::move(mdsclient), filename, userInfo, openflags, true); if (instance == nullptr) { LOG(ERROR) << "NewInitedFileInstance failed, filename = " << filename; @@ -280,5 +275,5 @@ void FileInstance::StopLease() { } } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/io_tracker.cpp b/src/client/io_tracker.cpp index 238be74d96..85d6dae911 100644 --- a/src/client/io_tracker.cpp +++ b/src/client/io_tracker.cpp @@ -50,8 +50,8 @@ IOTracker::IOTracker(IOManager* iomanager, FileMetric* clientMetric, bool disableStripe) : mc_(mc), - iomanager_(iomanager), scheduler_(scheduler), + iomanager_(iomanager), fileMetric_(clientMetric), disableStripe_(disableStripe) { 
id_ = tracekerID_.fetch_add(1, std::memory_order_relaxed); diff --git a/src/client/io_tracker.h b/src/client/io_tracker.h index 6c6ae27df6..6369410ae3 100644 --- a/src/client/io_tracker.h +++ b/src/client/io_tracker.h @@ -336,13 +336,13 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { // store segment indices that can be discarded std::unordered_set discardSegments_; + // metacache为当前fileinstance的元数据信息 + MetaCache* mc_; + // scheduler用来将用户线程与client自己的线程切分 // 大IO被切分之后,将切分的reqlist传给scheduler向下发送 RequestScheduler* scheduler_; - // metacache为当前fileinstance的元数据信息 - MetaCache* mc_; - // 对于异步IO,Tracker需要向上层通知当前IO已经处理结束 // iomanager可以将该tracker释放 IOManager* iomanager_; diff --git a/src/client/libcbd_ext4.cpp b/src/client/libcbd_ext4.cpp index ae48dd530b..6bbdda0fef 100644 --- a/src/client/libcbd_ext4.cpp +++ b/src/client/libcbd_ext4.cpp @@ -28,7 +28,7 @@ extern "C" { CurveOptions g_cbd_ext4_options = {false, 0}; -int cbd_ext4_init(const CurveOptions* options) { +int cbd_ext4_init(const CurveOptions *options) { if (g_cbd_ext4_options.inited) { return 0; } @@ -45,47 +45,47 @@ int cbd_ext4_init(const CurveOptions* options) { return 0; } -int cbd_ext4_fini() { - return 0; -} +int cbd_ext4_fini() { return 0; } -int cbd_ext4_open(const char* filename) { +int cbd_ext4_open(const char *filename) { int fd = -1; char path[CBD_MAX_FILE_PATH_LEN] = {0}; #ifdef CBD_BACKEND_EXT4 - strcat(path, g_cbd_ext4_options.datahome); //NOLINT - strcat(path, "/"); //NOLINT + strcat(path, g_cbd_ext4_options.datahome); // NOLINT + strcat(path, "/"); // NOLINT #endif - strcat(path, filename); //NOLINT + strcat(path, filename); // NOLINT fd = open(path, O_RDWR | O_CREAT, 0660); return fd; } -int cbd_ext4_close(int fd) { - return close(fd); -} +int cbd_ext4_close(int fd) { return close(fd); } -int cbd_ext4_pread(int fd, void* buf, off_t offset, size_t length) { +int cbd_ext4_pread(int fd, void *buf, off_t offset, size_t length) { return pread(fd, buf, length, offset); } -int cbd_ext4_pwrite(int fd, const 
void* buf, off_t offset, size_t length) { +int cbd_ext4_pwrite(int fd, const void *buf, off_t offset, size_t length) { return pwrite(fd, buf, length, offset); } int cbd_ext4_pdiscard(int fd, off_t offset, size_t length) { + (void)fd; + (void)offset; + (void)length; return 0; } void cbd_ext4_aio_callback(union sigval sigev_value) { - CurveAioContext* context = (CurveAioContext *)sigev_value.sival_ptr; //NOLINT + CurveAioContext *context = + (CurveAioContext *)sigev_value.sival_ptr; // NOLINT context->cb(context); } -int cbd_ext4_aio_pread(int fd, CurveAioContext* context) { - struct aiocb* cb; +int cbd_ext4_aio_pread(int fd, CurveAioContext *context) { + struct aiocb *cb; cb = (struct aiocb *)malloc(sizeof(struct aiocb)); if (!cb) { @@ -98,14 +98,14 @@ int cbd_ext4_aio_pread(int fd, CurveAioContext* context) { cb->aio_nbytes = context->length; cb->aio_buf = context->buf; cb->aio_sigevent.sigev_notify = SIGEV_THREAD; - cb->aio_sigevent.sigev_value.sival_ptr = (void*)context; //NOLINT + cb->aio_sigevent.sigev_value.sival_ptr = (void *)context; // NOLINT cb->aio_sigevent.sigev_notify_function = cbd_ext4_aio_callback; return aio_read(cb); } -int cbd_ext4_aio_pwrite(int fd, CurveAioContext* context) { - struct aiocb* cb; +int cbd_ext4_aio_pwrite(int fd, CurveAioContext *context) { + struct aiocb *cb; cb = (struct aiocb *)malloc(sizeof(struct aiocb)); if (!cb) { @@ -118,32 +118,31 @@ int cbd_ext4_aio_pwrite(int fd, CurveAioContext* context) { cb->aio_nbytes = context->length; cb->aio_buf = context->buf; cb->aio_sigevent.sigev_notify = SIGEV_THREAD; - cb->aio_sigevent.sigev_value.sival_ptr = (void*)context; //NOLINT + cb->aio_sigevent.sigev_value.sival_ptr = (void *)context; // NOLINT cb->aio_sigevent.sigev_notify_function = cbd_ext4_aio_callback; return aio_write(cb); } -int cbd_ext4_aio_pdiscard(int fd, CurveAioContext* aioctx) { +int cbd_ext4_aio_pdiscard(int fd, CurveAioContext *aioctx) { + (void)fd; aioctx->ret = aioctx->length; aioctx->cb(aioctx); return 0; } -int 
cbd_ext4_sync(int fd) { - return fsync(fd); -} +int cbd_ext4_sync(int fd) { return fsync(fd); } -int64_t cbd_ext4_filesize(const char* filename) { +int64_t cbd_ext4_filesize(const char *filename) { struct stat st; int ret; char path[CBD_MAX_FILE_PATH_LEN] = {0}; #ifdef CBD_BACKEND_EXT4 - strcat(path, g_cbd_ext4_options.datahome); //NOLINT - strcat(path, "/"); //NOLINT + strcat(path, g_cbd_ext4_options.datahome); // NOLINT + strcat(path, "/"); // NOLINT #endif - strcat(path, filename); //NOLINT + strcat(path, filename); // NOLINT ret = stat(path, &st); if (ret) { @@ -153,9 +152,9 @@ int64_t cbd_ext4_filesize(const char* filename) { } } -int cbd_ext4_increase_epoch(const char* filename) { +int cbd_ext4_increase_epoch(const char *filename) { + (void)filename; return 0; } } // extern "C" - diff --git a/src/client/libcbd_libcurve.cpp b/src/client/libcbd_libcurve.cpp index 62fa3afc7d..2e29ef6f3c 100644 --- a/src/client/libcbd_libcurve.cpp +++ b/src/client/libcbd_libcurve.cpp @@ -91,6 +91,7 @@ int cbd_libcurve_aio_pdiscard(int fd, CurveAioContext* context) { int cbd_libcurve_sync(int fd) { // Ignored as it always sync writes to chunkserver currently + (void)fd; return 0; } diff --git a/src/client/libcurve_file.cpp b/src/client/libcurve_file.cpp index c03d723a69..d2b05cf020 100644 --- a/src/client/libcurve_file.cpp +++ b/src/client/libcurve_file.cpp @@ -48,7 +48,7 @@ #include "src/common/fast_align.h" bool globalclientinited_ = false; -curve::client::FileClient* globalclient = nullptr; +curve::client::FileClient *globalclient = nullptr; using curve::client::UserInfo; @@ -72,9 +72,9 @@ char g_processname[kProcessNameMax]; class LoggerGuard { private: - friend void InitLogging(const std::string& confPath); + friend void InitLogging(const std::string &confPath); - explicit LoggerGuard(const std::string& confpath) { + explicit LoggerGuard(const std::string &confpath) { InitInternal(confpath); } @@ -84,13 +84,13 @@ class LoggerGuard { } } - void InitInternal(const std::string& 
confpath); + void InitInternal(const std::string &confpath); private: bool needShutdown_ = false; }; -void LoggerGuard::InitInternal(const std::string& confPath) { +void LoggerGuard::InitInternal(const std::string &confPath) { curve::common::Configuration conf; conf.SetConfigPath(confPath); @@ -120,26 +120,22 @@ void LoggerGuard::InitInternal(const std::string& confPath) { LOG_IF(WARNING, !conf.GetStringValue("global.logPath", &FLAGS_log_dir)) << "config no logpath info, using default dir '/tmp'"; - std::string processName = std::string("libcurve-").append( - curve::common::UUIDGenerator().GenerateUUID().substr(0, 8)); - snprintf(g_processname, sizeof(g_processname), - "%s", processName.c_str()); + std::string processName = + std::string("libcurve-") + .append(curve::common::UUIDGenerator().GenerateUUID().substr(0, 8)); + snprintf(g_processname, sizeof(g_processname), "%s", processName.c_str()); google::InitGoogleLogging(g_processname); needShutdown_ = true; } -void InitLogging(const std::string& confPath) { +void InitLogging(const std::string &confPath) { static LoggerGuard guard(confPath); } } // namespace FileClient::FileClient() - : rwlock_(), - fdcount_(0), - fileserviceMap_(), - clientconfig_(), - mdsClient_(), + : rwlock_(), fdcount_(0), fileserviceMap_(), clientconfig_(), mdsClient_(), csClient_(std::make_shared()), csBroadCaster_(std::make_shared(csClient_)), inited_(false), @@ -150,7 +146,7 @@ bool FileClient::CheckAligned(off_t offset, size_t length) const { common::is_aligned(length, kMinIOAlignment); } -int FileClient::Init(const std::string& configpath) { +int FileClient::Init(const std::string &configpath) { if (inited_) { LOG(WARNING) << "already inited!"; return 0; @@ -187,8 +183,7 @@ int FileClient::Init(const std::string& configpath) { mdsClient_ = std::move(tmpMdsClient); - int rc2 = csClient_->Init( - clientconfig_.GetFileServiceOption().csClientOpt); + int rc2 = csClient_->Init(clientconfig_.GetFileServiceOption().csClientOpt); if (rc2 != 0) { 
LOG(ERROR) << "Init ChunkServer Client failed!"; return -LIBCURVE_ERROR::FAILED; @@ -221,11 +216,10 @@ void FileClient::UnInit() { inited_ = false; } -int FileClient::Open(const std::string& filename, - const UserInfo_t& userinfo, - const OpenFlags& openflags) { +int FileClient::Open(const std::string &filename, const UserInfo_t &userinfo, + const OpenFlags &openflags) { LOG(INFO) << "Opening filename: " << filename << ", flags: " << openflags; - FileInstance* fileserv = FileInstance::NewInitedFileInstance( + FileInstance *fileserv = FileInstance::NewInitedFileInstance( clientconfig_.GetFileServiceOption(), mdsClient_, filename, userinfo, openflags, false); if (fileserv == nullptr) { @@ -256,9 +250,9 @@ int FileClient::Open(const std::string& filename, return fd; } -int FileClient::Open4ReadOnly(const std::string& filename, - const UserInfo_t& userinfo, bool disableStripe) { - FileInstance* instance = FileInstance::Open4Readonly( +int FileClient::Open4ReadOnly(const std::string &filename, + const UserInfo_t &userinfo, bool disableStripe) { + FileInstance *instance = FileInstance::Open4Readonly( clientconfig_.GetFileServiceOption(), mdsClient_, filename, userinfo); if (instance == nullptr) { @@ -283,16 +277,16 @@ int FileClient::Open4ReadOnly(const std::string& filename, return fd; } -int FileClient::IncreaseEpoch(const std::string& filename, - const UserInfo_t& userinfo) { +int FileClient::IncreaseEpoch(const std::string &filename, + const UserInfo_t &userinfo) { LOG(INFO) << "IncreaseEpoch, filename: " << filename; FInfo_t fi; FileEpoch_t fEpoch; std::list> csLocs; LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { - ret = mdsClient_->IncreaseEpoch(filename, userinfo, - &fi, &fEpoch, &csLocs); + ret = mdsClient_->IncreaseEpoch(filename, userinfo, &fi, &fEpoch, + &csLocs); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "IncreaseEpoch failed, filename: " << filename << ", ret: " << ret; @@ -301,11 +295,10 @@ int FileClient::IncreaseEpoch(const std::string& filename, 
return -LIBCURVE_ERROR::FAILED; } - int ret2 = csBroadCaster_->BroadCastFileEpoch( - fEpoch.fileId, fEpoch.epoch, csLocs); + int ret2 = + csBroadCaster_->BroadCastFileEpoch(fEpoch.fileId, fEpoch.epoch, csLocs); LOG_IF(ERROR, ret2 != LIBCURVE_ERROR::OK) - << "BroadCastEpoch failed, filename: " << filename - << ", ret: " << ret2; + << "BroadCastEpoch failed, filename: " << filename << ", ret: " << ret2; // update epoch if file is already open auto it = fileserviceFileNameMap_.find(filename); @@ -315,8 +308,8 @@ int FileClient::IncreaseEpoch(const std::string& filename, return ret2; } -int FileClient::Create(const std::string& filename, - const UserInfo_t& userinfo, size_t size) { +int FileClient::Create(const std::string &filename, const UserInfo_t &userinfo, + size_t size) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->CreateFile(filename, userinfo, size); @@ -329,13 +322,13 @@ int FileClient::Create(const std::string& filename, return -ret; } -int FileClient::Create2(const std::string& filename, - const UserInfo_t& userinfo, size_t size, - uint64_t stripeUnit, uint64_t stripeCount) { +int FileClient::Create2(const std::string &filename, const UserInfo_t &userinfo, + size_t size, uint64_t stripeUnit, + uint64_t stripeCount) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { - ret = mdsClient_->CreateFile(filename, userinfo, size, true, - stripeUnit, stripeCount); + ret = mdsClient_->CreateFile(filename, userinfo, size, true, stripeUnit, + stripeCount); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "Create file failed, filename: " << filename << ", ret: " << ret; } else { @@ -345,7 +338,7 @@ int FileClient::Create2(const std::string& filename, return -ret; } -int FileClient::Read(int fd, char* buf, off_t offset, size_t len) { +int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { // 长度为0,直接返回,不做任何操作 if (len == 0) { return -LIBCURVE_ERROR::OK; @@ -366,7 +359,7 @@ int FileClient::Read(int fd, char* buf, off_t offset, size_t len) { 
return fileserviceMap_[fd]->Read(buf, offset, len); } -int FileClient::Write(int fd, const char* buf, off_t offset, size_t len) { +int FileClient::Write(int fd, const char *buf, off_t offset, size_t len) { // 长度为0,直接返回,不做任何操作 if (len == 0) { return -LIBCURVE_ERROR::OK; @@ -398,7 +391,7 @@ int FileClient::Discard(int fd, off_t offset, size_t length) { return iter->second->Discard(offset, length); } -int FileClient::AioRead(int fd, CurveAioContext* aioctx, +int FileClient::AioRead(int fd, CurveAioContext *aioctx, UserDataType dataType) { // 长度为0,直接返回,不做任何操作 if (aioctx->length == 0) { @@ -423,7 +416,7 @@ int FileClient::AioRead(int fd, CurveAioContext* aioctx, return ret; } -int FileClient::AioWrite(int fd, CurveAioContext* aioctx, +int FileClient::AioWrite(int fd, CurveAioContext *aioctx, UserDataType dataType) { // 长度为0,直接返回,不做任何操作 if (aioctx->length == 0) { @@ -449,7 +442,7 @@ int FileClient::AioWrite(int fd, CurveAioContext* aioctx, return ret; } -int FileClient::AioDiscard(int fd, CurveAioContext* aioctx) { +int FileClient::AioDiscard(int fd, CurveAioContext *aioctx) { ReadLockGuard lk(rwlock_); auto iter = fileserviceMap_.find(fd); if (CURVE_UNLIKELY(iter == fileserviceMap_.end())) { @@ -460,14 +453,13 @@ int FileClient::AioDiscard(int fd, CurveAioContext* aioctx) { } } -int FileClient::Rename(const UserInfo_t& userinfo, - const std::string& oldpath, const std::string& newpath) { +int FileClient::Rename(const UserInfo_t &userinfo, const std::string &oldpath, + const std::string &newpath) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->RenameFile(userinfo, oldpath, newpath); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Rename failed, OldPath: " << oldpath - << ", NewPath: " << newpath + << "Rename failed, OldPath: " << oldpath << ", NewPath: " << newpath << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; @@ -476,15 +468,14 @@ int FileClient::Rename(const UserInfo_t& userinfo, return -ret; } -int 
FileClient::Extend(const std::string& filename, - const UserInfo_t& userinfo, uint64_t newsize) { +int FileClient::Extend(const std::string &filename, const UserInfo_t &userinfo, + uint64_t newsize) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->Extend(filename, userinfo, newsize); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "Extend failed, filename: " << filename - << ", NewSize: " << newsize - << ", ret: " << ret; + << ", NewSize: " << newsize << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -492,15 +483,14 @@ int FileClient::Extend(const std::string& filename, return -ret; } -int FileClient::Unlink(const std::string& filename, - const UserInfo_t& userinfo, bool deleteforce) { +int FileClient::Unlink(const std::string &filename, const UserInfo_t &userinfo, + bool deleteforce) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->DeleteFile(filename, userinfo, deleteforce); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) << "Unlink failed, filename: " << filename - << ", force: " << deleteforce - << ", ret: " << ret; + << ", force: " << deleteforce << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -508,14 +498,13 @@ int FileClient::Unlink(const std::string& filename, return -ret; } -int FileClient::Recover(const std::string& filename, - const UserInfo_t& userinfo, uint64_t fileId) { +int FileClient::Recover(const std::string &filename, const UserInfo_t &userinfo, + uint64_t fileId) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->RecoverFile(filename, userinfo, fileId); LOG_IF(ERROR, ret != LIBCURVE_ERROR::OK) - << "Recover failed, filename: " << filename - << ", ret: " << ret; + << "Recover failed, filename: " << filename << ", ret: " << ret; } else { LOG(ERROR) << "global mds client not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -523,8 +512,8 @@ int FileClient::Recover(const std::string& 
filename, return -ret; } -int FileClient::StatFile(const std::string& filename, - const UserInfo_t& userinfo, FileStatInfo* finfo) { +int FileClient::StatFile(const std::string &filename, + const UserInfo_t &userinfo, FileStatInfo *finfo) { FInfo_t fi; FileEpoch_t fEpoch; int ret; @@ -538,18 +527,18 @@ int FileClient::StatFile(const std::string& filename, } if (ret == LIBCURVE_ERROR::OK) { - finfo->id = fi.id; + finfo->id = fi.id; finfo->parentid = fi.parentid; - finfo->ctime = fi.ctime; - finfo->length = fi.length; + finfo->ctime = fi.ctime; + finfo->length = fi.length; finfo->filetype = fi.filetype; finfo->stripeUnit = fi.stripeUnit; finfo->stripeCount = fi.stripeCount; memcpy(finfo->filename, fi.filename.c_str(), - std::min(sizeof(finfo->filename), fi.filename.size() + 1)); + std::min(sizeof(finfo->filename), fi.filename.size() + 1)); memcpy(finfo->owner, fi.owner.c_str(), - std::min(sizeof(finfo->owner), fi.owner.size() + 1)); + std::min(sizeof(finfo->owner), fi.owner.size() + 1)); finfo->fileStatus = static_cast(fi.filestatus); } @@ -557,8 +546,8 @@ int FileClient::StatFile(const std::string& filename, return -ret; } -int FileClient::Listdir(const std::string& dirpath, - const UserInfo_t& userinfo, std::vector* filestatVec) { +int FileClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, + std::vector *filestatVec) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->Listdir(dirpath, userinfo, filestatVec); @@ -572,7 +561,7 @@ int FileClient::Listdir(const std::string& dirpath, return -ret; } -int FileClient::Mkdir(const std::string& dirpath, const UserInfo_t& userinfo) { +int FileClient::Mkdir(const std::string &dirpath, const UserInfo_t &userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->CreateFile(dirpath, userinfo, 0, false); @@ -593,7 +582,7 @@ int FileClient::Mkdir(const std::string& dirpath, const UserInfo_t& userinfo) { return -ret; } -int FileClient::Rmdir(const std::string& dirpath, const 
UserInfo_t& userinfo) { +int FileClient::Rmdir(const std::string &dirpath, const UserInfo_t &userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->DeleteFile(dirpath, userinfo); @@ -606,8 +595,9 @@ int FileClient::Rmdir(const std::string& dirpath, const UserInfo_t& userinfo) { return -ret; } -int FileClient::ChangeOwner(const std::string& filename, - const std::string& newOwner, const UserInfo_t& userinfo) { +int FileClient::ChangeOwner(const std::string &filename, + const std::string &newOwner, + const UserInfo_t &userinfo) { LIBCURVE_ERROR ret; if (mdsClient_ != nullptr) { ret = mdsClient_->ChangeOwner(filename, newOwner, userinfo); @@ -655,14 +645,14 @@ int FileClient::Close(int fd) { return -LIBCURVE_ERROR::FAILED; } -int FileClient::GetClusterId(char* buf, int len) { +int FileClient::GetClusterId(char *buf, int len) { std::string result = GetClusterId(); if (result.empty()) { return -LIBCURVE_ERROR::FAILED; } - if (len >= result.size() + 1) { + if (static_cast(len) >= result.size() + 1) { snprintf(buf, len, "%s", result.c_str()); return LIBCURVE_ERROR::OK; } @@ -689,7 +679,7 @@ std::string FileClient::GetClusterId() { return {}; } -int FileClient::GetFileInfo(int fd, FInfo* finfo) { +int FileClient::GetFileInfo(int fd, FInfo *finfo) { int ret = -LIBCURVE_ERROR::FAILED; ReadLockGuard lk(rwlock_); @@ -745,23 +735,21 @@ bool FileClient::StartDummyServer() { return true; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve // 全局初始化与反初始化 -int GlobalInit(const char* configpath); +int GlobalInit(const char *configpath); void GlobalUnInit(); -int Init(const char* path) { - return GlobalInit(path); -} +int Init(const char *path) { return GlobalInit(path); } -int Open4Qemu(const char* filename) { +int Open4Qemu(const char *filename) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool 
ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -775,11 +763,11 @@ int Open4Qemu(const char* filename) { return globalclient->Open(realname, userinfo); } -int IncreaseEpoch(const char* filename) { +int IncreaseEpoch(const char *filename) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -793,11 +781,11 @@ int IncreaseEpoch(const char* filename) { return globalclient->IncreaseEpoch(realname, userinfo); } -int Extend4Qemu(const char* filename, int64_t newsize) { +int Extend4Qemu(const char *filename, int64_t newsize) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -812,20 +800,20 @@ int Extend4Qemu(const char* filename, int64_t newsize) { } return globalclient->Extend(realname, userinfo, - static_cast(newsize)); + static_cast(newsize)); } -int Open(const char* filename, const C_UserInfo_t* userinfo) { +int Open(const char *filename, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Open(filename, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } -int Read(int fd, char* buf, off_t offset, size_t length) { +int Read(int fd, char 
*buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -834,7 +822,7 @@ int Read(int fd, char* buf, off_t offset, size_t length) { return globalclient->Read(fd, buf, offset, length); } -int Write(int fd, const char* buf, off_t offset, size_t length) { +int Write(int fd, const char *buf, off_t offset, size_t length) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -852,32 +840,30 @@ int Discard(int fd, off_t offset, size_t length) { return globalclient->Discard(fd, offset, length); } -int AioRead(int fd, CurveAioContext* aioctx) { +int AioRead(int fd, CurveAioContext *aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - DVLOG(9) << "offset: " << aioctx->offset - << " length: " << aioctx->length - << " op: " << aioctx->op; + DVLOG(9) << "offset: " << aioctx->offset << " length: " << aioctx->length + << " op: " << aioctx->op; return globalclient->AioRead(fd, aioctx); } -int AioWrite(int fd, CurveAioContext* aioctx) { +int AioWrite(int fd, CurveAioContext *aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - DVLOG(9) << "offset: " << aioctx->offset - << " length: " << aioctx->length - << " op: " << aioctx->op - << " buf: " << *(unsigned int*)aioctx->buf; + DVLOG(9) << "offset: " << aioctx->offset << " length: " << aioctx->length + << " op: " << aioctx->op + << " buf: " << *(unsigned int *)aioctx->buf; return globalclient->AioWrite(fd, aioctx); } -int AioDiscard(int fd, CurveAioContext* aioctx) { +int AioDiscard(int fd, CurveAioContext *aioctx) { if (globalclient == nullptr) { LOG(ERROR) << "Not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -886,98 +872,96 @@ int AioDiscard(int fd, CurveAioContext* aioctx) { return globalclient->AioDiscard(fd, aioctx); } -int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size) { +int 
Create(const char *filename, const C_UserInfo_t *userinfo, size_t size) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Create(filename, - UserInfo(userinfo->owner, userinfo->password), size); + return globalclient->Create( + filename, UserInfo(userinfo->owner, userinfo->password), size); } -int Create2(const char* filename, const C_UserInfo_t* userinfo, size_t size, - uint64_t stripeUnit, uint64_t stripeCount) { +int Create2(const char *filename, const C_UserInfo_t *userinfo, size_t size, + uint64_t stripeUnit, uint64_t stripeCount) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Create2(filename, - UserInfo(userinfo->owner, userinfo->password), - size, stripeUnit, stripeCount); + UserInfo(userinfo->owner, userinfo->password), + size, stripeUnit, stripeCount); } -int Rename(const C_UserInfo_t* userinfo, - const char* oldpath, const char* newpath) { +int Rename(const C_UserInfo_t *userinfo, const char *oldpath, + const char *newpath) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Rename(UserInfo(userinfo->owner, userinfo->password), - oldpath, newpath); + oldpath, newpath); } -int Extend(const char* filename, - const C_UserInfo_t* userinfo, uint64_t newsize) { +int Extend(const char *filename, const C_UserInfo_t *userinfo, + uint64_t newsize) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Extend(filename, - UserInfo(userinfo->owner, userinfo->password), newsize); + return globalclient->Extend( + filename, UserInfo(userinfo->owner, userinfo->password), newsize); } -int Unlink(const char* filename, const C_UserInfo_t* userinfo) { +int Unlink(const char *filename, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } 
return globalclient->Unlink(filename, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } -int DeleteForce(const char* filename, const C_UserInfo_t* userinfo) { +int DeleteForce(const char *filename, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Unlink(filename, - UserInfo(userinfo->owner, userinfo->password), - true); + return globalclient->Unlink( + filename, UserInfo(userinfo->owner, userinfo->password), true); } -int Recover(const char* filename, const C_UserInfo_t* userinfo, - uint64_t fileId) { +int Recover(const char *filename, const C_UserInfo_t *userinfo, + uint64_t fileId) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } - return globalclient->Recover(filename, - UserInfo(userinfo->owner, userinfo->password), - fileId); + return globalclient->Recover( + filename, UserInfo(userinfo->owner, userinfo->password), fileId); } -DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo) { +DirInfo_t *OpenDir(const char *dirpath, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return nullptr; } - DirInfo_t* dirinfo = new (std::nothrow) DirInfo_t; - dirinfo->dirpath = const_cast(dirpath); - dirinfo->userinfo = const_cast(userinfo); + DirInfo_t *dirinfo = new (std::nothrow) DirInfo_t; + dirinfo->dirpath = const_cast(dirpath); + dirinfo->userinfo = const_cast(userinfo); dirinfo->fileStat = nullptr; return dirinfo; } -int Listdir(DirInfo_t* dirinfo) { +int Listdir(DirInfo_t *dirinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -989,9 +973,10 @@ int Listdir(DirInfo_t* dirinfo) { } std::vector fileStat; - int ret = globalclient->Listdir(dirinfo->dirpath, - UserInfo(dirinfo->userinfo->owner, dirinfo->userinfo->password), - &fileStat); + int ret = globalclient->Listdir( 
+ dirinfo->dirpath, + UserInfo(dirinfo->userinfo->owner, dirinfo->userinfo->password), + &fileStat); dirinfo->dirSize = fileStat.size(); dirinfo->fileStat = new (std::nothrow) FileStatInfo_t[dirinfo->dirSize]; @@ -1001,7 +986,7 @@ int Listdir(DirInfo_t* dirinfo) { return -LIBCURVE_ERROR::FAILED; } - for (int i = 0; i < dirinfo->dirSize; i++) { + for (uint64_t i = 0; i < dirinfo->dirSize; i++) { dirinfo->fileStat[i].id = fileStat[i].id; dirinfo->fileStat[i].parentid = fileStat[i].parentid; dirinfo->fileStat[i].filetype = fileStat[i].filetype; @@ -1011,13 +996,13 @@ int Listdir(DirInfo_t* dirinfo) { memcpy(dirinfo->fileStat[i].owner, fileStat[i].owner, NAME_MAX_SIZE); memset(dirinfo->fileStat[i].filename, 0, NAME_MAX_SIZE); memcpy(dirinfo->fileStat[i].filename, fileStat[i].filename, - NAME_MAX_SIZE); + NAME_MAX_SIZE); } return ret; } -void CloseDir(DirInfo_t* dirinfo) { +void CloseDir(DirInfo_t *dirinfo) { if (dirinfo != nullptr) { if (dirinfo->fileStat != nullptr) { delete[] dirinfo->fileStat; @@ -1027,24 +1012,24 @@ void CloseDir(DirInfo_t* dirinfo) { } } -int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo) { +int Mkdir(const char *dirpath, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Mkdir(dirpath, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } -int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo) { +int Rmdir(const char *dirpath, const C_UserInfo_t *userinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; } return globalclient->Rmdir(dirpath, - UserInfo(userinfo->owner, userinfo->password)); + UserInfo(userinfo->owner, userinfo->password)); } int Close(int fd) { @@ -1056,11 +1041,11 @@ int Close(int fd) { return globalclient->Close(fd); } -int StatFile4Qemu(const char* filename, FileStatInfo* finfo) { +int StatFile4Qemu(const char *filename, 
FileStatInfo *finfo) { curve::client::UserInfo_t userinfo; std::string realname; - bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename(filename, - &realname, &userinfo.owner); + bool ret = curve::client::ServiceHelper::GetUserInfoFromFilename( + filename, &realname, &userinfo.owner); if (!ret) { LOG(ERROR) << "get user info from filename failed!"; return -LIBCURVE_ERROR::FAILED; @@ -1074,8 +1059,8 @@ int StatFile4Qemu(const char* filename, FileStatInfo* finfo) { return globalclient->StatFile(realname, userinfo, finfo); } -int StatFile(const char* filename, - const C_UserInfo_t* cuserinfo, FileStatInfo* finfo) { +int StatFile(const char *filename, const C_UserInfo_t *cuserinfo, + FileStatInfo *finfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1085,8 +1070,8 @@ int StatFile(const char* filename, return globalclient->StatFile(filename, userinfo, finfo); } -int ChangeOwner(const char* filename, - const char* newOwner, const C_UserInfo_t* cuserinfo) { +int ChangeOwner(const char *filename, const char *newOwner, + const C_UserInfo_t *cuserinfo) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1096,11 +1081,9 @@ int ChangeOwner(const char* filename, return globalclient->ChangeOwner(filename, newOwner, userinfo); } -void UnInit() { - GlobalUnInit(); -} +void UnInit() { GlobalUnInit(); } -int GetClusterId(char* buf, int len) { +int GetClusterId(char *buf, int len) { if (globalclient == nullptr) { LOG(ERROR) << "not inited!"; return -LIBCURVE_ERROR::FAILED; @@ -1109,7 +1092,7 @@ int GetClusterId(char* buf, int len) { return globalclient->GetClusterId(buf, len); } -int GlobalInit(const char* path) { +int GlobalInit(const char *path) { int ret = 0; if (globalclientinited_) { LOG(INFO) << "global cient already inited!"; @@ -1146,74 +1129,74 @@ void GlobalUnInit() { } } -const char* LibCurveErrorName(LIBCURVE_ERROR err) { +const char 
*LibCurveErrorName(LIBCURVE_ERROR err) { switch (err) { - case LIBCURVE_ERROR::OK: - return "OK"; - case LIBCURVE_ERROR::EXISTS: - return "EXISTS"; - case LIBCURVE_ERROR::FAILED: - return "FAILED"; - case LIBCURVE_ERROR::DISABLEIO: - return "DISABLEIO"; - case LIBCURVE_ERROR::AUTHFAIL: - return "AUTHFAIL"; - case LIBCURVE_ERROR::DELETING: - return "DELETING"; - case LIBCURVE_ERROR::NOTEXIST: - return "NOTEXIST"; - case LIBCURVE_ERROR::UNDER_SNAPSHOT: - return "UNDER_SNAPSHOT"; - case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: - return "NOT_UNDERSNAPSHOT"; - case LIBCURVE_ERROR::DELETE_ERROR: - return "DELETE_ERROR"; - case LIBCURVE_ERROR::NOT_ALLOCATE: - return "NOT_ALLOCATE"; - case LIBCURVE_ERROR::NOT_SUPPORT: - return "NOT_SUPPORT"; - case LIBCURVE_ERROR::NOT_EMPTY: - return "NOT_EMPTY"; - case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: - return "NO_SHRINK_BIGGER_FILE"; - case LIBCURVE_ERROR::SESSION_NOTEXISTS: - return "SESSION_NOTEXISTS"; - case LIBCURVE_ERROR::FILE_OCCUPIED: - return "FILE_OCCUPIED"; - case LIBCURVE_ERROR::PARAM_ERROR: - return "PARAM_ERROR"; - case LIBCURVE_ERROR::INTERNAL_ERROR: - return "INTERNAL_ERROR"; - case LIBCURVE_ERROR::CRC_ERROR: - return "CRC_ERROR"; - case LIBCURVE_ERROR::INVALID_REQUEST: - return "INVALID_REQUEST"; - case LIBCURVE_ERROR::DISK_FAIL: - return "DISK_FAIL"; - case LIBCURVE_ERROR::NO_SPACE: - return "NO_SPACE"; - case LIBCURVE_ERROR::NOT_ALIGNED: - return "NOT_ALIGNED"; - case LIBCURVE_ERROR::BAD_FD: - return "BAD_FD"; - case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: - return "LENGTH_NOT_SUPPORT"; - case LIBCURVE_ERROR::SESSION_NOT_EXIST: - return "SESSION_NOT_EXIST"; - case LIBCURVE_ERROR::STATUS_NOT_MATCH: - return "STATUS_NOT_MATCH"; - case LIBCURVE_ERROR::DELETE_BEING_CLONED: - return "DELETE_BEING_CLONED"; - case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: - return "CLIENT_NOT_SUPPORT_SNAPSHOT"; - case LIBCURVE_ERROR::SNAPSTHO_FROZEN: - return "SNAPSTHO_FROZEN"; - case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: - return 
"RETRY_UNTIL_SUCCESS"; - case LIBCURVE_ERROR::EPOCH_TOO_OLD: - return "EPOCH_TOO_OLD"; - case LIBCURVE_ERROR::UNKNOWN: - break; + case LIBCURVE_ERROR::OK: + return "OK"; + case LIBCURVE_ERROR::EXISTS: + return "EXISTS"; + case LIBCURVE_ERROR::FAILED: + return "FAILED"; + case LIBCURVE_ERROR::DISABLEIO: + return "DISABLEIO"; + case LIBCURVE_ERROR::AUTHFAIL: + return "AUTHFAIL"; + case LIBCURVE_ERROR::DELETING: + return "DELETING"; + case LIBCURVE_ERROR::NOTEXIST: + return "NOTEXIST"; + case LIBCURVE_ERROR::UNDER_SNAPSHOT: + return "UNDER_SNAPSHOT"; + case LIBCURVE_ERROR::NOT_UNDERSNAPSHOT: + return "NOT_UNDERSNAPSHOT"; + case LIBCURVE_ERROR::DELETE_ERROR: + return "DELETE_ERROR"; + case LIBCURVE_ERROR::NOT_ALLOCATE: + return "NOT_ALLOCATE"; + case LIBCURVE_ERROR::NOT_SUPPORT: + return "NOT_SUPPORT"; + case LIBCURVE_ERROR::NOT_EMPTY: + return "NOT_EMPTY"; + case LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE: + return "NO_SHRINK_BIGGER_FILE"; + case LIBCURVE_ERROR::SESSION_NOTEXISTS: + return "SESSION_NOTEXISTS"; + case LIBCURVE_ERROR::FILE_OCCUPIED: + return "FILE_OCCUPIED"; + case LIBCURVE_ERROR::PARAM_ERROR: + return "PARAM_ERROR"; + case LIBCURVE_ERROR::INTERNAL_ERROR: + return "INTERNAL_ERROR"; + case LIBCURVE_ERROR::CRC_ERROR: + return "CRC_ERROR"; + case LIBCURVE_ERROR::INVALID_REQUEST: + return "INVALID_REQUEST"; + case LIBCURVE_ERROR::DISK_FAIL: + return "DISK_FAIL"; + case LIBCURVE_ERROR::NO_SPACE: + return "NO_SPACE"; + case LIBCURVE_ERROR::NOT_ALIGNED: + return "NOT_ALIGNED"; + case LIBCURVE_ERROR::BAD_FD: + return "BAD_FD"; + case LIBCURVE_ERROR::LENGTH_NOT_SUPPORT: + return "LENGTH_NOT_SUPPORT"; + case LIBCURVE_ERROR::SESSION_NOT_EXIST: + return "SESSION_NOT_EXIST"; + case LIBCURVE_ERROR::STATUS_NOT_MATCH: + return "STATUS_NOT_MATCH"; + case LIBCURVE_ERROR::DELETE_BEING_CLONED: + return "DELETE_BEING_CLONED"; + case LIBCURVE_ERROR::CLIENT_NOT_SUPPORT_SNAPSHOT: + return "CLIENT_NOT_SUPPORT_SNAPSHOT"; + case LIBCURVE_ERROR::SNAPSTHO_FROZEN: + return 
"SNAPSTHO_FROZEN"; + case LIBCURVE_ERROR::RETRY_UNTIL_SUCCESS: + return "RETRY_UNTIL_SUCCESS"; + case LIBCURVE_ERROR::EPOCH_TOO_OLD: + return "EPOCH_TOO_OLD"; + case LIBCURVE_ERROR::UNKNOWN: + break; } static thread_local char message[64]; diff --git a/src/client/mds_client.cpp b/src/client/mds_client.cpp index 87a3b4b767..14bf850431 100644 --- a/src/client/mds_client.cpp +++ b/src/client/mds_client.cpp @@ -249,6 +249,8 @@ LIBCURVE_ERROR MDSClient::OpenFile(const std::string &filename, FileEpoch_t *fEpoch, LeaseSession *lease) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; OpenFileResponse response; mdsClientMetric_.openFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.openFile.latency); @@ -311,6 +313,8 @@ LIBCURVE_ERROR MDSClient::CreateFile(const std::string &filename, bool normalFile, uint64_t stripeUnit, uint64_t stripeCount) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CreateFileResponse response; mdsClientMetric_.createFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.createFile.latency); @@ -346,6 +350,8 @@ LIBCURVE_ERROR MDSClient::CloseFile(const std::string &filename, const UserInfo_t &userinfo, const std::string &sessionid) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CloseFileResponse response; mdsClientMetric_.closeFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.closeFile.latency); @@ -380,6 +386,8 @@ LIBCURVE_ERROR MDSClient::GetFileInfo(const std::string &filename, const UserInfo_t &uinfo, FInfo_t *fi, FileEpoch_t *fEpoch) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetFileInfoResponse response; mdsClientMetric_.getFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.getFile.latency); @@ -416,6 +424,8 @@ LIBCURVE_ERROR MDSClient::IncreaseEpoch(const std::string& filename, FileEpoch_t *fEpoch, std::list> *csLocs) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; IncreaseFileEpochResponse response; 
mdsClientMetric_.increaseEpoch.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.increaseEpoch.latency); @@ -474,6 +484,8 @@ LIBCURVE_ERROR MDSClient::CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, uint64_t* seq) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CreateSnapShotResponse response; MDSClientBase::CreateSnapShot(filename, userinfo, &response, cntl, channel); @@ -533,6 +545,8 @@ LIBCURVE_ERROR MDSClient::DeleteSnapShot(const std::string &filename, const UserInfo_t &userinfo, uint64_t seq) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; DeleteSnapShotResponse response; MDSClientBase::DeleteSnapShot(filename, userinfo, seq, &response, cntl, channel); @@ -565,6 +579,8 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, const std::vector *seq, std::map *snapif) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ListSnapShotFileInfoResponse response; MDSClientBase::ListSnapShot(filename, userinfo, seq, &response, cntl, channel); @@ -597,7 +613,7 @@ LIBCURVE_ERROR MDSClient::ListSnapShot(const std::string &filename, snapif->insert(std::make_pair(tempInfo.seqnum, tempInfo)); } - if (response.fileinfo_size() != seq->size()) { + if (response.fileinfo_size() != static_cast(seq->size())) { LOG(WARNING) << "some snapshot info not found!"; return LIBCURVE_ERROR::NOTEXIST; } @@ -613,6 +629,8 @@ LIBCURVE_ERROR MDSClient::GetSnapshotSegmentInfo(const std::string &filename, uint64_t seq, uint64_t offset, SegmentInfo *segInfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetOrAllocateSegmentResponse response; MDSClientBase::GetSnapshotSegmentInfo(filename, userinfo, seq, offset, &response, cntl, channel); @@ -676,6 +694,8 @@ LIBCURVE_ERROR MDSClient::RefreshSession(const std::string &filename, LeaseRefreshResult *resp, LeaseSession *lease) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ReFreshSessionResponse 
response; mdsClientMetric_.refreshSession.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.refreshSession.latency); @@ -750,6 +770,8 @@ LIBCURVE_ERROR MDSClient::CheckSnapShotStatus(const std::string &filename, uint64_t seq, FileStatus *filestatus) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CheckSnapShotStatusResponse response; MDSClientBase::CheckSnapShotStatus(filename, userinfo, seq, &response, cntl, channel); @@ -785,6 +807,8 @@ MDSClient::GetServerList(const LogicPoolID &logicalpooid, const std::vector ©setidvec, std::vector> *cpinfoVec) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetChunkServerListInCopySetsResponse response; mdsClientMetric_.getServerList.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.getServerList.latency); @@ -848,6 +872,8 @@ MDSClient::GetServerList(const LogicPoolID &logicalpooid, LIBCURVE_ERROR MDSClient::GetClusterInfo(ClusterContext *clsctx) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; curve::mds::topology::GetClusterInfoResponse response; MDSClientBase::GetClusterInfo(&response, cntl, channel); @@ -873,6 +899,8 @@ LIBCURVE_ERROR MDSClient::CreateCloneFile( const UserInfo_t &userinfo, uint64_t size, uint64_t sn, uint32_t chunksize, uint64_t stripeUnit, uint64_t stripeCount, FInfo *fileinfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; CreateCloneFileResponse response; MDSClientBase::CreateCloneFile(source, destination, userinfo, size, sn, chunksize, stripeUnit, stripeCount, @@ -928,6 +956,8 @@ LIBCURVE_ERROR MDSClient::SetCloneFileStatus(const std::string &filename, const UserInfo_t &userinfo, uint64_t fileID) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; SetCloneFileStatusResponse response; MDSClientBase::SetCloneFileStatus(filename, filestatus, userinfo, fileID, &response, cntl, channel); @@ -960,6 +990,8 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, const 
FileEpoch_t *fEpoch, SegmentInfo *segInfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; GetOrAllocateSegmentResponse response; mdsClientMetric_.getOrAllocateSegment.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.getOrAllocateSegment.latency); @@ -1023,6 +1055,8 @@ LIBCURVE_ERROR MDSClient::GetOrAllocateSegment(bool allocate, uint64_t offset, LIBCURVE_ERROR MDSClient::DeAllocateSegment(const FInfo *fileInfo, uint64_t offset) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; DeAllocateSegmentResponse response; mdsClientMetric_.deAllocateSegment.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.deAllocateSegment.latency); @@ -1063,6 +1097,8 @@ LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t &userinfo, uint64_t originId, uint64_t destinationId) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; RenameFileResponse response; mdsClientMetric_.renameFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.renameFile.latency); @@ -1102,6 +1138,8 @@ LIBCURVE_ERROR MDSClient::RenameFile(const UserInfo_t &userinfo, LIBCURVE_ERROR MDSClient::Extend(const std::string &filename, const UserInfo_t &userinfo, uint64_t newsize) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ExtendFileResponse response; mdsClientMetric_.extendFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.extendFile.latency); @@ -1135,6 +1173,8 @@ LIBCURVE_ERROR MDSClient::DeleteFile(const std::string &filename, const UserInfo_t &userinfo, bool deleteforce, uint64_t fileid) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; DeleteFileResponse response; mdsClientMetric_.deleteFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.deleteFile.latency); @@ -1173,6 +1213,8 @@ LIBCURVE_ERROR MDSClient::RecoverFile(const std::string &filename, const UserInfo_t &userinfo, uint64_t fileid) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; RecoverFileResponse response; 
mdsClientMetric_.recoverFile.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.recoverFile.latency); @@ -1205,6 +1247,8 @@ LIBCURVE_ERROR MDSClient::ChangeOwner(const std::string &filename, const std::string &newOwner, const UserInfo_t &userinfo) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ChangeOwnerResponse response; mdsClientMetric_.changeOwner.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.changeOwner.latency); @@ -1244,6 +1288,8 @@ LIBCURVE_ERROR MDSClient::Listdir(const std::string &dirpath, const UserInfo_t &userinfo, std::vector *filestatVec) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; ListDirResponse response; mdsClientMetric_.listDir.qps.count << 1; LatencyGuard lg(&mdsClientMetric_.listDir.latency); @@ -1306,6 +1352,8 @@ LIBCURVE_ERROR MDSClient::GetChunkServerInfo(const PeerAddr &csAddr, } auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; curve::mds::topology::GetChunkServerInfoResponse response; mdsClientMetric_.getChunkServerId.qps.count << 1; @@ -1366,6 +1414,8 @@ LIBCURVE_ERROR MDSClient::ListChunkServerInServer(const std::string &serverIp, std::vector *csIds) { auto task = RPCTaskDefine { + (void)addrindex; + (void)rpctimeoutMS; curve::mds::topology::ListChunkServerResponse response; mdsClientMetric_.listChunkserverInServer.qps.count << 1; diff --git a/src/client/mds_client_base.cpp b/src/client/mds_client_base.cpp index 2ab238fef5..1f390f519e 100644 --- a/src/client/mds_client_base.cpp +++ b/src/client/mds_client_base.cpp @@ -377,7 +377,6 @@ void MDSClientBase::GetOrAllocateSegment(bool allocate, // convert the user offset to seg offset uint64_t segmentsize = fi->segmentsize; - uint64_t chunksize = fi->chunksize; uint64_t seg_offset = (offset / segmentsize) * segmentsize; request.set_filename(fi->fullPathName); request.set_offset(seg_offset); diff --git a/src/client/metacache.cpp b/src/client/metacache.cpp index 289a1eb1aa..dd9f4ba2f0 100644 --- a/src/client/metacache.cpp 
+++ b/src/client/metacache.cpp @@ -214,8 +214,7 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( CopysetID copysetId, const PeerAddr& leaderAddr) { std::vector> copysetInfos; - int ret = - mdsclient_->GetServerList(logicPoolId, {copysetId}, ©setInfos); + (void)mdsclient_->GetServerList(logicPoolId, {copysetId}, ©setInfos); bool needUpdate = (!copysetInfos.empty()) && (copysetInfos[0].HasPeerInCopyset(leaderAddr)); diff --git a/src/client/metacache_struct.h b/src/client/metacache_struct.h index f29d3de467..85ca375339 100644 --- a/src/client/metacache_struct.h +++ b/src/client/metacache_struct.h @@ -55,7 +55,7 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetPeerInfo { CopysetPeerInfo() = default; - CopysetPeerInfo(const CopysetPeerInfo&) = default; + CopysetPeerInfo(const CopysetPeerInfo &) = default; CopysetPeerInfo &operator=(const CopysetPeerInfo &other) = default; CopysetPeerInfo(const T &cid, const PeerAddr &internal, @@ -160,7 +160,7 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { bool GetCurrentLeaderID(T *id) const { if (leaderindex_ >= 0) { - if (csinfos_.size() < leaderindex_) { + if (static_cast(csinfos_.size()) < leaderindex_) { return false; } else { *id = csinfos_[leaderindex_].peerID; @@ -212,7 +212,8 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { */ int GetLeaderInfo(T *peerid, EndPoint *ep) { // 第一次获取leader,如果当前leader信息没有确定,返回-1,由外部主动发起更新leader - if (leaderindex_ < 0 || leaderindex_ >= csinfos_.size()) { + if (leaderindex_ < 0 || + leaderindex_ >= static_cast(csinfos_.size())) { LOG(INFO) << "GetLeaderInfo pool " << lpid_ << ", copyset " << cpid_ << " has no leader"; diff --git a/src/client/request_sender.cpp b/src/client/request_sender.cpp index 12e062b62e..539d063d4f 100644 --- a/src/client/request_sender.cpp +++ b/src/client/request_sender.cpp @@ -80,6 +80,7 @@ int RequestSender::ReadChunk(const ChunkIDInfo& idinfo, uint64_t appliedindex, const RequestSourceInfo& sourceInfo, ClientClosure *done) { + (void)sn; 
brpc::ClosureGuard doneGuard(done); brpc::Controller *cntl = new brpc::Controller(); ChunkResponse *response = new ChunkResponse(); diff --git a/src/client/splitor.cpp b/src/client/splitor.cpp index f95b20e0b7..c821f16395 100644 --- a/src/client/splitor.cpp +++ b/src/client/splitor.cpp @@ -143,8 +143,6 @@ bool Splitor::AssignInternal(IOTracker* iotracker, MetaCache* metaCache, MDSClient* mdsclient, const FInfo_t* fileInfo, const FileEpoch_t* fEpoch, ChunkIndex chunkidx) { - const auto maxSplitSizeBytes = 1024 * iosplitopt_.fileIOSplitMaxSizeKB; - lldiv_t res = std::div( static_cast(chunkidx) * fileInfo->chunksize, // NOLINT static_cast(fileInfo->segmentsize)); // NOLINT @@ -369,7 +367,6 @@ int Splitor::SplitForStripe(IOTracker* iotracker, MetaCache* metaCache, uint64_t cur = offset; uint64_t left = length; - uint64_t curChunkIndex = 0; while (left > 0) { uint64_t blockIndex = cur / stripeUnit; @@ -427,11 +424,11 @@ uint64_t Splitor::ProcessUnalignedRequests(const off_t currentOffset, uint64_t alignedEndOffset = common::align_down(currentEndOff, iosplitopt_.alignment.cloneVolume); - if (currentOffset == alignedStartOffset && + if (static_cast(currentOffset) == alignedStartOffset && currentEndOff == alignedEndOffset) { padding->aligned = true; } else { - if (currentOffset == alignedStartOffset) { + if (static_cast(currentOffset) == alignedStartOffset) { padding->aligned = false; padding->type = RequestContext::Padding::Right; padding->offset = alignedEndOffset; diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp index 26fa1bbea8..5cc99945fe 100644 --- a/src/client/unstable_helper.cpp +++ b/src/client/unstable_helper.cpp @@ -24,15 +24,14 @@ namespace curve { namespace client { -UnstableState UnstableHelper::GetCurrentUnstableState( - ChunkServerID csId, - const butil::EndPoint& csEndPoint) { - +UnstableState +UnstableHelper::GetCurrentUnstableState(ChunkServerID csId, + const butil::EndPoint &csEndPoint) { std::string ip = 
butil::ip2str(csEndPoint.ip).c_str(); mtx_.lock(); // 如果当前ip已经超过阈值,则直接返回chunkserver unstable - int unstabled = serverUnstabledChunkservers_[ip].size(); + uint32_t unstabled = serverUnstabledChunkservers_[ip].size(); if (unstabled >= option_.serverUnstableThreshold) { serverUnstabledChunkservers_[ip].emplace(csId); mtx_.unlock(); diff --git a/src/common/concurrent/dlock.h b/src/common/concurrent/dlock.h index c68538e4e9..36210b445b 100644 --- a/src/common/concurrent/dlock.h +++ b/src/common/concurrent/dlock.h @@ -32,8 +32,8 @@ namespace curve { namespace common { -using curve::kvstorage::KVStorageClient; using curve::common::Uncopyable; +using curve::kvstorage::KVStorageClient; struct DLockOpts { std::string pfx; @@ -47,19 +47,19 @@ struct DLockOpts { class DLock : public Uncopyable { public: - explicit DLock(const DLockOpts &opts) : opts_(opts), locker_(0) {} + explicit DLock(const DLockOpts &opts) : locker_(0), opts_(opts) {} virtual ~DLock(); /** * @brief Init the etcd Mutex - * + * * @return lock leaseid */ virtual int64_t Init(); /** * @brief lock the object - * + * * @return error code EtcdErrCode */ virtual int Lock(); @@ -97,8 +97,8 @@ class DLock : public Uncopyable { const DLockOpts &opts_; }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_CONCURRENT_DLOCK_H_ diff --git a/src/common/fs_util.h b/src/common/fs_util.h index 99dab36174..3e591fd5ca 100644 --- a/src/common/fs_util.h +++ b/src/common/fs_util.h @@ -20,8 +20,8 @@ * Author: charisu */ -#ifndef SRC_COMMON_FS_UTIL_H_ -#define SRC_COMMON_FS_UTIL_H_ +#ifndef SRC_COMMON_FS_UTIL_H_ +#define SRC_COMMON_FS_UTIL_H_ #include #include @@ -32,8 +32,8 @@ namespace curve { namespace common { // 计算path2相对于path1的相对路径 -inline std::string CalcRelativePath(const std::string& path1, - const std::string& path2) { +inline std::string CalcRelativePath(const std::string &path1, + const std::string &path2) { if (path1.empty() || path2.empty()) { return ""; } @@ 
-41,7 +41,7 @@ inline std::string CalcRelativePath(const std::string& path1, std::vector dirs2; SplitString(path1, "/", &dirs1); SplitString(path2, "/", &dirs2); - int unmatchedIndex = 0; + size_t unmatchedIndex = 0; while (unmatchedIndex < dirs1.size() && unmatchedIndex < dirs2.size()) { if (dirs1[unmatchedIndex] != dirs2[unmatchedIndex]) { break; @@ -52,13 +52,13 @@ inline std::string CalcRelativePath(const std::string& path1, if (unmatchedIndex == dirs1.size()) { rpath.append("."); } - for (int i = 0; i < dirs1.size() - unmatchedIndex; ++i) { + for (int i = 0; i < static_cast(dirs1.size() - unmatchedIndex); ++i) { if (i > 0) { rpath.append("/"); } rpath.append(".."); } - for (int i = unmatchedIndex; i < dirs2.size(); ++i) { + for (size_t i = unmatchedIndex; i < dirs2.size(); ++i) { rpath.append("/"); rpath.append(dirs2[i]); } @@ -66,8 +66,7 @@ inline std::string CalcRelativePath(const std::string& path1, } // Check whether the path2 is the subpath of path1 -inline bool IsSubPath(const std::string& path1, - const std::string& path2) { +inline bool IsSubPath(const std::string &path1, const std::string &path2) { return StringStartWith(CalcRelativePath(path1, path2), "./"); } @@ -75,4 +74,3 @@ inline bool IsSubPath(const std::string& path1, } // namespace curve #endif // SRC_COMMON_FS_UTIL_H_ - diff --git a/src/common/s3_adapter.h b/src/common/s3_adapter.h index b975de1b8e..53e514a813 100644 --- a/src/common/s3_adapter.h +++ b/src/common/s3_adapter.h @@ -22,6 +22,7 @@ #ifndef SRC_COMMON_S3_ADAPTER_H_ #define SRC_COMMON_S3_ADAPTER_H_ +#include #include #include #include @@ -54,7 +55,7 @@ #include //NOLINT #include //NOLINT #include //NOLINT -#include // NOLINT +#include //NOLINT #include "src/common/configuration.h" #include "src/common/throttle.h" @@ -116,7 +117,7 @@ struct GetObjectAsyncContext : public Aws::Client::AsyncCallerContext { size_t len; GetObjectAsyncCallBack cb; int retCode; - int retry; + uint32_t retry; size_t actualLen; }; @@ -144,9 +145,7 @@ class 
S3Adapter { s3Client_ = nullptr; throttle_ = nullptr; } - virtual ~S3Adapter() { - Deinit(); - } + virtual ~S3Adapter() { Deinit(); } /** * 初始化S3Adapter */ @@ -360,10 +359,15 @@ class FakeS3Adapter : public S3Adapter { int PutObject(const Aws::String &key, const char *buffer, const size_t bufferSize) override { + (void)key; + (void)buffer; + (void)bufferSize; return 0; } int PutObject(const Aws::String &key, const std::string &data) override { + (void)key; + (void)data; return 0; } @@ -374,6 +378,8 @@ class FakeS3Adapter : public S3Adapter { } int GetObject(const Aws::String &key, std::string *data) override { + (void)key; + (void)data; // just return 4M data data->resize(4 * 1024 * 1024, '1'); return 0; @@ -381,6 +387,8 @@ class FakeS3Adapter : public S3Adapter { int GetObject(const std::string &key, char *buf, off_t offset, size_t len) override { + (void)key; + (void)offset; // juset return len data memset(buf, '1', len); return 0; @@ -393,13 +401,20 @@ class FakeS3Adapter : public S3Adapter { context->cb(this, context); } - int DeleteObject(const Aws::String &key) override { return 0; } + int DeleteObject(const Aws::String &key) override { + (void)key; + return 0; + } int DeleteObjects(const std::list &keyList) override { + (void)keyList; return 0; } - bool ObjectExist(const Aws::String &key) override { return true; } + bool ObjectExist(const Aws::String &key) override { + (void)key; + return true; + } }; diff --git a/src/fs/ext4_filesystem_impl.cpp b/src/fs/ext4_filesystem_impl.cpp index 7392e2d01e..f4cd6cfcdb 100644 --- a/src/fs/ext4_filesystem_impl.cpp +++ b/src/fs/ext4_filesystem_impl.cpp @@ -337,7 +337,7 @@ int Ext4FileSystemImpl::Write(int fd, butil::IOBuf buf, uint64_t offset, int length) { - if (length != buf.size()) { + if (length != static_cast(buf.size())) { LOG(ERROR) << "IOBuf::pcut_into_file_descriptor failed, fd: " << fd << ", data size doesn't equal to length, data size: " << buf.size() << ", length: " << length; @@ -345,7 +345,6 @@ int 
Ext4FileSystemImpl::Write(int fd, } int remainLength = length; - int relativeOffset = 0; int retryTimes = 0; while (remainLength > 0) { @@ -380,6 +379,9 @@ int Ext4FileSystemImpl::Sync(int fd) { int Ext4FileSystemImpl::Append(int fd, const char *buf, int length) { + (void)fd; + (void)buf; + (void)length; // TODO(yyk) return 0; } diff --git a/src/fs/local_filesystem.cpp b/src/fs/local_filesystem.cpp index 52301b0c27..a14ae59829 100644 --- a/src/fs/local_filesystem.cpp +++ b/src/fs/local_filesystem.cpp @@ -32,6 +32,7 @@ namespace fs { std::shared_ptr LocalFsFactory::CreateFs( FileSystemType type, const std::string& deviceID) { + (void)deviceID; std::shared_ptr localFs; if (type == FileSystemType::EXT4) { localFs = Ext4FileSystemImpl::getInstance(); diff --git a/src/mds/copyset/copyset_manager.cpp b/src/mds/copyset/copyset_manager.cpp index fc06973da9..445885860c 100644 --- a/src/mds/copyset/copyset_manager.cpp +++ b/src/mds/copyset/copyset_manager.cpp @@ -57,7 +57,7 @@ bool CopysetManager::GenCopyset(const ClusterInfo& cluster, } int numChunkServers = cluster.GetClusterSize(); - if (*scatterWidth > (numChunkServers - 1)) { + if (static_cast(*scatterWidth) > (numChunkServers - 1)) { // It's impossible that scatterWidth is lager than cluster size return false; } diff --git a/src/mds/copyset/copyset_policy.cpp b/src/mds/copyset/copyset_policy.cpp index e938c3498b..8f2612efcd 100644 --- a/src/mds/copyset/copyset_policy.cpp +++ b/src/mds/copyset/copyset_policy.cpp @@ -131,6 +131,8 @@ void CopysetZoneShufflePolicy::GetMinCopySetFromScatterWidth( int CopysetZoneShufflePolicy::GetMaxPermutationNum(int numCopysets, int numChunkServers, int numReplicas) { + (void)numChunkServers; + (void)numReplicas; return numCopysets; } diff --git a/src/mds/heartbeat/chunkserver_healthy_checker.cpp b/src/mds/heartbeat/chunkserver_healthy_checker.cpp index 0b3844a8a1..ce4225bd1d 100644 --- a/src/mds/heartbeat/chunkserver_healthy_checker.cpp +++ 
b/src/mds/heartbeat/chunkserver_healthy_checker.cpp @@ -90,7 +90,6 @@ bool ChunkserverHealthyChecker::ChunkServerStateNeedUpdate( return false; } - bool shouldOffline = true; if (OnlineState::OFFLINE != info.state) { LOG(WARNING) << "chunkserver " << info.csId << " is offline. " << timePass / milliseconds(1) << "ms from last heartbeat"; diff --git a/src/mds/heartbeat/heartbeat_service.cpp b/src/mds/heartbeat/heartbeat_service.cpp index db55479a34..1100952d9e 100644 --- a/src/mds/heartbeat/heartbeat_service.cpp +++ b/src/mds/heartbeat/heartbeat_service.cpp @@ -36,6 +36,7 @@ void HeartbeatServiceImpl::ChunkServerHeartbeat( const ::curve::mds::heartbeat::ChunkServerHeartbeatRequest *request, ::curve::mds::heartbeat::ChunkServerHeartbeatResponse *response, ::google::protobuf::Closure *done) { + (void)controller; brpc::ClosureGuard doneGuard(done); heartbeatManager_->ChunkServerHeartbeat(*request, response); } diff --git a/src/mds/nameserver2/allocstatistic/alloc_statistic.h b/src/mds/nameserver2/allocstatistic/alloc_statistic.h index caa90f2d22..6ecb6f0da4 100644 --- a/src/mds/nameserver2/allocstatistic/alloc_statistic.h +++ b/src/mds/nameserver2/allocstatistic/alloc_statistic.h @@ -32,12 +32,12 @@ #include "src/common/concurrent/concurrent.h" #include "src/common/interruptible_sleeper.h" -using ::curve::mds::topology::PoolIdType; using ::curve::common::Atomic; +using ::curve::common::InterruptibleSleeper; using ::curve::common::Mutex; using ::curve::common::RWLock; using ::curve::common::Thread; -using ::curve::common::InterruptibleSleeper; +using ::curve::mds::topology::PoolIdType; namespace curve { namespace mds { @@ -49,7 +49,8 @@ using ::curve::kvstorage::EtcdClientImp; * The statistics are divided into two parts: * part1: * 1. statistics of the allocation amount before the designated revision - * 2. record the segment allocation amount of each revision since mds started + * 2. record the segment allocation amount of each revision since mds + * started * 3. 
combine the data in 1 and 2 * part2: the background periodically persists the merged data in part1 * @@ -76,17 +77,12 @@ class AllocStatistic { * @param[in] client Etcd client */ AllocStatistic(uint64_t periodicPersistInterMs, uint64_t retryInterMs, - std::shared_ptr client) : - client_(client), - currentValueAvalible_(false), - segmentAllocFromEtcdOK_(false), - stop_(true), - periodicPersistInterMs_(periodicPersistInterMs), - retryInterMs_(retryInterMs) {} - - ~AllocStatistic() { - Stop(); - } + std::shared_ptr client) + : client_(client), segmentAllocFromEtcdOK_(false), + currentValueAvalible_(false), retryInterMs_(retryInterMs), + periodicPersistInterMs_(periodicPersistInterMs), stop_(true) {} + + ~AllocStatistic() { Stop(); } /** * @brief Init Obtains the allocated segment information and information in @@ -97,7 +93,7 @@ class AllocStatistic { */ int Init(); - /** + /** * @brief Run 1. get all the segments under the specified revision * 2. persist the statistics of allocated segment size in memory * under each logicalPool regularly @@ -138,11 +134,10 @@ class AllocStatistic { * @param[in] changeSize Segment reduction * @param[in] revision Version corresponding to this change */ - virtual void DeAllocSpace( - PoolIdType, int64_t changeSize, int64_t revision); + virtual void DeAllocSpace(PoolIdType, int64_t changeSize, int64_t revision); private: - /** + /** * @brief CalculateSegmentAlloc Get all the segment records of the * specified revision from Etcd */ @@ -154,14 +149,15 @@ class AllocStatistic { */ void PeriodicPersist(); - /** + /** * @brief HandleResult Dealing with the situation that error occur when * obtaining all segment records of specified revision */ bool HandleResult(int res); /** - * @brief DoMerge For each logicalPool, merge the change amount and data read in Etcd //NOLINT + * @brief DoMerge For each logicalPool, merge the change amount and data + * read in Etcd //NOLINT */ void DoMerge(); @@ -201,7 +197,8 @@ class AllocStatistic { std::map 
existSegmentAllocValues_; RWLock existSegmentAllocValuesLock_; - // At the beginning, stores allocation data of the segment before specified revision //NOLINT + // At the beginning, stores allocation data of the segment before specified + // revision // Later, stores the merged value std::map segmentAlloc_; RWLock segmentAllocLock_; @@ -230,7 +227,8 @@ class AllocStatistic { InterruptibleSleeper sleeper_; - // thread for periodically persisting allocated segment size of each logical pool //NOLINT + // thread for periodically persisting allocated segment size of each logical + // pool Thread periodicPersist_; // thread for calculating allocated segment size under specified revision @@ -240,4 +238,3 @@ class AllocStatistic { } // namespace curve #endif // SRC_MDS_NAMESERVER2_ALLOCSTATISTIC_ALLOC_STATISTIC_H_ - diff --git a/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp b/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp index 89bec51c80..5a4a980615 100644 --- a/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp +++ b/src/mds/nameserver2/allocstatistic/alloc_statistic_helper.cpp @@ -32,22 +32,21 @@ namespace curve { namespace mds { -using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTALLOCSIZEKEY; -using ::curve::common::SEGMENTINFOKEYPREFIX; +using ::curve::common::SEGMENTALLOCSIZEKEYEND; using ::curve::common::SEGMENTINFOKEYEND; +using ::curve::common::SEGMENTINFOKEYPREFIX; const int GETBUNDLE = 1000; int AllocStatisticHelper::GetExistSegmentAllocValues( std::map *out, const std::shared_ptr &client) { // Obtain the segmentSize value of corresponding logical pools from Etcd std::vector allocVec; - int res = client->List( - SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, &allocVec); + int res = + client->List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, &allocVec); if (res != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list [" << SEGMENTALLOCSIZEKEY << "," - << SEGMENTALLOCSIZEKEYEND << ") fail, errorCode: " - << 
res; + << SEGMENTALLOCSIZEKEYEND << ") fail, errorCode: " << res; return -1; } @@ -55,8 +54,8 @@ int AllocStatisticHelper::GetExistSegmentAllocValues( for (auto &item : allocVec) { PoolIdType lid; uint64_t alloc; - bool res = NameSpaceStorageCodec::DecodeSegmentAllocValue( - item, &lid, &alloc); + bool res = + NameSpaceStorageCodec::DecodeSegmentAllocValue(item, &lid, &alloc); if (false == res) { LOG(ERROR) << "decode segment alloc value: " << item << " fail"; continue; @@ -83,8 +82,9 @@ int AllocStatisticHelper::CalculateSegmentAlloc( // get segments in bundles from Etcd, GETBUNDLE is the number of items // to fetch - int res = client->ListWithLimitAndRevision( - startKey, SEGMENTINFOKEYEND, GETBUNDLE, revision, &values, &lastKey); + int res = client->ListWithLimitAndRevision(startKey, SEGMENTINFOKEYEND, + GETBUNDLE, revision, &values, + &lastKey); if (res != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list [" << startKey << "," << SEGMENTINFOKEYEND << ") at revision: " << revision @@ -94,17 +94,17 @@ int AllocStatisticHelper::CalculateSegmentAlloc( } // decode the obtained value - int startPos = 1; + size_t startPos = 1; if (startKey == SEGMENTINFOKEYPREFIX) { startPos = 0; } - for ( ; startPos < values.size(); startPos++) { + for (; startPos < values.size(); startPos++) { PageFileSegment segment; - bool res = NameSpaceStorageCodec::DecodeSegment( - values[startPos], &segment); + bool res = NameSpaceStorageCodec::DecodeSegment(values[startPos], + &segment); if (false == res) { - LOG(ERROR) << "decode segment item{" - << values[startPos] << "} fail"; + LOG(ERROR) << "decode segment item{" << values[startPos] + << "} fail"; return -1; } else { (*out)[segment.logicalpoolid()] += segment.segmentsize(); diff --git a/src/mds/nameserver2/chunk_allocator.cpp b/src/mds/nameserver2/chunk_allocator.cpp index e2ee64425b..b4b3fb8413 100644 --- a/src/mds/nameserver2/chunk_allocator.cpp +++ b/src/mds/nameserver2/chunk_allocator.cpp @@ -63,7 +63,7 @@ bool 
ChunkSegmentAllocatorImpl::AllocateChunkSegment(FileType type, return false; } auto logicalpoolId = copysets[0].logicalPoolId; - for (auto i = 0; i < copysets.size(); i++) { + for (size_t i = 0; i < copysets.size(); i++) { if (copysets[i].logicalPoolId != logicalpoolId) { LOG(ERROR) << "Allocate Copysets id not same, copysets[" << i << "] = " diff --git a/src/mds/nameserver2/clean_core.cpp b/src/mds/nameserver2/clean_core.cpp index cf5907d5fd..54f743c300 100644 --- a/src/mds/nameserver2/clean_core.cpp +++ b/src/mds/nameserver2/clean_core.cpp @@ -172,7 +172,6 @@ StatusCode CleanCore::CleanDiscardSegment( const DiscardSegmentInfo& discardSegmentInfo, TaskProgress* progress) { const FileInfo& fileInfo = discardSegmentInfo.fileinfo(); const PageFileSegment& segment = discardSegmentInfo.pagefilesegment(); - const LogicalPoolID logicalPoolId = segment.logicalpoolid(); const SeqNum seq = fileInfo.seqnum(); LOG(INFO) << "Start CleanDiscardSegment, filename = " << fileInfo.filename() diff --git a/src/mds/nameserver2/curvefs.cpp b/src/mds/nameserver2/curvefs.cpp index a5dfef17d0..c634cd8ad2 100644 --- a/src/mds/nameserver2/curvefs.cpp +++ b/src/mds/nameserver2/curvefs.cpp @@ -453,6 +453,7 @@ StatusCode CurveFS::GetAllocatedSize(const std::string& fileName, StatusCode CurveFS::GetFileAllocSize(const std::string& fileName, const FileInfo& fileInfo, AllocatedSize* allocSize) { + (void)fileName; std::vector segments; auto listSegmentRet = storage_->ListSegment(fileInfo.id(), &segments); @@ -470,6 +471,7 @@ StatusCode CurveFS::GetFileAllocSize(const std::string& fileName, StatusCode CurveFS::GetDirAllocSize(const std::string& fileName, const FileInfo& fileInfo, AllocatedSize* allocSize) { + (void)fileInfo; std::vector files; StatusCode ret = ReadDir(fileName, &files); if (ret != StatusCode::kOK) { @@ -1955,6 +1957,7 @@ StatusCode CurveFS::CheckPathOwnerInternal(const std::string &filename, const std::string &signature, std::string *lastEntry, uint64_t *parentID) { + 
(void)signature; std::vector paths; ::curve::common::SplitString(filename, "/", &paths); @@ -2247,6 +2250,7 @@ bool CurveFS::CheckSignature(const std::string& owner, StatusCode CurveFS::ListClient(bool listAllClient, std::vector* clientInfos) { + (void)listAllClient; std::set allClients = fileRecordManager_->ListAllClient(); for (const auto &c : allClients) { diff --git a/src/mds/nameserver2/namespace_storage.cpp b/src/mds/nameserver2/namespace_storage.cpp index 9191178884..6546869f69 100644 --- a/src/mds/nameserver2/namespace_storage.cpp +++ b/src/mds/nameserver2/namespace_storage.cpp @@ -26,43 +26,40 @@ #include "src/mds/nameserver2/helper/namespace_helper.h" #include "src/common/namespace_define.h" -using ::curve::common::SNAPSHOTFILEINFOKEYPREFIX; -using ::curve::common::SNAPSHOTFILEINFOKEYEND; -using ::curve::common::DISCARDSEGMENTKEYPREFIX; using ::curve::common::DISCARDSEGMENTKEYEND; +using ::curve::common::DISCARDSEGMENTKEYPREFIX; +using ::curve::common::SNAPSHOTFILEINFOKEYEND; +using ::curve::common::SNAPSHOTFILEINFOKEYPREFIX; namespace curve { namespace mds { -std::ostream& operator << (std::ostream & os, StoreStatus &s) { +std::ostream &operator<<(std::ostream &os, StoreStatus &s) { os << static_cast::type>(s); return os; } NameServerStorageImp::NameServerStorageImp( std::shared_ptr client, std::shared_ptr cache) - : client_(client), cache_(cache), discardMetric_() {} + : cache_(cache), client_(client), discardMetric_() {} StoreStatus NameServerStorageImp::PutFile(const FileInfo &fileInfo) { std::string storeKey; - if (GetStoreKey(fileInfo.filetype(), - fileInfo.parentid(), - fileInfo.filename(), - &storeKey) - != StoreStatus::OK) { + if (GetStoreKey(fileInfo.filetype(), fileInfo.parentid(), + fileInfo.filename(), &storeKey) != StoreStatus::OK) { LOG(ERROR) << "get store key failed,filename = " << fileInfo.filename(); return StoreStatus::InternalError; } std::string encodeFileInfo; if (!NameSpaceStorageCodec::EncodeFileInfo(fileInfo, &encodeFileInfo)) { 
- LOG(ERROR) << "encode file: " << fileInfo.filename()<< "err"; + LOG(ERROR) << "encode file: " << fileInfo.filename() << "err"; return StoreStatus::InternalError; } int errCode = client_->Put(storeKey, encodeFileInfo); if (errCode != EtcdErrCode::EtcdOK) { - LOG(ERROR) << "put file: [" << fileInfo.filename() << "] err: " - << errCode; + LOG(ERROR) << "put file: [" << fileInfo.filename() + << "] err: " << errCode; } else { // update to cache cache_->Put(storeKey, encodeFileInfo); @@ -75,8 +72,8 @@ StoreStatus NameServerStorageImp::GetFile(InodeID parentid, const std::string &filename, FileInfo *fileInfo) { std::string storeKey; - if (GetStoreKey(FileType::INODE_PAGEFILE, parentid, filename, &storeKey) - != StoreStatus::OK) { + if (GetStoreKey(FileType::INODE_PAGEFILE, parentid, filename, &storeKey) != + StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << filename; return StoreStatus::InternalError; } @@ -112,10 +109,10 @@ StoreStatus NameServerStorageImp::GetFile(InodeID parentid, } StoreStatus NameServerStorageImp::DeleteFile(InodeID id, - const std::string &filename) { + const std::string &filename) { std::string storeKey; - if (GetStoreKey(FileType::INODE_PAGEFILE, id, filename, &storeKey) - != StoreStatus::OK) { + if (GetStoreKey(FileType::INODE_PAGEFILE, id, filename, &storeKey) != + StoreStatus::OK) { LOG(ERROR) << "get store key failed,filename = " << filename; return StoreStatus::InternalError; } @@ -131,11 +128,12 @@ StoreStatus NameServerStorageImp::DeleteFile(InodeID id, return getErrorCode(resCode); } -StoreStatus NameServerStorageImp::DeleteSnapshotFile(InodeID id, - const std::string &filename) { +StoreStatus +NameServerStorageImp::DeleteSnapshotFile(InodeID id, + const std::string &filename) { std::string storeKey; - if (GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, - id, filename, &storeKey) != StoreStatus::OK) { + if (GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, id, filename, + &storeKey) != StoreStatus::OK) { LOG(ERROR) << 
"get store key failed, filename = " << filename; return StoreStatus::InternalError; } @@ -152,12 +150,10 @@ StoreStatus NameServerStorageImp::DeleteSnapshotFile(InodeID id, } StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, - const FileInfo &newFInfo) { + const FileInfo &newFInfo) { std::string oldStoreKey; - auto res = GetStoreKey(FileType::INODE_PAGEFILE, - oldFInfo.parentid(), - oldFInfo.filename(), - &oldStoreKey); + auto res = GetStoreKey(FileType::INODE_PAGEFILE, oldFInfo.parentid(), + oldFInfo.filename(), &oldStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << oldFInfo.filename(); @@ -165,10 +161,8 @@ StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, } std::string newStoreKey; - res = GetStoreKey(FileType::INODE_PAGEFILE, - newFInfo.parentid(), - newFInfo.filename(), - &newStoreKey); + res = GetStoreKey(FileType::INODE_PAGEFILE, newFInfo.parentid(), + newFInfo.filename(), &newStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << newFInfo.filename(); @@ -179,33 +173,29 @@ StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, std::string encodeNewFileInfo; if (!NameSpaceStorageCodec::EncodeFileInfo(oldFInfo, &encodeOldFileInfo) || !NameSpaceStorageCodec::EncodeFileInfo(newFInfo, &encodeNewFileInfo)) { - LOG(ERROR) << "encode oldfile inodeid : " << oldFInfo.id() - << ", oldfile: " << oldFInfo.filename() - << " or newfile inodeid : " << newFInfo.id() - << ", newfile: " << newFInfo.filename() << "err"; - return StoreStatus::InternalError; + LOG(ERROR) << "encode oldfile inodeid : " << oldFInfo.id() + << ", oldfile: " << oldFInfo.filename() + << " or newfile inodeid : " << newFInfo.id() + << ", newfile: " << newFInfo.filename() << "err"; + return StoreStatus::InternalError; } // delete the data in the cache first cache_->Remove(oldStoreKey); // update Etcd - Operation op1{ - OpType::OpDelete, - 
const_cast(oldStoreKey.c_str()), "", - oldStoreKey.size(), 0}; - Operation op2{ - OpType::OpPut, - const_cast(newStoreKey.c_str()), - const_cast(encodeNewFileInfo.c_str()), - newStoreKey.size(), encodeNewFileInfo.size()}; + Operation op1{OpType::OpDelete, const_cast(oldStoreKey.c_str()), "", + static_cast(oldStoreKey.size()), 0}; + Operation op2{OpType::OpPut, const_cast(newStoreKey.c_str()), + const_cast(encodeNewFileInfo.c_str()), + static_cast(newStoreKey.size()), + static_cast(encodeNewFileInfo.size())}; std::vector ops{op1, op2}; int errCode = client_->TxnN(ops); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "rename file from [" << oldFInfo.id() << ", " - << oldFInfo.filename() << "] to [" << newFInfo.id() - << ", " << newFInfo.filename() << "] err: " - << errCode; + << oldFInfo.filename() << "] to [" << newFInfo.id() << ", " + << newFInfo.filename() << "] err: " << errCode; } else { // update to cache at last cache_->Put(newStoreKey, encodeNewFileInfo); @@ -214,13 +204,11 @@ StoreStatus NameServerStorageImp::RenameFile(const FileInfo &oldFInfo, } StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( - const FileInfo &oldFInfo, - const FileInfo &newFInfo, - const FileInfo &conflictFInfo, - const FileInfo &recycleFInfo) { + const FileInfo &oldFInfo, const FileInfo &newFInfo, + const FileInfo &conflictFInfo, const FileInfo &recycleFInfo) { std::string oldStoreKey, newStoreKey, conflictStoreKey, recycleStoreKey; auto res = GetStoreKey(oldFInfo.filetype(), oldFInfo.parentid(), - oldFInfo.filename(), &oldStoreKey); + oldFInfo.filename(), &oldStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << oldFInfo.filename(); @@ -228,7 +216,7 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( } res = GetStoreKey(newFInfo.filetype(), newFInfo.parentid(), - newFInfo.filename(), &newStoreKey); + newFInfo.filename(), &newStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = 
" << newFInfo.filename(); @@ -236,7 +224,7 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( } res = GetStoreKey(conflictFInfo.filetype(), conflictFInfo.parentid(), - conflictFInfo.filename(), &conflictStoreKey); + conflictFInfo.filename(), &conflictStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << conflictFInfo.filename(); @@ -249,7 +237,7 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( } res = GetStoreKey(recycleFInfo.filetype(), recycleFInfo.parentid(), - recycleFInfo.filename(), &recycleStoreKey); + recycleFInfo.filename(), &recycleStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << recycleFInfo.filename(); @@ -258,16 +246,15 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( std::string encodeRecycleFInfo; std::string encodeNewFInfo; - if (!NameSpaceStorageCodec::EncodeFileInfo( - recycleFInfo, &encodeRecycleFInfo)) { + if (!NameSpaceStorageCodec::EncodeFileInfo(recycleFInfo, + &encodeRecycleFInfo)) { LOG(ERROR) << "encode recycle file: " << recycleFInfo.filename() - << " err"; + << " err"; return StoreStatus::InternalError; } if (!NameSpaceStorageCodec::EncodeFileInfo(newFInfo, &encodeNewFInfo)) { - LOG(ERROR) << "encode recycle file: " << newFInfo.filename() - << " err"; + LOG(ERROR) << "encode recycle file: " << newFInfo.filename() << " err"; return StoreStatus::InternalError; } @@ -276,27 +263,22 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( cache_->Remove(oldStoreKey); // put recycleFInfo; delete oldFInfo; put newFInfo - Operation op1{ - OpType::OpPut, - const_cast(recycleStoreKey.c_str()), - const_cast(encodeRecycleFInfo.c_str()), - recycleStoreKey.size(), encodeRecycleFInfo.size()}; - Operation op2{ - OpType::OpDelete, - const_cast(oldStoreKey.c_str()), "", - oldStoreKey.size(), 0}; - Operation op3{ - OpType::OpPut, - const_cast(newStoreKey.c_str()), - const_cast(encodeNewFInfo.c_str()), - 
newStoreKey.size(), encodeNewFInfo.size()}; + Operation op1{OpType::OpPut, const_cast(recycleStoreKey.c_str()), + const_cast(encodeRecycleFInfo.c_str()), + static_cast(recycleStoreKey.size()), + static_cast(encodeRecycleFInfo.size())}; + Operation op2{OpType::OpDelete, const_cast(oldStoreKey.c_str()), "", + static_cast(oldStoreKey.size()), 0}; + Operation op3{OpType::OpPut, const_cast(newStoreKey.c_str()), + const_cast(encodeNewFInfo.c_str()), + static_cast(newStoreKey.size()), + static_cast(encodeNewFInfo.size())}; std::vector ops{op1, op2, op3}; int errCode = client_->TxnN(ops); if (errCode != EtcdErrCode::EtcdOK) { - LOG(ERROR) << "rename file from [" << oldFInfo.filename() - << "] to [" << newFInfo.filename() << "] err: " - << errCode; + LOG(ERROR) << "rename file from [" << oldFInfo.filename() << "] to [" + << newFInfo.filename() << "] err: " << errCode; } else { // update to cache cache_->Put(recycleStoreKey, encodeRecycleFInfo); @@ -305,11 +287,12 @@ StoreStatus NameServerStorageImp::ReplaceFileAndRecycleOldFile( return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::MoveFileToRecycle( - const FileInfo &originFileInfo, const FileInfo &recycleFileInfo) { +StoreStatus +NameServerStorageImp::MoveFileToRecycle(const FileInfo &originFileInfo, + const FileInfo &recycleFileInfo) { std::string originFileInfoKey; auto res = GetStoreKey(originFileInfo.filetype(), originFileInfo.parentid(), - originFileInfo.filename(), &originFileInfoKey); + originFileInfo.filename(), &originFileInfoKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << originFileInfo.filename(); @@ -318,7 +301,7 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( std::string recycleFileInfoKey; res = GetStoreKey(recycleFileInfo.filetype(), recycleFileInfo.parentid(), - recycleFileInfo.filename(), &recycleFileInfoKey); + recycleFileInfo.filename(), &recycleFileInfoKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << 
recycleFileInfo.filename(); @@ -326,10 +309,10 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( } std::string encodeRecycleFInfo; - if (!NameSpaceStorageCodec::EncodeFileInfo( - recycleFileInfo, &encodeRecycleFInfo)) { + if (!NameSpaceStorageCodec::EncodeFileInfo(recycleFileInfo, + &encodeRecycleFInfo)) { LOG(ERROR) << "encode recycle file: " << recycleFileInfo.filename() - << " err"; + << " err"; return StoreStatus::InternalError; } @@ -337,23 +320,20 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( cache_->Remove(originFileInfoKey); // remove originFileInfo from Etcd, and put recycleFileInfo - Operation op1{ - OpType::OpDelete, - const_cast(originFileInfoKey.c_str()), "", - originFileInfoKey.size(), 0}; - Operation op2{ - OpType::OpPut, - const_cast(recycleFileInfoKey.c_str()), - const_cast(encodeRecycleFInfo.c_str()), - recycleFileInfoKey.size(), encodeRecycleFInfo.size()}; + Operation op1{OpType::OpDelete, + const_cast(originFileInfoKey.c_str()), "", + static_cast(originFileInfoKey.size()), 0}; + Operation op2{OpType::OpPut, const_cast(recycleFileInfoKey.c_str()), + const_cast(encodeRecycleFInfo.c_str()), + static_cast(recycleFileInfoKey.size()), + static_cast(encodeRecycleFInfo.size())}; std::vector ops{op1, op2}; int errCode = client_->TxnN(ops); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "move file [" << originFileInfo.filename() - << "] to recycle file [" - << recycleFileInfo.filename() << "] err: " - << errCode; + << "] to recycle file [" << recycleFileInfo.filename() + << "] err: " << errCode; } else { // update to cache cache_->Put(recycleFileInfoKey, encodeRecycleFInfo); @@ -361,8 +341,7 @@ StoreStatus NameServerStorageImp::MoveFileToRecycle( return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::ListFile(InodeID startid, - InodeID endid, +StoreStatus NameServerStorageImp::ListFile(InodeID startid, InodeID endid, std::vector *files) { std::string startStoreKey; auto res = @@ -382,26 +361,25 @@ StoreStatus 
NameServerStorageImp::ListFile(InodeID startid, return ListFileInternal(startStoreKey, endStoreKey, files); } -StoreStatus NameServerStorageImp::ListSegment(InodeID id, - std::vector *segments) { +StoreStatus +NameServerStorageImp::ListSegment(InodeID id, + std::vector *segments) { std::string startStoreKey = - NameSpaceStorageCodec::EncodeSegmentStoreKey(id, 0); + NameSpaceStorageCodec::EncodeSegmentStoreKey(id, 0); std::string endStoreKey = - NameSpaceStorageCodec::EncodeSegmentStoreKey(id + 1, 0); + NameSpaceStorageCodec::EncodeSegmentStoreKey(id + 1, 0); std::vector out; - int errCode = client_->List( - startStoreKey, endStoreKey, &out); + int errCode = client_->List(startStoreKey, endStoreKey, &out); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list segment err:" << errCode; return getErrorCode(errCode); } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { PageFileSegment segment; - bool decodeOK = NameSpaceStorageCodec::DecodeSegment(out[i], - &segment); + bool decodeOK = NameSpaceStorageCodec::DecodeSegment(out[i], &segment); if (decodeOK) { segments->emplace_back(segment); } else { @@ -412,20 +390,20 @@ StoreStatus NameServerStorageImp::ListSegment(InodeID id, return StoreStatus::OK; } -StoreStatus NameServerStorageImp::ListSnapshotFile(InodeID startid, - InodeID endid, - std::vector *files) { +StoreStatus +NameServerStorageImp::ListSnapshotFile(InodeID startid, InodeID endid, + std::vector *files) { std::string startStoreKey; - auto res = GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, - startid, "", &startStoreKey); + auto res = GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, startid, "", + &startStoreKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, id = " << startid; return StoreStatus::InternalError; } std::string endStoreKey; - res = GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, - endid, "", &endStoreKey); + res = + GetStoreKey(FileType::INODE_SNAPSHOT_PAGEFILE, endid, "", &endStoreKey); 
if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, id = " << endid; return StoreStatus::InternalError; @@ -434,23 +412,22 @@ StoreStatus NameServerStorageImp::ListSnapshotFile(InodeID startid, return ListFileInternal(startStoreKey, endStoreKey, files); } -StoreStatus NameServerStorageImp::ListFileInternal( - const std::string& startStoreKey, - const std::string& endStoreKey, - std::vector *files) { +StoreStatus +NameServerStorageImp::ListFileInternal(const std::string &startStoreKey, + const std::string &endStoreKey, + std::vector *files) { std::vector out; - int errCode = client_->List( - startStoreKey, endStoreKey, &out); + int errCode = client_->List(startStoreKey, endStoreKey, &out); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "list file err:" << errCode; return getErrorCode(errCode); } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { FileInfo fileInfo; - bool decodeOK = NameSpaceStorageCodec::DecodeFileInfo(out[i], - &fileInfo); + bool decodeOK = + NameSpaceStorageCodec::DecodeFileInfo(out[i], &fileInfo); if (decodeOK) { files->emplace_back(fileInfo); } else { @@ -461,8 +438,7 @@ StoreStatus NameServerStorageImp::ListFileInternal( return StoreStatus::OK; } -StoreStatus NameServerStorageImp::PutSegment(InodeID id, - uint64_t off, +StoreStatus NameServerStorageImp::PutSegment(InodeID id, uint64_t off, const PageFileSegment *segment, int64_t *revision) { std::string storeKey = @@ -482,8 +458,7 @@ StoreStatus NameServerStorageImp::PutSegment(InodeID id, return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::GetSegment(InodeID id, - uint64_t off, +StoreStatus NameServerStorageImp::GetSegment(InodeID id, uint64_t off, PageFileSegment *segment) { std::string storeKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id, off); @@ -498,21 +473,21 @@ StoreStatus NameServerStorageImp::GetSegment(InodeID id, if (decodeOK) { return StoreStatus::OK; } else { - LOG(ERROR) << "decode segment inodeid: " << id 
- << ", off: " << off <<" err"; + LOG(ERROR) << "decode segment inodeid: " << id << ", off: " << off + << " err"; return StoreStatus::InternalError; } } else if (errCode == EtcdErrCode::EtcdKeyNotExist) { LOG(INFO) << "segment not exist. inodeid: " << id << ", off: " << off; } else { - LOG(ERROR) << "get segment inodeid: " << id - << ", off: " << off << " err: " << errCode; + LOG(ERROR) << "get segment inodeid: " << id << ", off: " << off + << " err: " << errCode; } return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::DeleteSegment( - InodeID id, uint64_t off, int64_t *revision) { +StoreStatus NameServerStorageImp::DeleteSegment(InodeID id, uint64_t off, + int64_t *revision) { std::string storeKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id, off); int errCode = client_->DeleteRewithRevision(storeKey, revision); @@ -520,14 +495,14 @@ StoreStatus NameServerStorageImp::DeleteSegment( // update the cache first, then update Etcd cache_->Remove(storeKey); if (errCode != EtcdErrCode::EtcdOK) { - LOG(ERROR) << "delete segment of inodeid: " << id - << "off: " << off << ", err:" << errCode; + LOG(ERROR) << "delete segment of inodeid: " << id << "off: " << off + << ", err:" << errCode; } return getErrorCode(errCode); } StoreStatus NameServerStorageImp::ListDiscardSegment( - std::map* discardSegments) { + std::map *discardSegments) { assert(discardSegments != nullptr); std::vector> out; @@ -538,7 +513,7 @@ StoreStatus NameServerStorageImp::ListDiscardSegment( return StoreStatus::InternalError; } - for (const auto& kv : out) { + for (const auto &kv : out) { DiscardSegmentInfo info; if (!NameSpaceStorageCodec::DecodeDiscardSegment(kv.second, &info)) { LOG(ERROR) << "Decode DiscardSegment failed"; @@ -551,8 +526,9 @@ StoreStatus NameServerStorageImp::ListDiscardSegment( return StoreStatus::OK; } -StoreStatus NameServerStorageImp::DiscardSegment( - const FileInfo& fileInfo, const PageFileSegment& segment) { +StoreStatus 
+NameServerStorageImp::DiscardSegment(const FileInfo &fileInfo, + const PageFileSegment &segment) { const uint64_t inodeId = fileInfo.id(); const uint64_t offset = segment.startoffset(); const std::string segmentKey = @@ -574,16 +550,14 @@ StoreStatus NameServerStorageImp::DiscardSegment( return StoreStatus::InternalError; } - Operation op1{ - OpType::OpDelete, - const_cast(segmentKey.c_str()), - const_cast(encodeSegment.c_str()), - segmentKey.size(), encodeSegment.size()}; - Operation op2{ - OpType::OpPut, - const_cast(cleanSegmentKey.c_str()), - const_cast(encodeDiscardSegment.c_str()), - cleanSegmentKey.size(), encodeDiscardSegment.size()}; + Operation op1{OpType::OpDelete, const_cast(segmentKey.c_str()), + const_cast(encodeSegment.c_str()), + static_cast(segmentKey.size()), + static_cast(encodeSegment.size())}; + Operation op2{OpType::OpPut, const_cast(cleanSegmentKey.c_str()), + const_cast(encodeDiscardSegment.c_str()), + static_cast(cleanSegmentKey.size()), + static_cast(encodeDiscardSegment.size())}; std::vector ops{op1, op2}; auto errCode = client_->TxnN(ops); @@ -600,8 +574,8 @@ StoreStatus NameServerStorageImp::DiscardSegment( } StoreStatus NameServerStorageImp::CleanDiscardSegment(uint64_t segmentSize, - const std::string& key, - int64_t* revision) { + const std::string &key, + int64_t *revision) { int errCode = client_->DeleteRewithRevision(key, revision); if (errCode != EtcdErrCode::EtcdOK) { LOG(ERROR) << "CleanDiscardSegment failed, key = " << key @@ -614,12 +588,10 @@ StoreStatus NameServerStorageImp::CleanDiscardSegment(uint64_t segmentSize, } StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, - const FileInfo *snapshotFInfo) { + const FileInfo *snapshotFInfo) { std::string originFileKey; - auto res = GetStoreKey(originFInfo->filetype(), - originFInfo->parentid(), - originFInfo->filename(), - &originFileKey); + auto res = GetStoreKey(originFInfo->filetype(), originFInfo->parentid(), + originFInfo->filename(), 
&originFileKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << originFInfo->filename(); @@ -627,10 +599,8 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, } std::string snapshotFileKey; - res = GetStoreKey(snapshotFInfo->filetype(), - snapshotFInfo->parentid(), - snapshotFInfo->filename(), - &snapshotFileKey); + res = GetStoreKey(snapshotFInfo->filetype(), snapshotFInfo->parentid(), + snapshotFInfo->filename(), &snapshotFileKey); if (res != StoreStatus::OK) { LOG(ERROR) << "get store key failed, filename = " << snapshotFInfo->filename(); @@ -640,7 +610,8 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, std::string encodeFileInfo; std::string encodeSnapshot; if (!NameSpaceStorageCodec::EncodeFileInfo(*originFInfo, &encodeFileInfo) || - !NameSpaceStorageCodec::EncodeFileInfo(*snapshotFInfo, &encodeSnapshot)) { + !NameSpaceStorageCodec::EncodeFileInfo(*snapshotFInfo, + &encodeSnapshot)) { LOG(ERROR) << "encode originfile inodeid: " << originFInfo->id() << ", originfile: " << originFInfo->filename() << " or snapshotfile inodeid: " << snapshotFInfo->id() @@ -652,16 +623,14 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, cache_->Remove(originFileKey); // then update Etcd - Operation op1{ - OpType::OpPut, - const_cast(originFileKey.c_str()), - const_cast(encodeFileInfo.c_str()), - originFileKey.size(), encodeFileInfo.size()}; - Operation op2{ - OpType::OpPut, - const_cast(snapshotFileKey.c_str()), - const_cast(encodeSnapshot.c_str()), - snapshotFileKey.size(), encodeSnapshot.size()}; + Operation op1{OpType::OpPut, const_cast(originFileKey.c_str()), + const_cast(encodeFileInfo.c_str()), + static_cast(originFileKey.size()), + static_cast(encodeFileInfo.size())}; + Operation op2{OpType::OpPut, const_cast(snapshotFileKey.c_str()), + const_cast(encodeSnapshot.c_str()), + static_cast(snapshotFileKey.size()), + static_cast(encodeSnapshot.size())}; 
std::vector ops{op1, op2}; int errCode = client_->TxnN(ops); @@ -678,63 +647,62 @@ StoreStatus NameServerStorageImp::SnapShotFile(const FileInfo *originFInfo, return getErrorCode(errCode); } -StoreStatus NameServerStorageImp::LoadSnapShotFile( - std::vector *snapshotFiles) { - return ListFileInternal(SNAPSHOTFILEINFOKEYPREFIX, - SNAPSHOTFILEINFOKEYEND, snapshotFiles); +StoreStatus +NameServerStorageImp::LoadSnapShotFile(std::vector *snapshotFiles) { + return ListFileInternal(SNAPSHOTFILEINFOKEYPREFIX, SNAPSHOTFILEINFOKEYEND, + snapshotFiles); } StoreStatus NameServerStorageImp::getErrorCode(int errCode) { switch (errCode) { - case EtcdErrCode::EtcdOK: - return StoreStatus::OK; - - case EtcdErrCode::EtcdKeyNotExist: - return StoreStatus::KeyNotExist; - - case EtcdErrCode::EtcdUnknown: - case EtcdErrCode::EtcdInvalidArgument: - case EtcdErrCode::EtcdAlreadyExists: - case EtcdErrCode::EtcdPermissionDenied: - case EtcdErrCode::EtcdOutOfRange: - case EtcdErrCode::EtcdUnimplemented: - case EtcdErrCode::EtcdInternal: - case EtcdErrCode::EtcdNotFound: - case EtcdErrCode::EtcdDataLoss: - case EtcdErrCode::EtcdUnauthenticated: - case EtcdErrCode::EtcdCanceled: - case EtcdErrCode::EtcdDeadlineExceeded: - case EtcdErrCode::EtcdResourceExhausted: - case EtcdErrCode::EtcdFailedPrecondition: - case EtcdErrCode::EtcdAborted: - case EtcdErrCode::EtcdUnavailable: - case EtcdErrCode::EtcdTxnUnkownOp: - case EtcdErrCode::EtcdObjectNotExist: - case EtcdErrCode::EtcdErrObjectType: - return StoreStatus::InternalError; + case EtcdErrCode::EtcdOK: + return StoreStatus::OK; + + case EtcdErrCode::EtcdKeyNotExist: + return StoreStatus::KeyNotExist; + + case EtcdErrCode::EtcdUnknown: + case EtcdErrCode::EtcdInvalidArgument: + case EtcdErrCode::EtcdAlreadyExists: + case EtcdErrCode::EtcdPermissionDenied: + case EtcdErrCode::EtcdOutOfRange: + case EtcdErrCode::EtcdUnimplemented: + case EtcdErrCode::EtcdInternal: + case EtcdErrCode::EtcdNotFound: + case EtcdErrCode::EtcdDataLoss: + case 
EtcdErrCode::EtcdUnauthenticated: + case EtcdErrCode::EtcdCanceled: + case EtcdErrCode::EtcdDeadlineExceeded: + case EtcdErrCode::EtcdResourceExhausted: + case EtcdErrCode::EtcdFailedPrecondition: + case EtcdErrCode::EtcdAborted: + case EtcdErrCode::EtcdUnavailable: + case EtcdErrCode::EtcdTxnUnkownOp: + case EtcdErrCode::EtcdObjectNotExist: + case EtcdErrCode::EtcdErrObjectType: + return StoreStatus::InternalError; - default: - return StoreStatus::InternalError; + default: + return StoreStatus::InternalError; } } -StoreStatus NameServerStorageImp::GetStoreKey(FileType filetype, - InodeID id, - const std::string& filename, - std::string* storeKey) { +StoreStatus NameServerStorageImp::GetStoreKey(FileType filetype, InodeID id, + const std::string &filename, + std::string *storeKey) { switch (filetype) { - case FileType::INODE_PAGEFILE: - case FileType::INODE_DIRECTORY: - *storeKey = NameSpaceStorageCodec::EncodeFileStoreKey(id, filename); - break; - case FileType::INODE_SNAPSHOT_PAGEFILE: - *storeKey = - NameSpaceStorageCodec::EncodeSnapShotFileStoreKey(id, filename); - break; - default: - LOG(ERROR) << "filetype: " - << filetype << " of " << filename << " not exist"; - return StoreStatus::InternalError; + case FileType::INODE_PAGEFILE: + case FileType::INODE_DIRECTORY: + *storeKey = NameSpaceStorageCodec::EncodeFileStoreKey(id, filename); + break; + case FileType::INODE_SNAPSHOT_PAGEFILE: + *storeKey = + NameSpaceStorageCodec::EncodeSnapShotFileStoreKey(id, filename); + break; + default: + LOG(ERROR) << "filetype: " << filetype << " of " << filename + << " not exist"; + return StoreStatus::InternalError; } return StoreStatus::OK; } diff --git a/src/mds/schedule/copySetScheduler.cpp b/src/mds/schedule/copySetScheduler.cpp index 84b5c84e93..418d86a55f 100644 --- a/src/mds/schedule/copySetScheduler.cpp +++ b/src/mds/schedule/copySetScheduler.cpp @@ -208,12 +208,12 @@ void CopySetScheduler::StatsCopysetDistribute( for (auto &item : distribute) { num += 
item.second.size(); - if (max == -1 || item.second.size() > max) { + if (max == -1 || static_cast(item.second.size()) > max) { max = item.second.size(); maxcsId = item.first; } - if (min == -1 || item.second.size() < min) { + if (min == -1 || static_cast(item.second.size()) < min) { min = item.second.size(); mincsId = item.first; } @@ -357,7 +357,7 @@ bool CopySetScheduler::CopySetSatisfiyBasicMigrationCond( } // the replica num of copyset is not standard - if (info.peers.size() != + if (static_cast(info.peers.size()) != topo_->GetStandardReplicaNumInLogicalPool(info.id.first)) { return false; } diff --git a/src/mds/schedule/leaderScheduler.cpp b/src/mds/schedule/leaderScheduler.cpp index 4d7cdc455d..2a75e6e14b 100644 --- a/src/mds/schedule/leaderScheduler.cpp +++ b/src/mds/schedule/leaderScheduler.cpp @@ -53,8 +53,8 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { int maxId = -1; int minLeaderCount = -1; int minId = -1; - std::vector csInfos - = topo_->GetChunkServersInLogicalPool(lid); + std::vector csInfos = + topo_->GetChunkServersInLogicalPool(lid); static std::random_device rd; static std::mt19937 g(rd()); std::shuffle(csInfos.begin(), csInfos.end(), g); @@ -65,12 +65,14 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { continue; } - if (maxLeaderCount == -1 || csInfo.leaderCount > maxLeaderCount) { + if (maxLeaderCount == -1 || + static_cast(csInfo.leaderCount) > maxLeaderCount) { maxId = csInfo.info.id; maxLeaderCount = csInfo.leaderCount; } - if (minLeaderCount == -1 || csInfo.leaderCount < minLeaderCount) { + if (minLeaderCount == -1 || + static_cast(csInfo.leaderCount) < minLeaderCount) { // the chunkserver with minLeaderCount and not in coolingTime // can be the transfer target if (!coolingTimeExpired(csInfo.startUpTime)) { @@ -85,9 +87,9 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { << ", maxLeaderCount:" << maxLeaderCount << "), (id:" << minId << ", minleaderCount:" << minLeaderCount << ")"; - // leader scheduling is 
not required when (maxLeaderCount-minLeaderCount <= 1) //NOLINT - if (maxLeaderCount >= 0 && - minLeaderCount >= 0 && + // leader scheduling is not required when + // (maxLeaderCount-minLeaderCount <= 1) + if (maxLeaderCount >= 0 && minLeaderCount >= 0 && maxLeaderCount - minLeaderCount <= 1) { LOG(INFO) << "leaderScheduler no need to generate transferLeader op"; return oneRoundGenOp; @@ -101,12 +103,12 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { Operator transferLeaderOutOp; CopySetInfo selectedCopySet; if (transferLeaderOut(maxId, maxLeaderCount, lid, &transferLeaderOutOp, - &selectedCopySet)) { + &selectedCopySet)) { if (opController_->AddOperator(transferLeaderOutOp)) { oneRoundGenOp += 1; LOG(INFO) << "leaderScheduler generatre operator " - << transferLeaderOutOp.OpToString() - << " for " << selectedCopySet.CopySetInfoStr() + << transferLeaderOutOp.OpToString() << " for " + << selectedCopySet.CopySetInfoStr() << " from transfer leader out"; return oneRoundGenOp; } @@ -120,12 +122,12 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { Operator transferLeaderInOp; CopySetInfo selectedCopySet; if (transferLeaderIn(minId, minLeaderCount, lid, &transferLeaderInOp, - &selectedCopySet)) { + &selectedCopySet)) { if (opController_->AddOperator(transferLeaderInOp)) { oneRoundGenOp += 1; LOG(INFO) << "leaderScheduler generatre operator " - << transferLeaderInOp.OpToString() - << " for " << selectedCopySet.CopySetInfoStr() + << transferLeaderInOp.OpToString() << " for " + << selectedCopySet.CopySetInfoStr() << " from transfer leader in"; return oneRoundGenOp; } @@ -136,13 +138,14 @@ int LeaderScheduler::DoLeaderSchedule(PoolIdType lid) { } bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, - PoolIdType lid, Operator *op, CopySetInfo *selectedCopySet) { + PoolIdType lid, Operator *op, + CopySetInfo *selectedCopySet) { // find all copyset with source chunkserver as its leader as the candidate std::vector candidateInfos; for 
(auto &cInfo : topo_->GetCopySetInfosInLogicalPool(lid)) { // skip those copysets that the source is the follower in it if (cInfo.leader != source) { - continue; + continue; } // skip the copyset under configuration changing @@ -162,7 +165,7 @@ bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, while (retryTimes < maxRetryTransferLeader) { // select a copyset from candidates randomly srand((unsigned)time(NULL)); - *selectedCopySet = candidateInfos[rand()%candidateInfos.size()]; + *selectedCopySet = candidateInfos[rand() % candidateInfos.size()]; // choose the chunkserver with least leaders from follower ChunkServerIdType targetId = UNINTIALIZE_ID; uint32_t targetLeaderCount = std::numeric_limits::max(); @@ -199,15 +202,14 @@ bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, } if (targetId == UNINTIALIZE_ID || - count - 1 < targetLeaderCount + 1 || + count - 1 < static_cast(targetLeaderCount + 1) || !coolingTimeExpired(targetStartUpTime)) { retryTimes++; continue; } else { *op = operatorFactory.CreateTransferLeaderOperator( - *selectedCopySet, targetId, OperatorPriority::NormalPriority); - op->timeLimit = - std::chrono::seconds(transTimeSec_); + *selectedCopySet, targetId, OperatorPriority::NormalPriority); + op->timeLimit = std::chrono::seconds(transTimeSec_); return true; } } @@ -216,7 +218,8 @@ bool LeaderScheduler::transferLeaderOut(ChunkServerIdType source, int count, } bool LeaderScheduler::transferLeaderIn(ChunkServerIdType target, int count, - PoolIdType lid, Operator *op, CopySetInfo *selectedCopySet) { + PoolIdType lid, Operator *op, + CopySetInfo *selectedCopySet) { // find the copyset on follower and transfer leader to the target std::vector candidateInfos; for (auto &cInfo : topo_->GetCopySetInfosInLogicalPool(lid)) { @@ -246,7 +249,7 @@ bool LeaderScheduler::transferLeaderIn(ChunkServerIdType target, int count, int retryTimes = 1; while (retryTimes < maxRetryTransferLeader) { // select a copyset 
randomly from candidates - *selectedCopySet = candidateInfos[rand()%candidateInfos.size()]; + *selectedCopySet = candidateInfos[rand() % candidateInfos.size()]; // fetch the leader number of the leader of the selected copyset and // the target @@ -258,7 +261,7 @@ bool LeaderScheduler::transferLeaderIn(ChunkServerIdType target, int count, continue; } - if (sourceInfo.leaderCount - 1 < count + 1) { + if (static_cast(sourceInfo.leaderCount - 1) < count + 1) { retryTimes++; continue; } @@ -279,7 +282,7 @@ bool LeaderScheduler::copySetHealthy(const CopySetInfo &cInfo) { ChunkServerInfo csInfo; if (!topo_->GetChunkServerInfo(peer.id, &csInfo)) { LOG(ERROR) << "leaderScheduler cannot get info of chukServer:" - << peer.id; + << peer.id; healthy = false; break; } @@ -302,9 +305,7 @@ bool LeaderScheduler::coolingTimeExpired(uint64_t startUpTime) { return tm.tv_sec - startUpTime > chunkserverCoolingTimeSec_; } -int64_t LeaderScheduler::GetRunningInterval() { - return runInterval_; -} +int64_t LeaderScheduler::GetRunningInterval() { return runInterval_; } } // namespace schedule } // namespace mds } // namespace curve diff --git a/src/mds/schedule/operatorTemplate.h b/src/mds/schedule/operatorTemplate.h index 753f9d91b0..cfca1c7adb 100644 --- a/src/mds/schedule/operatorTemplate.h +++ b/src/mds/schedule/operatorTemplate.h @@ -98,6 +98,7 @@ OperatorT::OperatorT( EpochType startEpoch, const CopySetKey &id, OperatorPriority pri, const steady_clock::time_point &timeLimit, std::shared_ptr step) { + (void)timeLimit; this->startEpoch = startEpoch; this->copysetID.first = id.first; this->copysetID.second = id.second; diff --git a/src/mds/schedule/rapidLeaderScheduler.cpp b/src/mds/schedule/rapidLeaderScheduler.cpp index 16ead9f686..a23f30d30b 100644 --- a/src/mds/schedule/rapidLeaderScheduler.cpp +++ b/src/mds/schedule/rapidLeaderScheduler.cpp @@ -149,7 +149,7 @@ ChunkServerIdType RapidLeaderScheduler::SelectTargetPeer( // the replica with least leader number int possibleSelected = 
MinLeaderNumInCopySetPeers(info, stat); - if (possibleSelected == curChunkServerId) { + if (possibleSelected == static_cast(curChunkServerId)) { return selected; } diff --git a/src/mds/schedule/recoverScheduler.cpp b/src/mds/schedule/recoverScheduler.cpp index 68831ccdeb..7876bd442c 100644 --- a/src/mds/schedule/recoverScheduler.cpp +++ b/src/mds/schedule/recoverScheduler.cpp @@ -77,11 +77,11 @@ int RecoverScheduler::Schedule() { // alarm if over half of the replicas are offline int deadBound = - copysetInfo.peers.size() - (copysetInfo.peers.size()/2 + 1); - if (offlinelists.size() > deadBound) { + copysetInfo.peers.size() - (copysetInfo.peers.size() / 2 + 1); + if (static_cast(offlinelists.size()) > deadBound) { LOG(ERROR) << "recoverSchdeuler find " - << copysetInfo.CopySetInfoStr() - << " has " << offlinelists.size() + << copysetInfo.CopySetInfoStr() << " has " + << offlinelists.size() << " replica offline, cannot repair, please check"; continue; } @@ -90,10 +90,10 @@ int RecoverScheduler::Schedule() { for (auto it = offlinelists.begin(); it != offlinelists.end();) { if (excludes.count(*it) > 0) { LOG(ERROR) << "can not recover offline chunkserver " << *it - << " on " << copysetInfo.CopySetInfoStr() - << ", because it's server has more than " - << chunkserverFailureTolerance_ - << " offline chunkservers"; + << " on " << copysetInfo.CopySetInfoStr() + << ", because it's server has more than " + << chunkserverFailureTolerance_ + << " offline chunkservers"; it = offlinelists.erase(it); } else { ++it; @@ -108,22 +108,21 @@ int RecoverScheduler::Schedule() { Operator fixRes; ChunkServerIdType target; // failed to recover the replica - if (!FixOfflinePeer( - copysetInfo, *offlinelists.begin(), &fixRes, &target)) { + if (!FixOfflinePeer(copysetInfo, *offlinelists.begin(), &fixRes, + &target)) { continue; - // succeeded but failed to add the operator to the controller + // succeeded but failed to add the operator to the controller } else if 
(!opController_->AddOperator(fixRes)) { LOG(WARNING) << "recover scheduler add operator " - << fixRes.OpToString() << " on " - << copysetInfo.CopySetInfoStr() << " fail"; + << fixRes.OpToString() << " on " + << copysetInfo.CopySetInfoStr() << " fail"; continue; - // succeeded in recovering replica and adding it to the controller + // succeeded in recovering replica and adding it to the controller } else { LOG(INFO) << "recoverScheduler generate operator:" - << fixRes.OpToString() << " for " - << copysetInfo.CopySetInfoStr() - << ", remove offlinePeer: " - << *offlinelists.begin(); + << fixRes.OpToString() << " for " + << copysetInfo.CopySetInfoStr() + << ", remove offlinePeer: " << *offlinelists.begin(); // if the target returned has the initial value, that means offline // replicas are removed directly. if (target == UNINTIALIZE_ID) { @@ -135,10 +134,11 @@ int RecoverScheduler::Schedule() { // should be generated on target. If failed to generate, delete the // operator. if (!topo_->CreateCopySetAtChunkServer(copysetInfo.id, target)) { - LOG(WARNING) << "recoverScheduler create " - << copysetInfo.CopySetInfoStr() - << " on chunkServer: " << target - << " error, delete operator" << fixRes.OpToString(); + LOG(WARNING) + << "recoverScheduler create " + << copysetInfo.CopySetInfoStr() + << " on chunkServer: " << target + << " error, delete operator" << fixRes.OpToString(); opController_->RemoveOperator(copysetInfo.id); continue; } @@ -150,24 +150,21 @@ int RecoverScheduler::Schedule() { return 1; } -int64_t RecoverScheduler::GetRunningInterval() { - return runInterval_; -} +int64_t RecoverScheduler::GetRunningInterval() { return runInterval_; } -bool RecoverScheduler::FixOfflinePeer( - const CopySetInfo &info, ChunkServerIdType peerId, - Operator *op, ChunkServerIdType *target) { +bool RecoverScheduler::FixOfflinePeer(const CopySetInfo &info, + ChunkServerIdType peerId, Operator *op, + ChunkServerIdType *target) { assert(op != nullptr); // check the standard number of 
replicas first auto standardReplicaNum = topo_->GetStandardReplicaNumInLogicalPool(info.id.first); if (standardReplicaNum <= 0) { - LOG(WARNING) << "RecoverScheduler find logical pool " - << info.id.first << " standard num " - << standardReplicaNum << " invalid"; + LOG(WARNING) << "RecoverScheduler find logical pool " << info.id.first + << " standard num " << standardReplicaNum << " invalid"; return false; } - if (info.peers.size() > standardReplicaNum) { + if (static_cast(info.peers.size()) > standardReplicaNum) { // remove the offline replica *op = operatorFactory.CreateRemovePeerOperator( info, peerId, OperatorPriority::HighPriority); @@ -180,8 +177,9 @@ bool RecoverScheduler::FixOfflinePeer( auto csId = SelectBestPlacementChunkServer(info, peerId); if (csId == UNINTIALIZE_ID) { LOG(WARNING) << "recoverScheduler can not select chunkServer to " - "repair " << info.CopySetInfoStr() - << ", which replica: " << peerId << " is offline"; + "repair " + << info.CopySetInfoStr() << ", which replica: " << peerId + << " is offline"; return false; } else { *op = operatorFactory.CreateChangePeerOperator( @@ -220,17 +218,19 @@ void RecoverScheduler::CalculateExcludesChunkServer( // tolerance threshold. 
If it does, the chunkservers on this server will not // be recovered for (auto item : unhealthyStateCS) { - if (item.second.size() < chunkserverFailureTolerance_) { + if (static_cast(item.second.size()) < + chunkserverFailureTolerance_) { continue; } - LOG(WARNING) << "server " << item.first << " has " - << item.second.size() << " offline chunkservers"; + LOG(WARNING) << "server " << item.first << " has " << item.second.size() + << " offline chunkservers"; for (auto cs : item.second) { excludes->emplace(cs); } } - // if the chunkserver is in pending status, it will be considered recoverable //NOLINT + // if the chunkserver is in pending status, it will be considered + // recoverable //NOLINT for (auto it : pendingCS) { excludes->erase(it); } @@ -238,4 +238,3 @@ void RecoverScheduler::CalculateExcludesChunkServer( } // namespace schedule } // namespace mds } // namespace curve - diff --git a/src/mds/schedule/scanScheduler.cpp b/src/mds/schedule/scanScheduler.cpp index 72ea4fa902..6f6bc3af48 100644 --- a/src/mds/schedule/scanScheduler.cpp +++ b/src/mds/schedule/scanScheduler.cpp @@ -35,44 +35,44 @@ int ScanScheduler::Schedule() { LOG(INFO) << "ScanScheduler begin."; auto currentHour = ::curve::common::TimeUtility::GetCurrentHour(); - bool duringScanTime = currentHour >= scanStartHour_ && - currentHour <= scanEndHour_; + bool duringScanTime = + currentHour >= scanStartHour_ && currentHour <= scanEndHour_; auto count = 0; ::curve::mds::topology::LogicalPool lpool; auto logicPoolIds = topo_->GetLogicalpools(); - for (const auto& lpid : logicPoolIds) { + for (const auto &lpid : logicPoolIds) { CopySetInfos copysets2start, copysets2cancel; auto copysetInfos = topo_->GetCopySetInfosInLogicalPool(lpid); topo_->GetLogicalPool(lpid, &lpool); if (!duringScanTime || !lpool.ScanEnable()) { - for (const auto& copysetInfo : copysetInfos) { + for (const auto ©setInfo : copysetInfos) { if (StartOrReadyToScan(copysetInfo)) { copysets2cancel.push_back(copysetInfo); } } } else { - 
SelectCopysetsForScan( - copysetInfos, ©sets2start, ©sets2cancel); + SelectCopysetsForScan(copysetInfos, ©sets2start, + ©sets2cancel); } - count += GenScanOperator(copysets2start, - ConfigChangeType::START_SCAN_PEER); + count += + GenScanOperator(copysets2start, ConfigChangeType::START_SCAN_PEER); count += GenScanOperator(copysets2cancel, ConfigChangeType::CANCEL_SCAN_PEER); } - LOG(INFO) << "ScanScheduelr generate " - << count << " operators at this round"; + LOG(INFO) << "ScanScheduelr generate " << count + << " operators at this round"; return 1; } -bool ScanScheduler::StartOrReadyToScan(const CopySetInfo& copysetInfo) { +bool ScanScheduler::StartOrReadyToScan(const CopySetInfo ©setInfo) { Operator op; if (copysetInfo.scaning) { return true; } else if (opController_->GetOperatorById(copysetInfo.id, &op)) { - auto step = dynamic_cast(op.step.get()); + auto step = dynamic_cast(op.step.get()); return nullptr != step && step->IsStartScanOp(); } else if (copysetInfo.HasCandidate()) { return copysetInfo.configChangeInfo.type() == @@ -82,12 +82,11 @@ bool ScanScheduler::StartOrReadyToScan(const CopySetInfo& copysetInfo) { return false; } -void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos* copysetInfos, - int count, - Selected* selected, - CopySetInfos* copysets2start) { +void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos *copysetInfos, + int count, Selected *selected, + CopySetInfos *copysets2start) { std::sort(copysetInfos->begin(), copysetInfos->end(), - [](const CopySetInfo& a, const CopySetInfo& b) { + [](const CopySetInfo &a, const CopySetInfo &b) { if (a.lastScanSec == b.lastScanSec) { return a.id < b.id; } @@ -95,14 +94,15 @@ void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos* copysetInfos, }); auto nowSec = ::curve::common::TimeUtility::GetTimeofDaySec(); - for (const auto& copysetInfo : *copysetInfos) { + for (const auto ©setInfo : *copysetInfos) { if (nowSec - copysetInfo.lastScanSec < scanIntervalSec_ || count <= 0) { return; } bool 
succ = true; - for (const auto& peer : copysetInfo.peers) { - if ((*selected)[peer.id] >= scanConcurrentPerChunkserver_) { + for (const auto &peer : copysetInfo.peers) { + if ((*selected)[peer.id] >= + static_cast(scanConcurrentPerChunkserver_)) { succ = false; break; } @@ -112,25 +112,25 @@ void ScanScheduler::SelectCopysetsToStartScan(CopySetInfos* copysetInfos, if (succ) { count--; copysets2start->push_back(copysetInfo); - for (const auto& peer : copysetInfo.peers) { + for (const auto &peer : copysetInfo.peers) { (*selected)[peer.id]++; } } } } -void ScanScheduler::SelectCopysetsToCancelScan(CopySetInfos* copysetInfos, +void ScanScheduler::SelectCopysetsToCancelScan(CopySetInfos *copysetInfos, int count, - CopySetInfos* copysets2cancel) { + CopySetInfos *copysets2cancel) { std::sort(copysetInfos->begin(), copysetInfos->end(), - [](const CopySetInfo& a, const CopySetInfo& b) { + [](const CopySetInfo &a, const CopySetInfo &b) { if (a.scaning == b.scaning) { return a.id < b.id; } return a.scaning == false; }); - for (const auto& copysetInfo : *copysetInfos) { + for (const auto ©setInfo : *copysetInfos) { if (count-- <= 0) { return; } @@ -138,15 +138,15 @@ void ScanScheduler::SelectCopysetsToCancelScan(CopySetInfos* copysetInfos, } } -void ScanScheduler::SelectCopysetsForScan(const CopySetInfos& copysetInfos, - CopySetInfos* copysets2start, - CopySetInfos* copysets2cancel) { +void ScanScheduler::SelectCopysetsForScan(const CopySetInfos ©setInfos, + CopySetInfos *copysets2start, + CopySetInfos *copysets2cancel) { CopySetInfos scaning, nonScan; Selected selected; // scaning chunk server - for (const auto& copysetInfo : copysetInfos) { + for (const auto ©setInfo : copysetInfos) { if (StartOrReadyToScan(copysetInfo)) { scaning.push_back(copysetInfo); - for (const auto& peer : copysetInfo.peers) { + for (const auto &peer : copysetInfo.peers) { selected[peer.id]++; } LOG(INFO) << "Copyset is on scaning: " @@ -168,11 +168,11 @@ void 
ScanScheduler::SelectCopysetsForScan(const CopySetInfos& copysetInfos, } } -int ScanScheduler::GenScanOperator(const CopySetInfos& copysetInfos, +int ScanScheduler::GenScanOperator(const CopySetInfos ©setInfos, ConfigChangeType opType) { auto count = 0; bool ready2start = (opType == ConfigChangeType::START_SCAN_PEER); - for (auto& copysetInfo : copysetInfos) { + for (auto ©setInfo : copysetInfos) { auto priority = ready2start ? OperatorPriority::LowPriority : OperatorPriority::HighPriority; @@ -182,17 +182,15 @@ int ScanScheduler::GenScanOperator(const CopySetInfos& copysetInfos, auto succ = opController_->AddOperator(op); count += succ ? 1 : 0; - LOG(INFO) << "Generate operator " << op.OpToString() - << " for " << copysetInfo.CopySetInfoStr() + LOG(INFO) << "Generate operator " << op.OpToString() << " for " + << copysetInfo.CopySetInfoStr() << (succ ? " success" : " fail"); } return count; } -int64_t ScanScheduler::GetRunningInterval() { - return runInterval_; -} +int64_t ScanScheduler::GetRunningInterval() { return runInterval_; } } // namespace schedule } // namespace mds diff --git a/src/mds/schedule/scheduleMetricsTemplate.h b/src/mds/schedule/scheduleMetricsTemplate.h index 416d9c278f..3bec2b679c 100644 --- a/src/mds/schedule/scheduleMetricsTemplate.h +++ b/src/mds/schedule/scheduleMetricsTemplate.h @@ -242,6 +242,9 @@ void ScheduleMetricsT< TopoCopySetInfoT>::RemoveUpdateOperatorsMap(const Operator &op, std::string type, IdType target) { + (void)type; + (void)target; + auto findOp = operators.find(op.copysetID); if (findOp == operators.end()) { return; @@ -306,7 +309,7 @@ void ScheduleMetricsT(members.size())) { copysetPeers += hostPort; } else { copysetPeers += hostPort + ","; diff --git a/src/mds/schedule/scheduler.cpp b/src/mds/schedule/scheduler.cpp index 801842a26b..0117b8607b 100644 --- a/src/mds/schedule/scheduler.cpp +++ b/src/mds/schedule/scheduler.cpp @@ -116,7 +116,7 @@ ChunkServerIdType Scheduler::SelectBestPlacementChunkServer( << " invalid"; 
return UNINTIALIZE_ID; } - if (excludeZones.size() >= standardZoneNum) { + if (static_cast(excludeZones.size()) >= standardZoneNum) { excludeZones.clear(); } @@ -210,7 +210,7 @@ ChunkServerIdType Scheduler::SelectRedundantReplicaToRemove( " replicaNum must >=0, please check"; return UNINTIALIZE_ID; } - if (copySetInfo.peers.size() <= standardReplicaNum) { + if (static_cast(copySetInfo.peers.size()) <= standardReplicaNum) { LOG(ERROR) << "topoAdapter cannot select redundent replica for " << copySetInfo.CopySetInfoStr() << ", beacuse replicaNum " << copySetInfo.peers.size() @@ -242,7 +242,7 @@ ChunkServerIdType Scheduler::SelectRedundantReplicaToRemove( // 1. alarm if the zone number is lass than the standard // TODO(lixiaocui): adjust by adding or deleting replica in this case - if (zoneList.size() < standardZoneNum) { + if (static_cast(zoneList.size()) < standardZoneNum) { LOG(ERROR) << "topoAdapter find " << copySetInfo.CopySetInfoStr() << " replicas distribute in " << zoneList.size() << " zones, less than standard zoneNum " @@ -265,7 +265,7 @@ ChunkServerIdType Scheduler::SelectRedundantReplicaToRemove( std::vector candidateChunkServer; for (auto item : zoneList) { if (item.second.size() == 1) { - if (zoneList.size() == standardZoneNum) { + if (static_cast(zoneList.size()) == standardZoneNum) { continue; } } diff --git a/src/mds/schedule/scheduler_helper.cpp b/src/mds/schedule/scheduler_helper.cpp index f17f303c04..3e0fa106c0 100644 --- a/src/mds/schedule/scheduler_helper.cpp +++ b/src/mds/schedule/scheduler_helper.cpp @@ -139,7 +139,7 @@ bool SchedulerHelper::SatisfyZoneAndScatterWidthLimit( zoneList[targetZone] += 1; } - if (zoneList.size() < minZone) { + if (static_cast(zoneList.size()) < minZone) { return false; } diff --git a/src/mds/schedule/topoAdapter.h b/src/mds/schedule/topoAdapter.h index d9e74259e8..348cf95fa2 100644 --- a/src/mds/schedule/topoAdapter.h +++ b/src/mds/schedule/topoAdapter.h @@ -148,7 +148,7 @@ struct CopySetInfo { struct 
ChunkServerInfo { public: ChunkServerInfo() : - leaderCount(0), diskCapacity(0), diskUsed(0), startUpTime(0) {} + startUpTime(0), leaderCount(0), diskCapacity(0), diskUsed(0) {} ChunkServerInfo(const PeerInfo &info, OnlineState state, DiskState diskState, ChunkServerStatus status, uint32_t leaderCount, uint64_t capacity, uint64_t used, diff --git a/src/mds/server/mds.cpp b/src/mds/server/mds.cpp index 7085ce1e25..932537a207 100644 --- a/src/mds/server/mds.cpp +++ b/src/mds/server/mds.cpp @@ -244,7 +244,7 @@ void MDS::InitEtcdClient(const EtcdConf& etcdConf, auto res = etcdClient_->Init(etcdConf, etcdTimeout, retryTimes); LOG_IF(FATAL, res != EtcdErrCode::EtcdOK) << "init etcd client err! " - << "etcdaddr: " << std::string{etcdConf.Endpoints, etcdConf.len} + << "etcdaddr: " << std::string(etcdConf.Endpoints, etcdConf.len) << ", etcdaddr len: " << etcdConf.len << ", etcdtimeout: " << etcdConf.DialTimeout << ", operation timeout: " << etcdTimeout @@ -258,7 +258,7 @@ void MDS::InitEtcdClient(const EtcdConf& etcdConf, << "Run mds err. Check if etcd is running."; LOG(INFO) << "init etcd client ok! 
" - << "etcdaddr: " << std::string{etcdConf.Endpoints, etcdConf.len} + << "etcdaddr: " << std::string(etcdConf.Endpoints, etcdConf.len) << ", etcdaddr len: " << etcdConf.len << ", etcdtimeout: " << etcdConf.DialTimeout << ", operation timeout: " << etcdTimeout @@ -510,7 +510,6 @@ void MDS::InitCurveFSOptions(CurveFSOption *curveFSOptions) { "mds.curvefs.minFileLength", &curveFSOptions->minFileLength); conf_->GetValueFatalIfFail( "mds.curvefs.maxFileLength", &curveFSOptions->maxFileLength); - FileRecordOptions fileRecordOptions; InitFileRecordOptions(&curveFSOptions->fileRecordOptions); RootAuthOption authOptions; diff --git a/src/mds/snapshotcloneclient/snapshotclone_client.cpp b/src/mds/snapshotcloneclient/snapshotclone_client.cpp index 3f76d111a7..c8a04f41e2 100644 --- a/src/mds/snapshotcloneclient/snapshotclone_client.cpp +++ b/src/mds/snapshotcloneclient/snapshotclone_client.cpp @@ -24,32 +24,32 @@ #include #include -using curve::snapshotcloneserver::kServiceName; +#include + using curve::snapshotcloneserver::kActionStr; -using curve::snapshotcloneserver::kGetCloneRefStatusAction; -using curve::snapshotcloneserver::kVersionStr; -using curve::snapshotcloneserver::kUserStr; -using curve::snapshotcloneserver::kSourceStr; -using curve::snapshotcloneserver::kCodeStr; -using curve::snapshotcloneserver::kRefStatusStr; -using curve::snapshotcloneserver::kTotalCountStr; using curve::snapshotcloneserver::kCloneFileInfoStr; +using curve::snapshotcloneserver::kCodeStr; using curve::snapshotcloneserver::kFileStr; +using curve::snapshotcloneserver::kGetCloneRefStatusAction; using curve::snapshotcloneserver::kInodeStr; +using curve::snapshotcloneserver::kRefStatusStr; +using curve::snapshotcloneserver::kServiceName; +using curve::snapshotcloneserver::kSourceStr; +using curve::snapshotcloneserver::kTotalCountStr; +using curve::snapshotcloneserver::kUserStr; +using curve::snapshotcloneserver::kVersionStr; namespace curve { namespace mds { namespace snapshotcloneclient { 
-StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, - std::string user, - CloneRefStatus *status, - std::vector *fileCheckList) { +StatusCode SnapshotCloneClient::GetCloneRefStatus( + std::string filename, std::string user, CloneRefStatus *status, + std::vector *fileCheckList) { if (!inited_) { LOG(WARNING) << "GetCloneRefStatus, snapshot clone server not inited" - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::kSnapshotCloneServerNotInit; } @@ -57,17 +57,13 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, brpc::ChannelOptions option; option.protocol = "http"; - std::string url = addr_ - + "/" + kServiceName + "?" - + kActionStr+ "=" + kGetCloneRefStatusAction + "&" - + kVersionStr + "=1&" - + kUserStr + "=" + user + "&" - + kSourceStr + "=" + filename; + std::string url = addr_ + "/" + kServiceName + "?" + kActionStr + "=" + + kGetCloneRefStatusAction + "&" + kVersionStr + "=1&" + + kUserStr + "=" + user + "&" + kSourceStr + "=" + filename; if (channel.Init(url.c_str(), "", &option) != 0) { LOG(ERROR) << "GetCloneRefStatus, Fail to init channel, url is " << url - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::kSnapshotCloneConnectFail; } @@ -77,8 +73,7 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, channel.CallMethod(NULL, &cntl, NULL, NULL, NULL); if (cntl.Failed()) { LOG(ERROR) << "GetCloneRefStatus, CallMethod faile, errMsg :" - << cntl.ErrorText() - << ", filename = " << filename + << cntl.ErrorText() << ", filename = " << filename << ", user = " << user; return StatusCode::KInternalError; } @@ -86,12 +81,16 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, std::stringstream ss; ss << cntl.response_attachment(); std::string data = ss.str(); - Json::Reader jsonReader; + + 
Json::CharReaderBuilder jsonBuilder; + std::unique_ptr jsonReader(jsonBuilder.newCharReader()); Json::Value jsonObj; - if (!jsonReader.parse(data, jsonObj)) { + JSONCPP_STRING errormsg; + if (!jsonReader->parse(data.data(), data.data() + data.length(), &jsonObj, + &errormsg)) { LOG(ERROR) << "GetCloneRefStatus, parse json fail, data = " << data - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user + << ", error = " << errormsg; return StatusCode::KInternalError; } @@ -100,27 +99,24 @@ StatusCode SnapshotCloneClient::GetCloneRefStatus(std::string filename, std::string requestCode = jsonObj[kCodeStr].asCString(); if (requestCode != "0") { LOG(ERROR) << "GetCloneRefStatus, Code is not 0, data = " << data - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::KInternalError; } CloneRefStatus tempStatus = - static_cast(jsonObj[kRefStatusStr].asInt()); + static_cast(jsonObj[kRefStatusStr].asInt()); *status = tempStatus; - if (tempStatus == CloneRefStatus::kNoRef - || tempStatus == CloneRefStatus::kHasRef) { + if (tempStatus == CloneRefStatus::kNoRef || + tempStatus == CloneRefStatus::kHasRef) { return StatusCode::kOK; } if (tempStatus != CloneRefStatus::kNeedCheck) { LOG(ERROR) << "GetCloneRefStatus, invalid status, data = " << data - << ", filename = " << filename - << ", user = " << user; + << ", filename = " << filename << ", user = " << user; return StatusCode::KInternalError; } - int totalCount = jsonObj[kTotalCountStr].asInt(); int listSize = jsonObj[kCloneFileInfoStr].size(); for (int i = 0; i < listSize; i++) { DestFileInfo file; @@ -138,10 +134,7 @@ void SnapshotCloneClient::Init(const SnapshotCloneClientOption &option) { } } -bool SnapshotCloneClient::GetInitStatus() { - return inited_; -} +bool SnapshotCloneClient::GetInitStatus() { return inited_; } } // namespace snapshotcloneclient } // namespace mds } // namespace 
curve - diff --git a/src/mds/topology/topology_chunk_allocator.cpp b/src/mds/topology/topology_chunk_allocator.cpp index bf07557172..a86fe52b5e 100644 --- a/src/mds/topology/topology_chunk_allocator.cpp +++ b/src/mds/topology/topology_chunk_allocator.cpp @@ -38,13 +38,11 @@ namespace topology { // logical pool is not designated when calling this function. When executing, // a logical will be chosen following the policy (randomly or weighted) bool TopologyChunkAllocatorImpl::AllocateChunkRandomInSingleLogicalPool( - curve::mds::FileType fileType, - uint32_t chunkNumber, - ChunkSizeType chunkSize, - std::vector *infos) { + curve::mds::FileType fileType, uint32_t chunkNumber, + ChunkSizeType chunkSize, std::vector *infos) { + (void)chunkSize; if (fileType != INODE_PAGEFILE) { - LOG(ERROR) << "Invalid FileType, fileType = " - << fileType; + LOG(ERROR) << "Invalid FileType, fileType = " << fileType; return false; } PoolIdType logicalPoolChosenId = 0; @@ -54,8 +52,8 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRandomInSingleLogicalPool( return false; } - CopySetFilter filter = [](const CopySetInfo& copyset) { - return copyset.IsAvailable(); + CopySetFilter filter = [](const CopySetInfo ©set) { + return copyset.IsAvailable(); }; std::vector copySetIds = topology_->GetCopySetsInLogicalPool(logicalPoolChosenId, filter); @@ -67,21 +65,16 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRandomInSingleLogicalPool( return false; } ret = AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - logicalPoolChosenId, - chunkNumber, - infos); + copySetIds, logicalPoolChosenId, chunkNumber, infos); return ret; } bool TopologyChunkAllocatorImpl::AllocateChunkRoundRobinInSingleLogicalPool( - curve::mds::FileType fileType, - uint32_t chunkNumber, - ChunkSizeType chunkSize, - std::vector *infos) { + curve::mds::FileType fileType, uint32_t chunkNumber, + ChunkSizeType chunkSize, std::vector *infos) { + (void)chunkSize; if (fileType != INODE_PAGEFILE) { - LOG(ERROR) 
<< "Invalid FileType, fileType = " - << fileType; + LOG(ERROR) << "Invalid FileType, fileType = " << fileType; return false; } PoolIdType logicalPoolChosenId = 0; @@ -91,8 +84,8 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRoundRobinInSingleLogicalPool( return false; } - CopySetFilter filter = [](const CopySetInfo& copyset) { - return copyset.IsAvailable(); + CopySetFilter filter = [](const CopySetInfo ©set) { + return copyset.IsAvailable(); }; std::vector copySetIds = topology_->GetCopySetsInLogicalPool(logicalPoolChosenId, filter); @@ -120,11 +113,7 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRoundRobinInSingleLogicalPool( } ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - logicalPoolChosenId, - &nextIndex, - chunkNumber, - infos); + copySetIds, logicalPoolChosenId, &nextIndex, chunkNumber, infos); if (ret) { nextIndexMap_[logicalPoolChosenId] = nextIndex; } @@ -132,8 +121,7 @@ bool TopologyChunkAllocatorImpl::AllocateChunkRoundRobinInSingleLogicalPool( } bool TopologyChunkAllocatorImpl::ChooseSingleLogicalPool( - curve::mds::FileType fileType, - PoolIdType *poolOut) { + curve::mds::FileType fileType, PoolIdType *poolOut) { std::vector logicalPools; LogicalPoolType poolType; @@ -149,12 +137,11 @@ bool TopologyChunkAllocatorImpl::ChooseSingleLogicalPool( break; } - auto logicalPoolFilter = - [poolType, this] (const LogicalPool &pool) { + auto logicalPoolFilter = [poolType, this](const LogicalPool &pool) { return pool.GetLogicalPoolAvaliableFlag() && - (!this->enableLogicalPoolStatus_ || - AllocateStatus::ALLOW == pool.GetStatus()) && - pool.GetLogicalPoolType() == poolType; + (!this->enableLogicalPoolStatus_ || + AllocateStatus::ALLOW == pool.GetStatus()) && + pool.GetLogicalPoolType() == poolType; }; logicalPools = topology_->GetLogicalPoolInCluster(logicalPoolFilter); @@ -181,15 +168,15 @@ bool TopologyChunkAllocatorImpl::ChooseSingleLogicalPool( return AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( 
poolWeightMap, poolOut); } else { - return AllocateChunkPolicy::ChooseSingleLogicalPoolRandom( - poolToChoose, poolOut); + return AllocateChunkPolicy::ChooseSingleLogicalPoolRandom(poolToChoose, + poolOut); } } void TopologyChunkAllocatorImpl::GetRemainingSpaceInLogicalPool( - const std::vector& logicalPools, - std::map* enoughSpacePools) { - for (auto pid : logicalPools) { + const std::vector &logicalPools, + std::map *enoughSpacePools) { + for (auto pid : logicalPools) { LogicalPool lPool; if (!topology_->GetLogicalPool(pid, &lPool)) { continue; @@ -202,9 +189,9 @@ void TopologyChunkAllocatorImpl::GetRemainingSpaceInLogicalPool( double available = available_; if (chunkFilePoolAllocHelp_->GetUseChunkFilepool()) { topoStat_->GetChunkPoolSize(lPool.GetPhysicalPoolId(), - &diskCapacity); - available = available * - chunkFilePoolAllocHelp_->GetAvailable() / 100; + &diskCapacity); + available = + available * chunkFilePoolAllocHelp_->GetAvailable() / 100; diskCapacity = diskCapacity * available / 100; } else { diskCapacity = pPool.GetDiskCapacity(); @@ -221,24 +208,21 @@ void TopologyChunkAllocatorImpl::GetRemainingSpaceInLogicalPool( alloc *= lPool.GetReplicaNum(); // calculate remaining capacity - uint64_t diskRemainning = - (diskCapacity > alloc) ? diskCapacity - alloc : 0; + uint64_t diskRemainning = (static_cast(diskCapacity) > alloc) + ? 
diskCapacity - alloc + : 0; LOG(INFO) << "ChooseSingleLogicalPool find pool {" - << "diskCapacity:" << diskCapacity - << ", diskAlloc:" << alloc - << ", diskRemainning:" << diskRemainning - << "}"; + << "diskCapacity:" << diskCapacity << ", diskAlloc:" << alloc + << ", diskRemainning:" << diskRemainning << "}"; if (diskRemainning > 0) { (*enoughSpacePools)[pid] = diskRemainning; } } } bool AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t chunkNumber, - std::vector *infos) { + std::vector copySetIds, PoolIdType logicalPoolId, + uint32_t chunkNumber, std::vector *infos) { infos->clear(); static std::random_device rd; // generating seed for random number engine @@ -256,10 +240,8 @@ bool AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( } bool AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t *nextIndex, - uint32_t chunkNumber, + std::vector copySetIds, PoolIdType logicalPoolId, + uint32_t *nextIndex, uint32_t chunkNumber, std::vector *infos) { if (copySetIds.empty()) { return false; @@ -279,8 +261,7 @@ bool AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( } bool AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - const std::map &poolWeightMap, - PoolIdType *poolIdOut) { + const std::map &poolWeightMap, PoolIdType *poolIdOut) { if (poolWeightMap.empty()) { LOG(ERROR) << "ChooseSingleLogicalPoolByWeight, " << "poolWeightMap is empty."; @@ -316,8 +297,7 @@ bool AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( } bool AllocateChunkPolicy::ChooseSingleLogicalPoolRandom( - const std::vector &pools, - PoolIdType *poolIdOut) { + const std::vector &pools, PoolIdType *poolIdOut) { if (pools.empty()) { LOG(ERROR) << "ChooseSingleLogicalPoolRandom, " << "pools is empty."; diff --git a/src/mds/topology/topology_chunk_allocator.h b/src/mds/topology/topology_chunk_allocator.h index 
9f9771f5fd..c4a6e593d4 100644 --- a/src/mds/topology/topology_chunk_allocator.h +++ b/src/mds/topology/topology_chunk_allocator.h @@ -49,18 +49,18 @@ enum class ChoosePoolPolicy { class ChunkFilePoolAllocHelp { public: ChunkFilePoolAllocHelp() - : ChunkFilePoolPoolWalReserve(0), - useChunkFilepool(false), - useChunkFilePoolAsWalPool(false) {} + : useChunkFilepool(false), useChunkFilePoolAsWalPool(false), + ChunkFilePoolPoolWalReserve(0) {} ~ChunkFilePoolAllocHelp() {} - void UpdateChunkFilePoolAllocConfig(bool useChunkFilepool_, - bool useChunkFilePoolAsWalPool_, - uint32_t useChunkFilePoolAsWalPoolReserve_) { + void + UpdateChunkFilePoolAllocConfig(bool useChunkFilepool_, + bool useChunkFilePoolAsWalPool_, + uint32_t useChunkFilePoolAsWalPoolReserve_) { useChunkFilepool.store(useChunkFilepool_, std::memory_order_release); useChunkFilePoolAsWalPool.store(useChunkFilePoolAsWalPool_, - std::memory_order_release); + std::memory_order_release); ChunkFilePoolPoolWalReserve.store(useChunkFilePoolAsWalPoolReserve_, - std::memory_order_release); + std::memory_order_release); } bool GetUseChunkFilepool() { return useChunkFilepool.load(std::memory_order_acquire); @@ -68,8 +68,8 @@ class ChunkFilePoolAllocHelp { // After removing the reserved space, the remaining percentage uint32_t GetAvailable() { if (useChunkFilePoolAsWalPool.load(std::memory_order_acquire)) { - return 100 - ChunkFilePoolPoolWalReserve.load( - std::memory_order_acquire); + return 100 - + ChunkFilePoolPoolWalReserve.load(std::memory_order_acquire); } else { return 100; } @@ -80,7 +80,7 @@ class ChunkFilePoolAllocHelp { std::atomic useChunkFilepool; std::atomic useChunkFilePoolAsWalPool; // Reserve extra space for walpool - std::atomic ChunkFilePoolPoolWalReserve; + std::atomic ChunkFilePoolPoolWalReserve; }; class TopologyChunkAllocator { @@ -88,18 +88,14 @@ class TopologyChunkAllocator { TopologyChunkAllocator() {} virtual ~TopologyChunkAllocator() {} virtual bool AllocateChunkRandomInSingleLogicalPool( - 
::curve::mds::FileType fileType, - uint32_t chunkNumer, - ChunkSizeType chunkSize, - std::vector *infos) = 0; + ::curve::mds::FileType fileType, uint32_t chunkNumer, + ChunkSizeType chunkSize, std::vector *infos) = 0; virtual bool AllocateChunkRoundRobinInSingleLogicalPool( - ::curve::mds::FileType fileType, - uint32_t chunkNumer, - ChunkSizeType chunkSize, - std::vector *infos) = 0; + ::curve::mds::FileType fileType, uint32_t chunkNumer, + ChunkSizeType chunkSize, std::vector *infos) = 0; virtual void GetRemainingSpaceInLogicalPool( - const std::vector& logicalPools, - std::map* remianingSpace) = 0; + const std::vector &logicalPools, + std::map *remianingSpace) = 0; virtual void UpdateChunkFilePoolAllocConfig( bool useChunkFilepool_, bool useChunkFilePoolAsWalPool_, uint32_t useChunkFilePoolAsWalPoolReserve_) = 0; @@ -107,18 +103,18 @@ class TopologyChunkAllocator { class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { public: - TopologyChunkAllocatorImpl(std::shared_ptr topology, + TopologyChunkAllocatorImpl( + std::shared_ptr topology, std::shared_ptr allocStatistic, std::shared_ptr topologyStat, std::shared_ptr ChunkFilePoolAllocHelp, const TopologyOption &option) - : topology_(topology), - allocStatistic_(allocStatistic), - topoStat_(topologyStat), - chunkFilePoolAllocHelp_(ChunkFilePoolAllocHelp), - available_(option.PoolUsagePercentLimit), - policy_(static_cast(option.choosePoolPolicy)), - enableLogicalPoolStatus_(option.enableLogicalPoolStatus) { + : topology_(topology), allocStatistic_(allocStatistic), + available_(option.PoolUsagePercentLimit), + topoStat_(topologyStat), + chunkFilePoolAllocHelp_(ChunkFilePoolAllocHelp), + policy_(static_cast(option.choosePoolPolicy)), + enableLogicalPoolStatus_(option.enableLogicalPoolStatus) { std::srand(std::time(nullptr)); } ~TopologyChunkAllocatorImpl() {} @@ -136,10 +132,8 @@ class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { * @retval false if failed */ bool 
AllocateChunkRandomInSingleLogicalPool( - curve::mds::FileType fileType, - uint32_t chunkNumber, - ChunkSizeType chunkSize, - std::vector *infos) override; + curve::mds::FileType fileType, uint32_t chunkNumber, + ChunkSizeType chunkSize, std::vector *infos) override; /** * @brief allocate chunks by round robin in a single logical pool @@ -153,20 +147,18 @@ class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { * @retval false if failed */ bool AllocateChunkRoundRobinInSingleLogicalPool( - curve::mds::FileType fileType, - uint32_t chunkNumber, - ChunkSizeType chunkSize, - std::vector *infos) override; + curve::mds::FileType fileType, uint32_t chunkNumber, + ChunkSizeType chunkSize, std::vector *infos) override; void GetRemainingSpaceInLogicalPool( - const std::vector& logicalPools, - std::map* remianingSpace) override; - void UpdateChunkFilePoolAllocConfig(bool useChunkFilepool_, - bool useChunkFilePoolAsWalPool_, - uint32_t useChunkFilePoolAsWalPoolReserve_) override { - chunkFilePoolAllocHelp_->UpdateChunkFilePoolAllocConfig( - useChunkFilepool_, useChunkFilePoolAsWalPool_, - useChunkFilePoolAsWalPoolReserve_); - } + const std::vector &logicalPools, + std::map *remianingSpace) override; + void UpdateChunkFilePoolAllocConfig( + bool useChunkFilepool_, bool useChunkFilePoolAsWalPool_, + uint32_t useChunkFilePoolAsWalPoolReserve_) override { + chunkFilePoolAllocHelp_->UpdateChunkFilePoolAllocConfig( + useChunkFilepool_, useChunkFilePoolAsWalPool_, + useChunkFilePoolAsWalPoolReserve_); + } private: /** @@ -179,7 +171,7 @@ class TopologyChunkAllocatorImpl : public TopologyChunkAllocator { * @retval false if failed */ bool ChooseSingleLogicalPool(curve::mds::FileType fileType, - PoolIdType *poolOut); + PoolIdType *poolOut); private: std::shared_ptr topology_; @@ -230,10 +222,8 @@ class AllocateChunkPolicy { * @retval false if failed */ static bool AllocateChunkRandomInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t 
chunkNumber, - std::vector *infos); + std::vector copySetIds, PoolIdType logicalPoolId, + uint32_t chunkNumber, std::vector *infos); /** * @brief allocate chunks by round robin in a single logical pool @@ -253,10 +243,8 @@ class AllocateChunkPolicy { * @retval false if failed */ static bool AllocateChunkRoundRobinInSingleLogicalPool( - std::vector copySetIds, - PoolIdType logicalPoolId, - uint32_t *nextIndex, - uint32_t chunkNumber, + std::vector copySetIds, PoolIdType logicalPoolId, + uint32_t *nextIndex, uint32_t chunkNumber, std::vector *infos); /** @@ -281,13 +269,12 @@ class AllocateChunkPolicy { * @retval true if succeeded * @retval false if failed */ - static bool ChooseSingleLogicalPoolRandom( - const std::vector &pools, - PoolIdType *poolIdOut); + static bool + ChooseSingleLogicalPoolRandom(const std::vector &pools, + PoolIdType *poolIdOut); }; - } // namespace topology } // namespace mds } // namespace curve diff --git a/src/mds/topology/topology_item.cpp b/src/mds/topology/topology_item.cpp index 951ca03d23..f4278aedc8 100644 --- a/src/mds/topology/topology_item.cpp +++ b/src/mds/topology/topology_item.cpp @@ -24,6 +24,7 @@ #include #include +#include #include "json/json.h" #include "src/common/string_util.h" @@ -47,51 +48,57 @@ bool ClusterInformation::ParseFromString(const std::string &value) { } bool LogicalPool::TransRedundanceAndPlaceMentPolicyFromJsonStr( - const std::string &jsonStr, - LogicalPoolType type, + const std::string &jsonStr, LogicalPoolType type, RedundanceAndPlaceMentPolicy *rap) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value rapJson; - if (!reader.parse(jsonStr, rapJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + &rapJson, &errormsg)) { return false; } switch (type) { - case LogicalPoolType::PAGEFILE: { - if (!rapJson["replicaNum"].isNull()) { - rap->pageFileRAP.replicaNum = 
rapJson["replicaNum"].asInt(); - } else { - return false; - } - if (!rapJson["copysetNum"].isNull()) { - rap->pageFileRAP.copysetNum = rapJson["copysetNum"].asInt(); - } else { - return false; - } - if (!rapJson["zoneNum"].isNull()) { - rap->pageFileRAP.zoneNum = rapJson["zoneNum"].asInt(); - } else { - return false; - } - break; - } - case LogicalPoolType::APPENDFILE: { - // TODO(xuchaojie): it is not done. + case LogicalPoolType::PAGEFILE: { + if (!rapJson["replicaNum"].isNull()) { + rap->pageFileRAP.replicaNum = rapJson["replicaNum"].asInt(); + } else { return false; } - case LogicalPoolType::APPENDECFILE: { - // TODO(xuchaojie): it is not done. + if (!rapJson["copysetNum"].isNull()) { + rap->pageFileRAP.copysetNum = rapJson["copysetNum"].asInt(); + } else { return false; } - default: { + if (!rapJson["zoneNum"].isNull()) { + rap->pageFileRAP.zoneNum = rapJson["zoneNum"].asInt(); + } else { return false; } + break; + } + case LogicalPoolType::APPENDFILE: { + // TODO(xuchaojie): it is not done. + return false; + } + case LogicalPoolType::APPENDECFILE: { + // TODO(xuchaojie): it is not done. + return false; + } + default: { + return false; + } } return true; } -bool LogicalPool::TransUserPolicyFromJsonStr( - const std::string &jsonStr, LogicalPoolType type, UserPolicy *policy) { +bool LogicalPool::TransUserPolicyFromJsonStr(const std::string &jsonStr, + LogicalPoolType type, + UserPolicy *policy) { + (void)jsonStr; + (void)type; + (void)policy; // TODO(xuchaojie): to finish it. 
return true; } @@ -99,32 +106,30 @@ bool LogicalPool::TransUserPolicyFromJsonStr( bool LogicalPool::SetRedundanceAndPlaceMentPolicyByJson( const std::string &jsonStr) { return LogicalPool::TransRedundanceAndPlaceMentPolicyFromJsonStr( - jsonStr, - GetLogicalPoolType(), - &rap_); + jsonStr, GetLogicalPoolType(), &rap_); } std::string LogicalPool::GetRedundanceAndPlaceMentPolicyJsonStr() const { std::string rapStr; Json::Value rapJson; switch (GetLogicalPoolType()) { - case LogicalPoolType::PAGEFILE : { - rapJson["replicaNum"] = rap_.pageFileRAP.replicaNum; - rapJson["copysetNum"] = rap_.pageFileRAP.copysetNum; - rapJson["zoneNum"] = rap_.pageFileRAP.zoneNum; - rapStr = rapJson.toStyledString(); - break; - } - case LogicalPoolType::APPENDFILE : { - // TODO(xuchaojie): fix it - break; - } - case LogicalPoolType::APPENDECFILE : { - // TODO(xuchaojie): fix it - break; - } - default: - break; + case LogicalPoolType::PAGEFILE: { + rapJson["replicaNum"] = rap_.pageFileRAP.replicaNum; + rapJson["copysetNum"] = rap_.pageFileRAP.copysetNum; + rapJson["zoneNum"] = rap_.pageFileRAP.zoneNum; + rapStr = rapJson.toStyledString(); + break; + } + case LogicalPoolType::APPENDFILE: { + // TODO(xuchaojie): fix it + break; + } + case LogicalPoolType::APPENDECFILE: { + // TODO(xuchaojie): fix it + break; + } + default: + break; } return rapStr; } @@ -132,9 +137,7 @@ std::string LogicalPool::GetRedundanceAndPlaceMentPolicyJsonStr() const { bool LogicalPool::SetUserPolicyByJson(const std::string &jsonStr) { return LogicalPool::TransUserPolicyFromJsonStr( - jsonStr, - GetLogicalPoolType(), - &policy_); + jsonStr, GetLogicalPoolType(), &policy_); } std::string LogicalPool::GetUserPolicyJsonStr() const { @@ -145,20 +148,20 @@ std::string LogicalPool::GetUserPolicyJsonStr() const { uint16_t LogicalPool::GetReplicaNum() const { uint16_t ret = 0; switch (GetLogicalPoolType()) { - case LogicalPoolType::PAGEFILE : { - ret = rap_.pageFileRAP.replicaNum; - break; - } - case 
LogicalPoolType::APPENDFILE : { - // TODO(xuchaojie): fix it - break; - } - case LogicalPoolType::APPENDECFILE : { - // TODO(xuchaojie): fix it - break; - } - default: - break; + case LogicalPoolType::PAGEFILE: { + ret = rap_.pageFileRAP.replicaNum; + break; + } + case LogicalPoolType::APPENDFILE: { + // TODO(xuchaojie): fix it + break; + } + case LogicalPoolType::APPENDECFILE: { + // TODO(xuchaojie): fix it + break; + } + default: + break; } return ret; } @@ -187,10 +190,8 @@ bool LogicalPool::ParseFromString(const std::string &value) { name_ = data.logicalpoolname(); physicalPoolId_ = data.physicalpoolid(); type_ = data.type(); - SetRedundanceAndPlaceMentPolicyByJson( - data.redundanceandplacementpolicy()); - SetUserPolicyByJson( - data.userpolicy()); + SetRedundanceAndPlaceMentPolicyByJson(data.redundanceandplacementpolicy()); + SetUserPolicyByJson(data.userpolicy()); initialScatterWidth_ = data.initialscatterwidth(); createTime_ = data.createtime(); status_ = data.status(); @@ -309,13 +310,17 @@ std::string CopySetInfo::GetCopySetMembersStr() const { } bool CopySetInfo::SetCopySetMembersByJson(const std::string &jsonStr) { - Json::Reader reader; + Json::CharReaderBuilder builder; + std::unique_ptr reader(builder.newCharReader()); Json::Value copysetMemJson; - if (!reader.parse(jsonStr, copysetMemJson)) { + JSONCPP_STRING errormsg; + if (!reader->parse(jsonStr.data(), jsonStr.data() + jsonStr.length(), + ©setMemJson, &errormsg)) { return false; } + peers_.clear(); - for (int i = 0; i < copysetMemJson.size(); i++) { + for (int i = 0; i < static_cast(copysetMemJson.size()); i++) { if (copysetMemJson[i].isInt()) { peers_.insert(copysetMemJson[i].asInt()); } else { @@ -355,16 +360,13 @@ bool CopySetInfo::ParseFromString(const std::string &value) { peers_.insert(data.chunkserverids(i)); } lastScanSec_ = data.has_lastscansec() ? data.lastscansec() : 0; - lastScanConsistent_ = data.has_lastscanconsistent() ? 
- data.lastscanconsistent() : true; + lastScanConsistent_ = + data.has_lastscanconsistent() ? data.lastscanconsistent() : true; return ret; } -bool SplitPeerId( - const std::string &peerId, - std::string *ip, - uint32_t *port, - uint32_t *idx) { +bool SplitPeerId(const std::string &peerId, std::string *ip, uint32_t *port, + uint32_t *idx) { std::vector items; curve::common::SplitString(peerId, ":", &items); if (3 == items.size()) { diff --git a/src/mds/topology/topology_service_manager.cpp b/src/mds/topology/topology_service_manager.cpp index 1abd39f85e..100c91632f 100644 --- a/src/mds/topology/topology_service_manager.cpp +++ b/src/mds/topology/topology_service_manager.cpp @@ -347,6 +347,7 @@ void TopologyServiceManager::GetChunkServer( void TopologyServiceManager::GetChunkServerInCluster( const GetChunkServerInClusterRequest *request, GetChunkServerInClusterResponse *response) { + (void)request; response->set_statuscode(kTopoErrCodeSuccess); auto chunkserverIds = topology_->GetChunkServerInCluster(); for (const auto id : chunkserverIds) { @@ -863,6 +864,7 @@ void TopologyServiceManager::GetPhysicalPool(const PhysicalPoolRequest *request, void TopologyServiceManager::ListPhysicalPool( const ListPhysicalPoolRequest *request, ListPhysicalPoolResponse *response) { + (void)request; response->set_statuscode(kTopoErrCodeSuccess); auto poolList = topology_->GetPhysicalPoolInCluster(); for (PoolIdType id : poolList) { @@ -1570,6 +1572,7 @@ void TopologyServiceManager::GetCopyset(const GetCopysetRequest* request, void TopologyServiceManager::GetClusterInfo( const GetClusterInfoRequest* request, GetClusterInfoResponse* response) { + (void)request; ClusterInformation info; if (topology_->GetClusterInfo(&info)) { response->set_statuscode(kTopoErrCodeSuccess); @@ -1599,6 +1602,7 @@ void TopologyServiceManager::SetCopysetsAvailFlag( void TopologyServiceManager::ListUnAvailCopySets( const ListUnAvailCopySetsRequest* request, ListUnAvailCopySetsResponse* response) { + 
(void)request; std::vector copysets = topology_->GetCopySetsInCluster(); for (const CopySetKey& copyset : copysets) { diff --git a/src/mds/topology/topology_storge_etcd.cpp b/src/mds/topology/topology_storge_etcd.cpp index 51f15e4929..7ff793877a 100644 --- a/src/mds/topology/topology_storge_etcd.cpp +++ b/src/mds/topology/topology_storge_etcd.cpp @@ -45,7 +45,7 @@ bool TopologyStorageEtcd::LoadLogicalPool( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { LogicalPool data; errCode = codec_->DecodeLogicalPoolData(out[i], &data); if (!errCode) { @@ -82,7 +82,7 @@ bool TopologyStorageEtcd::LoadPhysicalPool( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { PhysicalPool data; errCode = codec_->DecodePhysicalPoolData(out[i], &data); if (!errCode) { @@ -118,7 +118,7 @@ bool TopologyStorageEtcd::LoadZone( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { Zone data; errCode = codec_->DecodeZoneData(out[i], &data); if (!errCode) { @@ -154,7 +154,7 @@ bool TopologyStorageEtcd::LoadServer( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { Server data; errCode = codec_->DecodeServerData(out[i], &data); if (!errCode) { @@ -190,7 +190,7 @@ bool TopologyStorageEtcd::LoadChunkServer( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { ChunkServer data; errCode = codec_->DecodeChunkServerData(out[i], &data); if (!errCode) { @@ -228,7 +228,7 @@ bool TopologyStorageEtcd::LoadCopySet( LOG(ERROR) << "etcd list err:" << errCode; return false; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { 
CopySetInfo data; errCode = codec_->DecodeCopySetData(out[i], &data); if (!errCode) { diff --git a/src/snapshotcloneserver/clone/clone_closure.h b/src/snapshotcloneserver/clone/clone_closure.h index 68a9199c4c..65847d109e 100644 --- a/src/snapshotcloneserver/clone/clone_closure.h +++ b/src/snapshotcloneserver/clone/clone_closure.h @@ -34,60 +34,38 @@ #include "src/common/concurrent/name_lock.h" #include "src/common/concurrent/dlock.h" -using ::google::protobuf::RpcController; -using ::google::protobuf::Closure; -using ::curve::common::NameLockGuard; using ::curve::common::DLock; +using ::curve::common::NameLockGuard; +using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; namespace curve { namespace snapshotcloneserver { class CloneClosure : public Closure { public: - CloneClosure(brpc::Controller* bcntl = nullptr, - Closure* done = nullptr) - : bcntl_(bcntl), - done_(done), - requestId_(""), - taskId_(""), - dlock_(nullptr), - retCode_(kErrCodeInternalError) {} - - brpc::Controller * GetController() { - return bcntl_; - } + explicit CloneClosure(brpc::Controller *bcntl = nullptr, + Closure *done = nullptr) + : dlock_(nullptr), bcntl_(bcntl), done_(done), requestId_(""), + taskId_(""), retCode_(kErrCodeInternalError) {} - void SetRequestId(const UUID &requestId) { - requestId_ = requestId; - } + brpc::Controller *GetController() { return bcntl_; } - void SetTaskId(const TaskIdType &taskId) { - taskId_ = taskId; - } + void SetRequestId(const UUID &requestId) { requestId_ = requestId; } - TaskIdType GetTaskId() { - return taskId_; - } + void SetTaskId(const TaskIdType &taskId) { taskId_ = taskId; } - void SetErrCode(int retCode) { - retCode_ = retCode; - } + TaskIdType GetTaskId() { return taskId_; } - int GetErrCode() { - return retCode_; - } + void SetErrCode(int retCode) { retCode_ = retCode; } - void SetDestFileLock(std::shared_ptr lock) { - lock_ = lock; - } + int GetErrCode() { return retCode_; } - void SetDLock(std::shared_ptr lock) { - 
dlock_ = lock; - } + void SetDestFileLock(std::shared_ptr lock) { lock_ = lock; } - std::shared_ptr GetDLock() { - return dlock_; - } + void SetDLock(std::shared_ptr lock) { dlock_ = lock; } + + std::shared_ptr GetDLock() { return dlock_; } void SetDestFileName(const std::string &destFileName) { destFileName_ = destFileName; @@ -100,9 +78,8 @@ class CloneClosure : public Closure { bcntl_->http_response().set_status_code( brpc::HTTP_STATUS_INTERNAL_SERVER_ERROR); butil::IOBufBuilder os; - std::string msg = BuildErrorMessage(retCode_, - requestId_, - taskId_); + std::string msg = + BuildErrorMessage(retCode_, requestId_, taskId_); os << msg; os.move_to(bcntl_->response_attachment()); } else { @@ -137,7 +114,7 @@ class CloneClosure : public Closure { std::shared_ptr dlock_; std::string destFileName_; brpc::Controller *bcntl_; - Closure* done_; + Closure *done_; UUID requestId_; TaskIdType taskId_; int retCode_; diff --git a/src/snapshotcloneserver/clone/clone_core.cpp b/src/snapshotcloneserver/clone/clone_core.cpp index e2843f6cd9..402accf0a5 100644 --- a/src/snapshotcloneserver/clone/clone_core.cpp +++ b/src/snapshotcloneserver/clone/clone_core.cpp @@ -174,7 +174,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeInternalError; } - //是否为快照 + // 是否为快照 SnapshotInfo snapInfo; CloneFileType fileType; @@ -290,6 +290,7 @@ int CloneCoreImpl::FlattenPre( const std::string &user, const TaskIdType &taskId, CloneInfo *cloneInfo) { + (void)user; int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); if (ret < 0) { return kErrCodeFileNotExist; @@ -868,6 +869,8 @@ int CloneCoreImpl::CompleteCloneMeta( std::shared_ptr task, const FInfo &fInfo, const CloneSegmentMap &segInfos) { + (void)fInfo; + (void)segInfos; std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string user = task->GetCloneInfo().GetUser(); @@ -1077,6 +1080,7 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( int CloneCoreImpl::ChangeOwner( 
std::shared_ptr task, const FInfo &fInfo) { + (void)fInfo; std::string user = task->GetCloneInfo().GetUser(); std::string origin = cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); @@ -1171,6 +1175,8 @@ int CloneCoreImpl::CompleteCloneFile( std::shared_ptr task, const FInfo &fInfo, const CloneSegmentMap &segInfos) { + (void)fInfo; + (void)segInfos; std::string fileName; if (IsLazy(task)) { fileName = task->GetCloneInfo().GetDest(); @@ -1542,7 +1548,7 @@ void CloneCoreImpl::HandleCleanCloneOrRecoverTask( if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) { // 错误情况下可能未清除镜像被克隆标志 if (IsFile(task)) { - //重新发送 + // 重新发送 std::string source = task->GetCloneInfo().GetSrc(); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { diff --git a/src/snapshotcloneserver/common/snapshotclone_info.h b/src/snapshotcloneserver/common/snapshotclone_info.h index 00433695d2..e51a47d1b5 100644 --- a/src/snapshotcloneserver/common/snapshotclone_info.h +++ b/src/snapshotcloneserver/common/snapshotclone_info.h @@ -247,7 +247,7 @@ class CloneInfo { std::ostream& operator<<(std::ostream& os, const CloneInfo &cloneInfo); -//快照处理状态 +// 快照处理状态 enum class Status{ done = 0, pending, @@ -257,7 +257,7 @@ enum class Status{ error }; -//快照信息 +// 快照信息 class SnapshotInfo { public: SnapshotInfo() @@ -427,7 +427,7 @@ class SnapshotInfo { uint32_t chunkSize_; // 文件的segment大小 uint64_t segmentSize_; - //文件大小 + // 文件大小 uint64_t fileLength_; // stripe size uint64_t stripeUnit_; diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp index 9533ded57e..fc1edd783b 100644 --- a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp +++ b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.cpp @@ -304,7 +304,7 @@ int SnapshotCloneMetaStoreEtcd::LoadSnapshotInfos() { LOG(ERROR) << "etcd list err:" << errCode; return -1; } - for (int i = 0; i < 
out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { SnapshotInfo data; errCode = codec_->DecodeSnapshotData(out[i], &data); if (!errCode) { @@ -327,7 +327,7 @@ int SnapshotCloneMetaStoreEtcd::LoadCloneInfos() { LOG(ERROR) << "etcd list err:" << errCode; return -1; } - for (int i = 0; i < out.size(); i++) { + for (size_t i = 0; i < out.size(); i++) { CloneInfo data; errCode = codec_->DecodeCloneInfoData(out[i], &data); if (!errCode) { diff --git a/src/snapshotcloneserver/common/task_tracker.h b/src/snapshotcloneserver/common/task_tracker.h index 809bcf5aa4..121fb01b30 100644 --- a/src/snapshotcloneserver/common/task_tracker.h +++ b/src/snapshotcloneserver/common/task_tracker.h @@ -23,9 +23,12 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_TASK_TRACKER_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_TASK_TRACKER_H_ -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/task_tracker.h" +#include + +#include "src/common/snapshotclone/snapshotclone_define.h" + using ::curve::common::TaskTracker; using ::curve::common::ContextTaskTracker; diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.cpp b/src/snapshotcloneserver/snapshot/snapshot_core.cpp index 9cac841b11..4e9e5ab8ff 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_core.cpp @@ -73,7 +73,7 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, snapshotNum--; } } - if (snapshotNum >= maxSnapshotLimit_) { + if (snapshotNum >= static_cast(maxSnapshotLimit_)) { LOG(ERROR) << "Snapshot count reach the max limit."; return kErrCodeSnapshotCountReachLimit; } diff --git a/src/snapshotcloneserver/snapshotclone_service.cpp b/src/snapshotcloneserver/snapshotclone_service.cpp index 0a7e379d2b..a2832ed705 100644 --- a/src/snapshotcloneserver/snapshotclone_service.cpp +++ b/src/snapshotcloneserver/snapshotclone_service.cpp @@ -41,6 +41,8 @@ void SnapshotCloneServiceImpl::default_method(RpcController* cntl, const 
HttpRequest* req, HttpResponse* resp, Closure* done) { + (void)req; + (void)resp; brpc::ClosureGuard done_guard(done); brpc::Controller* bcntl = static_cast(cntl); @@ -913,7 +915,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneRefStatusAction( if (refStatus == CloneRefStatus::kNeedCheck) { mainObj[kTotalCountStr] = cloneInfos.size(); Json::Value listObj; - for (int i = 0; i < cloneInfos.size(); i++) { + for (size_t i = 0; i < cloneInfos.size(); i++) { Json::Value cloneTaskObj; cloneTaskObj[kUserStr] = cloneInfos[i].GetUser(); cloneTaskObj[kFileStr] = cloneInfos[i].GetDest(); diff --git a/src/tools/copyset_check_core.cpp b/src/tools/copyset_check_core.cpp index 350dddc368..f32a7a923d 100644 --- a/src/tools/copyset_check_core.cpp +++ b/src/tools/copyset_check_core.cpp @@ -19,8 +19,9 @@ * Created Date: 2019-10-30 * Author: charisu */ -#include #include "src/tools/copyset_check_core.h" +#include +#include DEFINE_uint64(margin, 1000, "The threshold of the gap between peers"); DEFINE_uint64(replicasNum, 3, "the number of replicas that required"); @@ -404,7 +405,7 @@ int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, std::vector threadpool; std::map> queryCsResult; uint32_t index = 0; - for (int i = 0; i < FLAGS_rpcConcurrentNum; i++) { + for (uint64_t i = 0; i < FLAGS_rpcConcurrentNum; i++) { threadpool.emplace_back(Thread( &CopysetCheckCore::ConcurrentCheckCopysetsOnServer, this, std::ref(chunkservers), &index, @@ -751,7 +752,7 @@ CheckResult CopysetCheckCore::CheckPeerOnlineStatus( } if (notOnlineNum > 0) { uint32_t majority = peers.size() / 2 + 1; - if (notOnlineNum < majority) { + if (notOnlineNum < static_cast(majority)) { return CheckResult::kMinorityPeerNotOnline; } else { return CheckResult::kMajorityPeerNotOnline; @@ -928,6 +929,7 @@ int CopysetCheckCore::ListMayBrokenVolumes( void CopysetCheckCore::GetCopysetInfos(const char* key, std::vector* copysets) { + (void)key; for (auto iter = copysets_[kMajorityPeerNotOnline].begin(); iter != 
copysets_[kMajorityPeerNotOnline].end(); ++iter) { std::string gid = *iter; diff --git a/src/tools/copyset_tool.cpp b/src/tools/copyset_tool.cpp index bb203e4cf4..5cfaca5d94 100644 --- a/src/tools/copyset_tool.cpp +++ b/src/tools/copyset_tool.cpp @@ -84,7 +84,7 @@ int CopysetTool::Init() { } void PrintCopysets(const std::vector& copysets) { - for (int i = 0; i < copysets.size(); ++i) { + for (size_t i = 0; i < copysets.size(); ++i) { if (i != 0) { std::cout << ","; } diff --git a/src/tools/curve_cli.cpp b/src/tools/curve_cli.cpp index 00dbeea052..0dc5dcf46e 100644 --- a/src/tools/curve_cli.cpp +++ b/src/tools/curve_cli.cpp @@ -269,6 +269,8 @@ int CurveCli::DoSnapshot() { int CurveCli::DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, const curve::common::Peer& peer) { + (void)lgPoolId; + (void)copysetId; braft::cli::CliOptions opt; opt.timeout_ms = FLAGS_timeout_ms; opt.max_retry = FLAGS_max_retry; diff --git a/src/tools/curve_meta_tool.cpp b/src/tools/curve_meta_tool.cpp index fbcdb30556..5d9da78ec0 100644 --- a/src/tools/curve_meta_tool.cpp +++ b/src/tools/curve_meta_tool.cpp @@ -121,7 +121,7 @@ int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); - if (rc != FLAGS_pageSize) { + if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { std::cout << "Fail to read metaPage from " << chunkFileName << ", " << berror() << std::endl; @@ -157,7 +157,7 @@ int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); localFS_->Close(fd); - if (rc != FLAGS_pageSize) { + if (rc != static_cast(FLAGS_pageSize)) { if (rc < 0) { std::cout << "Fail to read metaPage from " << snapFileName << ", " << berror() << std::endl; diff --git a/src/tools/etcd_client.cpp b/src/tools/etcd_client.cpp index a9d4a8bad7..6e1821a8ba 100644 --- 
a/src/tools/etcd_client.cpp +++ b/src/tools/etcd_client.cpp @@ -22,10 +22,12 @@ #include "src/tools/etcd_client.h" +#include + namespace curve { namespace tool { -int EtcdClient::Init(const std::string& etcdAddr) { +int EtcdClient::Init(const std::string &etcdAddr) { curve::common::SplitString(etcdAddr, ",", &etcdAddrVec_); if (etcdAddrVec_.empty()) { std::cout << "Split etcd address fail!" << std::endl; @@ -34,8 +36,8 @@ int EtcdClient::Init(const std::string& etcdAddr) { return 0; } -int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, - std::map* onlineState) { +int EtcdClient::GetEtcdClusterStatus(std::vector *leaderAddrVec, + std::map *onlineState) { if (!leaderAddrVec || !onlineState) { std::cout << "The argument is a null pointer!" << std::endl; return -1; @@ -43,7 +45,7 @@ int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, brpc::Channel httpChannel; brpc::ChannelOptions options; options.protocol = brpc::PROTOCOL_HTTP; - for (const auto& addr : etcdAddrVec_) { + for (const auto &addr : etcdAddrVec_) { int res = httpChannel.Init(addr.c_str(), &options); if (res != 0) { (*onlineState)[addr] = false; @@ -59,12 +61,18 @@ int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, } (*onlineState)[addr] = true; std::string resp = cntl.response_attachment().to_string(); - Json::Reader reader(Json::Features::strictMode()); + Json::CharReaderBuilder builder; + Json::CharReaderBuilder::strictMode(&builder.settings_); + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(resp, value)) { - std::cout << "Parse the response fail!" << std::endl; + JSONCPP_STRING errormsg; + if (!reader->parse(resp.data(), resp.data() + resp.length(), &value, + &errormsg)) { + std::cout << "Parse the response fail! 
Error: " << errormsg + << std::endl; return -1; } + if (!value[kEtcdLeader].isNull()) { if (value[kEtcdLeader] == value[kEtcdHeader][kEtcdMemberId]) { leaderAddrVec->emplace_back(addr); @@ -74,13 +82,13 @@ int EtcdClient::GetEtcdClusterStatus(std::vector* leaderAddrVec, return 0; } -int EtcdClient::GetAndCheckEtcdVersion(std::string* version, - std::vector* failedList) { +int EtcdClient::GetAndCheckEtcdVersion(std::string *version, + std::vector *failedList) { brpc::Channel httpChannel; brpc::ChannelOptions options; options.protocol = brpc::PROTOCOL_HTTP; VersionMapType versionMap; - for (const auto& addr : etcdAddrVec_) { + for (const auto &addr : etcdAddrVec_) { int res = httpChannel.Init(addr.c_str(), &options); if (res != 0) { std::cout << "Init channel to " << addr << " failed" << std::endl; @@ -98,10 +106,16 @@ int EtcdClient::GetAndCheckEtcdVersion(std::string* version, continue; } std::string resp = cntl.response_attachment().to_string(); - Json::Reader reader(Json::Features::strictMode()); + + Json::CharReaderBuilder builder; + Json::CharReaderBuilder::strictMode(&builder.settings_); + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(resp, value)) { - std::cout << "Parse the response fail!" << std::endl; + JSONCPP_STRING errormsg; + if (!reader->parse(resp.data(), resp.data() + resp.length(), &value, + &errormsg)) { + std::cout << "Parse the response fail! 
Error: " << errormsg + << std::endl; return -1; } if (value[kEtcdCluster].isNull()) { diff --git a/src/tools/mds_client.cpp b/src/tools/mds_client.cpp index 502d58ccd5..c4c6668d96 100644 --- a/src/tools/mds_client.cpp +++ b/src/tools/mds_client.cpp @@ -960,7 +960,7 @@ int MDSClient::GetMetric(const std::string& metricName, std::string* value) { bool MDSClient::ChangeMDServer() { currentMdsIndex_++; - if (currentMdsIndex_ > mdsAddrVec_.size() - 1) { + if (currentMdsIndex_ > static_cast(mdsAddrVec_.size() - 1)) { currentMdsIndex_ = 0; } if (channel_.Init(mdsAddrVec_[currentMdsIndex_].c_str(), diff --git a/src/tools/metric_client.cpp b/src/tools/metric_client.cpp index df3b2713dd..776347f738 100644 --- a/src/tools/metric_client.cpp +++ b/src/tools/metric_client.cpp @@ -22,23 +22,24 @@ #include "src/tools/metric_client.h" +#include + DECLARE_uint64(rpcTimeout); DECLARE_uint64(rpcRetryTimes); namespace curve { namespace tool { -MetricRet MetricClient::GetMetric(const std::string& addr, - const std::string& metricName, - std::string* value) { +MetricRet MetricClient::GetMetric(const std::string &addr, + const std::string &metricName, + std::string *value) { brpc::Channel httpChannel; brpc::ChannelOptions options; brpc::Controller cntl; options.protocol = brpc::PROTOCOL_HTTP; int res = httpChannel.Init(addr.c_str(), &options); if (res != 0) { - std::cout << "Init httpChannel to " << addr << " fail!" - << std::endl; + std::cout << "Init httpChannel to " << addr << " fail!" << std::endl; return MetricRet::kOtherErr; } @@ -46,17 +47,15 @@ MetricRet MetricClient::GetMetric(const std::string& addr, cntl.set_timeout_ms(FLAGS_rpcTimeout); httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); if (!cntl.Failed()) { - std::string attachment = - cntl.response_attachment().to_string(); + std::string attachment = cntl.response_attachment().to_string(); res = GetValueFromAttachment(attachment, value); return (res == 0) ? 
MetricRet::kOK : MetricRet::kOtherErr; } - bool needRetry = (cntl.Failed() && - cntl.ErrorCode() != EHOSTDOWN && - cntl.ErrorCode() != ETIMEDOUT && - cntl.ErrorCode() != brpc::ELOGOFF && - cntl.ErrorCode() != brpc::ERPCTIMEDOUT); + bool needRetry = + (cntl.Failed() && cntl.ErrorCode() != EHOSTDOWN && + cntl.ErrorCode() != ETIMEDOUT && cntl.ErrorCode() != brpc::ELOGOFF && + cntl.ErrorCode() != brpc::ERPCTIMEDOUT); uint64_t retryTimes = 0; while (needRetry && retryTimes < FLAGS_rpcRetryTimes) { cntl.Reset(); @@ -67,8 +66,7 @@ MetricRet MetricClient::GetMetric(const std::string& addr, retryTimes++; continue; } - std::string attachment = - cntl.response_attachment().to_string(); + std::string attachment = cntl.response_attachment().to_string(); res = GetValueFromAttachment(attachment, value); return (res == 0) ? MetricRet::kOK : MetricRet::kOtherErr; } @@ -78,14 +76,13 @@ MetricRet MetricClient::GetMetric(const std::string& addr, return notExist ? MetricRet::kNotFound : MetricRet::kOtherErr; } -MetricRet MetricClient::GetMetricUint(const std::string& addr, - const std::string& metricName, - uint64_t* value) { +MetricRet MetricClient::GetMetricUint(const std::string &addr, + const std::string &metricName, + uint64_t *value) { std::string str; MetricRet res = GetMetric(addr, metricName, &str); if (res != MetricRet::kOK) { - std::cout << "get metric " << metricName << " from " - << addr << " fail"; + std::cout << "get metric " << metricName << " from " << addr << " fail"; return res; } if (!curve::common::StringToUll(str, value)) { @@ -95,31 +92,37 @@ MetricRet MetricClient::GetMetricUint(const std::string& addr, return MetricRet::kOK; } -MetricRet MetricClient::GetConfValueFromMetric(const std::string& addr, - const std::string& metricName, - std::string* confValue) { +MetricRet MetricClient::GetConfValueFromMetric(const std::string &addr, + const std::string &metricName, + std::string *confValue) { std::string jsonString; brpc::Controller cntl; MetricRet res = 
GetMetric(addr, metricName, &jsonString); if (res != MetricRet::kOK) { return res; } - Json::Reader reader(Json::Features::strictMode()); + + Json::CharReaderBuilder builder; + Json::CharReaderBuilder::strictMode(&builder.settings_); + std::unique_ptr reader(builder.newCharReader()); Json::Value value; - if (!reader.parse(jsonString, value)) { - std::cout << "Parse metric as json fail" << std::endl; + JSONCPP_STRING errormsg; + if (!reader->parse(jsonString.data(), + jsonString.data() + jsonString.length(), &value, + &errormsg)) { + std::cout << "Parse metric as json fail: " << errormsg << std::endl; return MetricRet::kOtherErr; } + *confValue = value[kConfValue].asString(); return MetricRet::kOK; } -int MetricClient::GetValueFromAttachment(const std::string& attachment, - std::string* value) { +int MetricClient::GetValueFromAttachment(const std::string &attachment, + std::string *value) { auto pos = attachment.find(":"); if (pos == std::string::npos) { - std::cout << "parse response attachment fail!" - << std::endl; + std::cout << "parse response attachment fail!" << std::endl; return -1; } *value = attachment.substr(pos + 1); diff --git a/src/tools/namespace_tool_core.cpp b/src/tools/namespace_tool_core.cpp index 2cf644bcf4..427bb56078 100644 --- a/src/tools/namespace_tool_core.cpp +++ b/src/tools/namespace_tool_core.cpp @@ -203,7 +203,7 @@ int NameSpaceToolCore::UpdateFileThrottle(const std::string& fileName, params.set_limit(limit); params.set_type(type); if (burst >= 0) { - if (burst < limit) { + if (burst < static_cast(limit)) { std::cout << "burst should greater equal to limit" << std::endl; return -1; } @@ -253,7 +253,7 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, return -1; } uint64_t chunkIndex = (offset - segOffset) / segment.chunksize(); - if (chunkIndex >= segment.chunks_size()) { + if (static_cast(chunkIndex) >= segment.chunks_size()) { std::cout << "ChunkIndex exceed chunks num in segment!" 
<< std::endl; return -1; } diff --git a/src/tools/snapshot_clone_client.cpp b/src/tools/snapshot_clone_client.cpp index 02e617cfea..745ea01326 100644 --- a/src/tools/snapshot_clone_client.cpp +++ b/src/tools/snapshot_clone_client.cpp @@ -104,8 +104,7 @@ std::vector SnapshotCloneClient::GetActiveAddrs() { void SnapshotCloneClient::GetOnlineStatus( std::map* onlineStatus) { onlineStatus->clear(); - int result = 0; - for (const auto item : dummyServerMap_) { + for (const auto &item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index a2a91ed915..e6bfc116a4 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -856,7 +856,7 @@ int PrintChunkserverOnlineStatus( int i = 0; for (ChunkServerIdType csId : offlineRecover) { i++; - if (i == offlineRecover.size()) { + if (i == static_cast(offlineRecover.size())) { std::cout << csId; } else { std::cout << csId << ", "; diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index 89e7882bb0..2b54d70943 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -49,13 +49,13 @@ #include "src/tools/snapshot_clone_client.h" #include "src/common/uri_parser.h" +using curve::mds::topology::ChunkServerInfo; using curve::mds::topology::ChunkServerStatus; using curve::mds::topology::DiskState; +using curve::mds::topology::LogicalPoolInfo; using curve::mds::topology::OnlineState; using curve::mds::topology::PhysicalPoolInfo; -using curve::mds::topology::LogicalPoolInfo; using curve::mds::topology::PoolIdType; -using curve::mds::topology::ChunkServerInfo; namespace curve { namespace tool { @@ -99,14 +99,11 @@ class StatusTool : public CurveTool { std::shared_ptr copysetCheckCore, std::shared_ptr versionTool, std::shared_ptr metricClient, - std::shared_ptr snapshotClient) : - mdsClient_(mdsClient), etcdClient_(etcdClient), - 
copysetCheckCore_(copysetCheckCore), - versionTool_(versionTool), - metricClient_(metricClient), - snapshotClient_(snapshotClient), - mdsInited_(false), etcdInited_(false), - noSnapshotServer_(false) {} + std::shared_ptr snapshotClient) + : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore), + etcdClient_(etcdClient), metricClient_(metricClient), + snapshotClient_(snapshotClient), versionTool_(versionTool), + mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {} ~StatusTool() = default; /** @@ -128,7 +125,7 @@ class StatusTool : public CurveTool { * @param command:执行的命令 * @return true / false */ - static bool SupportCommand(const std::string& command); + static bool SupportCommand(const std::string &command); /** * @brief 判断集群是否健康 @@ -136,16 +133,16 @@ class StatusTool : public CurveTool { bool IsClusterHeatlhy(); private: - int Init(const std::string& command); + int Init(const std::string &command); int SpaceCmd(); int StatusCmd(); int ChunkServerListCmd(); int ServerListCmd(); int LogicalPoolListCmd(); int ChunkServerStatusCmd(); - int GetPoolsInCluster(std::vector* phyPools, - std::vector* lgPools); - int GetSpaceInfo(SpaceInfo* spaceInfo); + int GetPoolsInCluster(std::vector *phyPools, + std::vector *lgPools); + int GetSpaceInfo(SpaceInfo *spaceInfo); int PrintClusterStatus(); int PrintMdsStatus(); int PrintEtcdStatus(); @@ -153,9 +150,9 @@ class StatusTool : public CurveTool { int PrintClientStatus(); int ClientListCmd(); int ScanStatusCmd(); - void PrintCsLeftSizeStatistics(const std::string& name, - const std::map>& poolLeftSize); + void PrintCsLeftSizeStatistics( + const std::string &name, + const std::map> &poolLeftSize); int PrintSnapshotCloneStatus(); /** @@ -163,7 +160,7 @@ class StatusTool : public CurveTool { * @param command:执行的命令 * @return 需要返回true,否则返回false */ - bool CommandNeedEtcd(const std::string& command); + bool CommandNeedEtcd(const std::string &command); /** @@ -171,22 +168,22 @@ class StatusTool : public CurveTool { * 
@param command:执行的命令 * @return 需要返回true,否则返回false */ - bool CommandNeedMds(const std::string& command); + bool CommandNeedMds(const std::string &command); /** * @brief 判断命令是否需要snapshot clone server * @param command:执行的命令 * @return 需要返回true,否则返回false */ - bool CommandNeedSnapshotClone(const std::string& command); + bool CommandNeedSnapshotClone(const std::string &command); /** * @brief 打印在线状态 * @param name : 在线状态对应的名字 * @param onlineStatus 在线状态的map */ - void PrintOnlineStatus(const std::string& name, - const std::map& onlineStatus); + void PrintOnlineStatus(const std::string &name, + const std::map &onlineStatus); /** * @brief 获取并打印mds version信息 @@ -197,7 +194,7 @@ class StatusTool : public CurveTool { * @brief 检查服务是否健康 * @param name 服务名 */ - bool CheckServiceHealthy(const ServiceName& name); + bool CheckServiceHealthy(const ServiceName &name); private: // 向mds发送RPC的client diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h index 50cfbfb497..9231d1e4fc 100644 --- a/src/tools/version_tool.h +++ b/src/tools/version_tool.h @@ -49,9 +49,8 @@ class VersionTool { explicit VersionTool(std::shared_ptr mdsClient, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), - metricClient_(metricClient), - snapshotClient_(snapshotClient) {} + : mdsClient_(mdsClient), snapshotClient_(snapshotClient), + metricClient_(metricClient) {} virtual ~VersionTool() {} /** @@ -59,43 +58,45 @@ class VersionTool { * @param[out] version 版本 * @return 成功返回0,失败返回-1 */ - virtual int GetAndCheckMdsVersion(std::string* version, - std::vector* failedList); + virtual int GetAndCheckMdsVersion(std::string *version, + std::vector *failedList); /** * @brief 获取chunkserver的版本并检查版本一致性 * @param[out] version 版本 * @return 成功返回0,失败返回-1 */ - virtual int GetAndCheckChunkServerVersion(std::string* version, - std::vector* failedList); + virtual int + GetAndCheckChunkServerVersion(std::string *version, + std::vector *failedList); /** * @brief 获取snapshot clone server的版本 * 
@param[out] version 版本 * @return 成功返回0,失败返回-1 */ - virtual int GetAndCheckSnapshotCloneVersion(std::string* version, - std::vector* failedList); + virtual int + GetAndCheckSnapshotCloneVersion(std::string *version, + std::vector *failedList); /** * @brief 获取client的版本 * @param[out] versionMap process->版本->地址的映射表 * @return 成功返回0,失败返回-1 */ - virtual int GetClientVersion(ClientVersionMapType* versionMap); + virtual int GetClientVersion(ClientVersionMapType *versionMap); /** * @brief 打印每个version对应的地址 * @param versionMap version到地址列表的map */ - static void PrintVersionMap(const VersionMapType& versionMap); + static void PrintVersionMap(const VersionMapType &versionMap); /** * @brief 打印访问失败的地址 * @param failedList 访问失败的地址列表 */ - static void PrintFailedList(const std::vector& failedList); + static void PrintFailedList(const std::vector &failedList); private: /** @@ -104,17 +105,17 @@ class VersionTool { * @param[out] versionMap version到地址的map * @param[out] failedList 查询version失败的地址列表 */ - void GetVersionMap(const std::vector& addrVec, - VersionMapType* versionMap, - std::vector* failedList); + void GetVersionMap(const std::vector &addrVec, + VersionMapType *versionMap, + std::vector *failedList); /** * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 * @param addrVec 地址列表 * @param[out] processMap 不同的process对应的client的地址列表 */ - void FetchClientProcessMap(const std::vector& addrVec, - ProcessMapType* processMap); + void FetchClientProcessMap(const std::vector &addrVec, + ProcessMapType *processMap); /** * @brief 从启动server的命令行获取对应的程序的名字 @@ -129,7 +130,7 @@ class VersionTool { * @param addrVec 地址列表 * @return 进程的名字 */ - std::string GetProcessNameFromCmd(const std::string& cmd); + std::string GetProcessNameFromCmd(const std::string &cmd); private: // 向mds发送RPC的client diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 6f6a970448..d770fd6663 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ 
b/test/chunkserver/chunk_service_test.cpp @@ -159,7 +159,6 @@ TEST_F(ChunkserverTest, normal_read_write_test) { }; WaitpidGuard waitpidGuard(pid1, pid2, pid3); - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; PeerId leader; LogicPoolID logicPoolId = 1; CopysetID copysetId = 100001; diff --git a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp index 21a594c2d2..3422f0f314 100644 --- a/test/chunkserver/chunk_service_test2.cpp +++ b/test/chunkserver/chunk_service_test2.cpp @@ -164,7 +164,6 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { CopysetID copysetId = 100001; uint64_t chunkId = 1; uint64_t sn = 1; - char ch = 'a'; char expectData[kOpRequestAlignSize + 1]; ::memset(expectData, 'a', kOpRequestAlignSize); expectData[kOpRequestAlignSize] = '\0'; diff --git a/test/chunkserver/chunkserver_test.cpp b/test/chunkserver/chunkserver_test.cpp index 7963df4f23..f945e4e707 100644 --- a/test/chunkserver/chunkserver_test.cpp +++ b/test/chunkserver/chunkserver_test.cpp @@ -34,11 +34,9 @@ #include "test/client/fake/fakeMDS.h" -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9301"; // NOLINT - -char* confPath = "conf/client.conf"; +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9301"; // NOLINT butil::AtExitManager atExitManager; @@ -53,17 +51,17 @@ struct ChunkServerPackage { char **argv; }; -void* run_chunkserver_thread(void *arg) { +void *run_chunkserver_thread(void *arg) { ChunkServerPackage *package = reinterpret_cast(arg); package->chunkserver->Run(package->argc, package->argv); return NULL; } -using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; +using curve::fs::LocalFsFactory; -static int ExecCmd(const std::string& cmd) { +static int ExecCmd(const std::string &cmd) { LOG(INFO) << "executing command: " 
<< cmd; return system(cmd.c_str()); @@ -73,7 +71,6 @@ class ChunkserverTest : public ::testing::Test { public: void SetUp() { std::string filename = "test.img"; - size_t filesize = 10uL * 1024 * 1024 * 1024; mds_ = new FakeMDS(filename); @@ -87,8 +84,7 @@ class ChunkserverTest : public ::testing::Test { } private: - FakeMDS* mds_; - int fd_; + FakeMDS *mds_; }; TEST(ChunkserverCommonTest, GroupIdTest) { @@ -111,25 +107,26 @@ TEST(ChunkserverCommonTest, GroupIdTest) { TEST(ChunkServerGflagTest, test_load_gflag) { int argc = 1; - char *argvv[] = {""}; + char *argvv[] = {const_cast("")}; char **argv = argvv; gflags::ParseCommandLineFlags(&argc, &argv, true); google::CommandLineFlagInfo info; ASSERT_TRUE(GetCommandLineFlagInfo("chunkservertest", &info)); ASSERT_TRUE(info.is_default); ASSERT_EQ("testdefault", FLAGS_chunkservertest); - ASSERT_FALSE( - GetCommandLineFlagInfo("chunkservertest", &info) && !info.is_default); + ASSERT_FALSE(GetCommandLineFlagInfo("chunkservertest", &info) && + !info.is_default); - char *argvj[] = {"", "-chunkservertest=test1"}; + char *argvj[] = {const_cast(""), + const_cast("-chunkservertest=test1")}; argv = argvj; argc = 2; gflags::ParseCommandLineFlags(&argc, &argv, true); ASSERT_TRUE(GetCommandLineFlagInfo("chunkservertest", &info)); ASSERT_FALSE(info.is_default); ASSERT_EQ("test1", FLAGS_chunkservertest); - ASSERT_TRUE( - GetCommandLineFlagInfo("chunkservertest", &info) && !info.is_default); + ASSERT_TRUE(GetCommandLineFlagInfo("chunkservertest", &info) && + !info.is_default); } } // namespace chunkserver } // namespace curve diff --git a/test/chunkserver/clone/clone_core_test.cpp b/test/chunkserver/clone/clone_core_test.cpp index b245b11eb9..737b8d9422 100644 --- a/test/chunkserver/clone/clone_core_test.cpp +++ b/test/chunkserver/clone/clone_core_test.cpp @@ -38,14 +38,13 @@ namespace curve { namespace chunkserver { using curve::chunkserver::CHUNK_OP_TYPE; -using curve::fs::LocalFsFactory; using curve::fs::FileSystemType; +using 
curve::fs::LocalFsFactory; -ACTION_TEMPLATE(SaveBraftTask, - HAS_1_TEMPLATE_PARAMS(int, k), +ACTION_TEMPLATE(SaveBraftTask, HAS_1_TEMPLATE_PARAMS(int, k), AND_1_VALUE_PARAMS(value)) { auto input = static_cast(::testing::get(args)); - auto output = static_cast(value); + auto output = static_cast(value); output->data->swap(*input.data); output->done = input.done; } @@ -68,20 +67,17 @@ class CloneCoreTest : public testing::Test { } void FakeCopysetNode() { - EXPECT_CALL(*node_, IsLeaderTerm()) - .WillRepeatedly(Return(true)); - EXPECT_CALL(*node_, GetDataStore()) - .WillRepeatedly(Return(datastore_)); + EXPECT_CALL(*node_, IsLeaderTerm()).WillRepeatedly(Return(true)); + EXPECT_CALL(*node_, GetDataStore()).WillRepeatedly(Return(datastore_)); EXPECT_CALL(*node_, GetConcurrentApplyModule()) .WillRepeatedly(Return(nullptr)); EXPECT_CALL(*node_, GetAppliedIndex()) .WillRepeatedly(Return(LAST_INDEX)); } - std::shared_ptr GenerateReadRequest(CHUNK_OP_TYPE optype, - off_t offset, - size_t length) { - ChunkRequest* readRequest = new ChunkRequest(); + std::shared_ptr + GenerateReadRequest(CHUNK_OP_TYPE optype, off_t offset, size_t length) { + ChunkRequest *readRequest = new ChunkRequest(); readRequest->set_logicpoolid(LOGICPOOL_ID); readRequest->set_copysetid(COPYSET_ID); readRequest->set_chunkid(CHUNK_ID); @@ -95,31 +91,25 @@ class CloneCoreTest : public testing::Test { closure->SetRequest(readRequest); closure->SetResponse(response); std::shared_ptr req = - std::make_shared(node_, - nullptr, - cntl, - readRequest, - response, - closure); + std::make_shared(node_, nullptr, cntl, + readRequest, response, closure); return req; } void SetCloneParam(std::shared_ptr readRequest) { - ChunkRequest* request = - const_cast(readRequest->GetChunkRequest()); + ChunkRequest *request = + const_cast(readRequest->GetChunkRequest()); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); } - void CheckTask(const braft::Task& task, - off_t offset, - size_t length, - char* buf) 
{ + void CheckTask(const braft::Task &task, off_t offset, size_t length, + char *buf) { butil::IOBuf data; ChunkRequest request; auto req = ChunkOpRequest::Decode(*task.data, &request, &data, 0, PeerId("127.0.0.1:8200:0")); - auto preq = dynamic_cast(req.get()); + auto preq = dynamic_cast(req.get()); ASSERT_TRUE(preq != nullptr); ASSERT_EQ(LOGICPOOL_ID, request.logicpoolid()); @@ -143,34 +133,28 @@ class CloneCoreTest : public testing::Test { TEST_F(CloneCoreTest, ReadChunkTest1) { off_t offset = 0; size_t length = 5 * PAGE_SIZE; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); // 不会从源端拷贝数据 - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); // 获取chunk信息 CSChunkInfo info; info.isClone = false; info.pageSize = PAGE_SIZE; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(1); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(1); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); 
ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -196,93 +180,94 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .WillOnce(DoAll(SetArrayArgument<2>(chunkData, - chunkData + length), + .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + EXPECT_CALL(*node_, Propose(_)).Times(0); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, 
closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, closure->resContent_.status); - ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - length), 0); + ASSERT_EQ( + memcmp( + chunkData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + length), + 0); + delete[] chunkData; } // case2 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + 
ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - closure->resContent_.status); + closure->resContent_.status); CheckTask(task, offset, length, cloneData); // 正常propose后,会将closure交给并发层处理, // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - length), 0); + ASSERT_EQ( + memcmp( + cloneData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + length), + 0); + delete[] cloneData; } // case3 @@ -290,41 +275,39 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { info.bitmap->Clear(); info.bitmap->Set(0, 2); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 char chunkData[3 * PAGE_SIZE]; memset(chunkData, 'a', 3 * PAGE_SIZE); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, 0, 3 * PAGE_SIZE)) - 
.WillOnce(DoAll(SetArrayArgument<2>(chunkData, - chunkData + 3 * PAGE_SIZE), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArrayArgument<2>(chunkData, chunkData + 3 * PAGE_SIZE), + Return(CSErrorCode::Success))); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -335,12 +318,18 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - 3 * PAGE_SIZE), 0); + ASSERT_EQ( + memcmp( + chunkData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + 3 * PAGE_SIZE), + 0); ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str() + 3 * PAGE_SIZE, //NOLINT - 2 * PAGE_SIZE), 0); + closure->resContent_.attachment.to_string().c_str() + + 3 * PAGE_SIZE, // NOLINT + 2 * PAGE_SIZE), + 0); + delete[] cloneData; } // case4 { @@ -349,29 +338,25 @@ TEST_F(CloneCoreTest, ReadChunkTest2) { info.bitmap->Clear(); info.bitmap->Set(0, 2); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + 
GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(0); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); // 不产生PasteChunkRequest braft::Task task; - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(-1, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(-1, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, @@ -391,57 +376,58 @@ TEST_F(CloneCoreTest, ReadChunkTest3) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); SetCloneParam(readRequest); - char cloneData[length]; // NOLINT + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, 
DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly(Return(CSErrorCode::ChunkNotExistError)); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - closure->resContent_.status); + closure->resContent_.status); CheckTask(task, offset, length, cloneData); // 正常propose后,会将closure交给并发层处理, // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); - ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT - length), 0); + ASSERT_EQ( + memcmp( + cloneData, + closure->resContent_.attachment.to_string().c_str(), // NOLINT + length), + 0); + delete[] cloneData; } } @@ -464,26 +450,23 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { info.bitmap = 
std::make_shared(CHUNK_SIZE / PAGE_SIZE); info.bitmap->Clear(); info.bitmap->Set(0, 2); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); - EXPECT_CALL(*node_, Propose(_)) - .Times(0); - - ASSERT_EQ(-1, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); + + ASSERT_EQ(-1, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -491,61 +474,60 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { } // case2 { - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - char cloneData[length]; // NOLINT + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* 
closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); closure->SetFailed(); })); - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, closure->resContent_.status); + delete[] cloneData; } // case3 { - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); - char cloneData[length]; // NOLINT + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 产生PasteChunkRequest + // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, 
Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, @@ -556,6 +538,7 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 ASSERT_NE(nullptr, task.done); task.done->Run(); + delete[] cloneData; } } @@ -566,31 +549,26 @@ TEST_F(CloneCoreTest, ReadChunkErrorTest) { TEST_F(CloneCoreTest, RecoverChunkTest1) { off_t offset = 0; size_t length = 5 * PAGE_SIZE; - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // 不会从源端拷贝数据 - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); // 获取chunk信息 CSChunkInfo info; info.isClone = false; info.pageSize = PAGE_SIZE; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - 
reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -612,29 +590,26 @@ TEST_F(CloneCoreTest, RecoverChunkTest2) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, true, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, true, copyer_); // case1 { info.bitmap->Set(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); //NOLINT - EXPECT_CALL(*copyer_, DownloadAsync(_)) - .Times(0); + std::shared_ptr readRequest = GenerateReadRequest( + CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT + EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); // 不会产生PasteChunkRequest - EXPECT_CALL(*node_, Propose(_)) - .Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + EXPECT_CALL(*node_, Propose(_)).Times(0); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, @@ -645,33 +620,31 @@ 
TEST_F(CloneCoreTest, RecoverChunkTest2) { { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); //NOLINT - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = GenerateReadRequest( + CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); // closure被转交给PasteRequest处理,这里closure还未执行 ASSERT_FALSE(closure->isDone_); @@ -684,6 +657,7 @@ TEST_F(CloneCoreTest, RecoverChunkTest2) { ASSERT_TRUE(closure->isDone_); ASSERT_EQ(0, closure->resContent_.appliedindex); ASSERT_EQ(0, closure->resContent_.status); + delete[] cloneData; } } @@ -697,79 +671,75 @@ 
TEST_F(CloneCoreTest, DisablePasteTest) { info.pageSize = PAGE_SIZE; info.chunkSize = CHUNK_SIZE; info.bitmap = std::make_shared(CHUNK_SIZE / PAGE_SIZE); - std::shared_ptr core - = std::make_shared(SLICE_SIZE, false, copyer_); + std::shared_ptr core = + std::make_shared(SLICE_SIZE, false, copyer_); // case1 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = + GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 更新 applied index - EXPECT_CALL(*node_, UpdateAppliedIndex(_)) - .Times(1); + EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); // 不会产生paste chunk请求 - EXPECT_CALL(*node_, Propose(_)) - .Times(0); + EXPECT_CALL(*node_, Propose(_)).Times(0); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); ASSERT_TRUE(closure->isDone_); 
ASSERT_EQ(LAST_INDEX, closure->resContent_.appliedindex); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, - closure->resContent_.status); + closure->resContent_.status); + delete[] cloneData; } // case2 { info.bitmap->Clear(); // 每次调HandleReadRequest后会被closure释放 - std::shared_ptr readRequest - = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); //NOLINT - char cloneData[length]; // NOLINT + std::shared_ptr readRequest = GenerateReadRequest( + CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT + char *cloneData = new char[length]; memset(cloneData, 'b', length); EXPECT_CALL(*copyer_, DownloadAsync(_)) - .WillOnce(Invoke([&](DownloadClosure* closure){ + .WillOnce(Invoke([&](DownloadClosure *closure) { brpc::ClosureGuard guard(closure); - AsyncDownloadContext* context = closure->GetDownloadContext(); + AsyncDownloadContext *context = closure->GetDownloadContext(); memcpy(context->buf, cloneData, length); })); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(info), - Return(CSErrorCode::Success))); + .WillOnce( + DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 不会读chunk文件 - EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) - .Times(0); + EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); // 产生PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; - EXPECT_CALL(*node_, Propose(_)) - .WillOnce(SaveBraftTask<0>(&task)); + EXPECT_CALL(*node_, Propose(_)).WillOnce(SaveBraftTask<0>(&task)); - ASSERT_EQ(0, core->HandleReadRequest(readRequest, - readRequest->Closure())); - FakeChunkClosure* closure = - reinterpret_cast(readRequest->Closure()); + ASSERT_EQ(0, + core->HandleReadRequest(readRequest, readRequest->Closure())); + FakeChunkClosure *closure = + reinterpret_cast(readRequest->Closure()); // closure被转交给PasteRequest处理,这里closure还未执行 ASSERT_FALSE(closure->isDone_); @@ -782,6 +752,7 @@ TEST_F(CloneCoreTest, DisablePasteTest) { 
ASSERT_TRUE(closure->isDone_); ASSERT_EQ(0, closure->resContent_.appliedindex); ASSERT_EQ(0, closure->resContent_.status); + delete[] cloneData; } } diff --git a/test/chunkserver/clone/op_request_test.cpp b/test/chunkserver/clone/op_request_test.cpp index 20e821132f..8126bdc959 100644 --- a/test/chunkserver/clone/op_request_test.cpp +++ b/test/chunkserver/clone/op_request_test.cpp @@ -668,7 +668,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -687,6 +687,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { } ASSERT_TRUE(closure->isDone_); + delete[] chunkData; } /** @@ -704,7 +705,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -723,6 +724,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { cntl->response_attachment().to_string().c_str(), // NOLINT length), 0); + delete[] chunkData; } /** @@ -741,7 +743,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), @@ -762,6 +764,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { .c_str(), // NOLINT length), 0); + delete[] chunkData; } /** @@ -879,7 +882,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { 
.WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); // 读chunk文件 - char chunkData[length]; // NOLINT + char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, @@ -899,6 +902,7 @@ TEST_F(OpRequestTest, ReadChunkTest) { ASSERT_EQ(memcmp(chunkData, closure->cntl_->response_attachment().to_string().c_str(), //NOLINT length), 0); + delete[] chunkData; } /** * 测试OnApply diff --git a/test/chunkserver/conf_epoch_file_test.cpp b/test/chunkserver/conf_epoch_file_test.cpp index a6a1fc53b8..a1ee08ace3 100644 --- a/test/chunkserver/conf_epoch_file_test.cpp +++ b/test/chunkserver/conf_epoch_file_test.cpp @@ -33,20 +33,20 @@ namespace curve { namespace chunkserver { using ::testing::_; -using ::testing::Invoke; -using ::testing::Return; using ::testing::AnyNumber; -using ::testing::Matcher; +using ::testing::AtLeast; using ::testing::DoAll; -using ::testing::SetArgPointee; -using ::testing::SetArrayArgument; -using ::testing::SetArgReferee; using ::testing::InSequence; -using ::testing::AtLeast; +using ::testing::Invoke; +using ::testing::Matcher; +using ::testing::Return; using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SetArgReferee; +using ::testing::SetArrayArgument; -using curve::fs::MockLocalFileSystem; using curve::fs::FileSystemType; +using curve::fs::MockLocalFileSystem; TEST(ConfEpochFileTest, load_save) { LogicPoolID logicPoolID = 123; @@ -65,9 +65,7 @@ TEST(ConfEpochFileTest, load_save) { LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; - ASSERT_EQ(0, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, + ASSERT_EQ(0, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, &loadEpoch)); ASSERT_EQ(logicPoolID, loadLogicPoolID); ASSERT_EQ(copysetID, loadCopysetID); @@ -78,22 +76,20 @@ TEST(ConfEpochFileTest, load_save) { // load: open failed { - std::shared_ptr 
fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(-1)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, - &loadEpoch)); + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, + &loadEpoch)); } // load: open success, read failed { - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; @@ -101,118 +97,111 @@ TEST(ConfEpochFileTest, load_save) { EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1).WillOnce(Return(-1)); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, &loadEpoch)); } // load: open success, read success, decode success, crc32c right { - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":599727352}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":" + "0,\"checksum\":599727352}"; // NOLINT std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*fs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(json, json + jsonStr.size()), Return(jsonStr.size()))); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(0, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, - &loadEpoch)); + ASSERT_EQ(0, 
confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, + &loadEpoch)); } // load: open success, read success, decode failed, crc32c right { - char *json = "{\"logicPoolId"; + const char *json = "{\"logicPoolId"; std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*fs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(json, json + jsonStr.size()), Return(jsonStr.size()))); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, &loadEpoch)); } // load: open success, read success, decode success, crc32c not right { - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":123}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":" + "0,\"checksum\":123}"; // NOLINT std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); LogicPoolID loadLogicPoolID; CopysetID loadCopysetID; uint64_t loadEpoch; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Read(_, _, _, _)).Times(1) + EXPECT_CALL(*fs, Read(_, _, _, _)) + .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(json, json + jsonStr.size()), Return(jsonStr.size()))); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Load(path, - &loadLogicPoolID, - &loadCopysetID, - &loadEpoch)); + ASSERT_EQ(-1, confEpochFile.Load(path, &loadLogicPoolID, &loadCopysetID, + &loadEpoch)); } // save: open failed { - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = 
+ std::make_shared(); ConfEpochFile confEpochFile(fs); - LogicPoolID loadLogicPoolID; - CopysetID loadCopysetID; - uint64_t loadEpoch; + LogicPoolID loadLogicPoolID = 0; + CopysetID loadCopysetID = 0; + uint64_t loadEpoch = 0; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(-1)); - ASSERT_EQ(-1, confEpochFile.Save(path, - loadLogicPoolID, - loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Save(path, loadLogicPoolID, loadCopysetID, loadEpoch)); } // save: open success, write failed { - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); - LogicPoolID loadLogicPoolID; - CopysetID loadCopysetID; - uint64_t loadEpoch; + LogicPoolID loadLogicPoolID = 0; + CopysetID loadCopysetID = 0; + uint64_t loadEpoch = 0; EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(-1)); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); - ASSERT_EQ(-1, confEpochFile.Save(path, - loadLogicPoolID, - loadCopysetID, + ASSERT_EQ(-1, confEpochFile.Save(path, loadLogicPoolID, loadCopysetID, loadEpoch)); } // save: open success, write success, fsync failed { - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":599727352}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":" + "0,\"checksum\":599727352}"; // NOLINT std::string jsonStr(json); - std::shared_ptr fs - = std::make_shared(); + std::shared_ptr fs = + std::make_shared(); ConfEpochFile confEpochFile(fs); EXPECT_CALL(*fs, Open(_, _)).Times(1).WillOnce(Return(10)); - EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)).Times(1) + EXPECT_CALL(*fs, Write(_, Matcher(_), _, _)) + .Times(1) .WillOnce(Return(jsonStr.size())); EXPECT_CALL(*fs, Close(_)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*fs, Fsync(_)).Times(1).WillOnce(Return(-1)); - ASSERT_EQ(-1, confEpochFile.Save(path, - 
logicPoolID, - copysetID, - epoch)); + ASSERT_EQ(-1, confEpochFile.Save(path, logicPoolID, copysetID, epoch)); } } diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp index eee7a3fa02..4b7e221976 100644 --- a/test/chunkserver/copyset_node_test.cpp +++ b/test/chunkserver/copyset_node_test.cpp @@ -160,7 +160,6 @@ class CopysetNodeTest : public ::testing::Test { TEST_F(CopysetNodeTest, error_test) { std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -173,7 +172,7 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -235,7 +234,7 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -270,7 +269,7 @@ TEST_F(CopysetNodeTest, error_test) { files.push_back("test-1.txt"); files.push_back("test-2.txt"); - char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT + const char *json = "{\"logicPoolId\":123,\"copysetId\":1345,\"epoch\":0,\"checksum\":774340440}"; // NOLINT std::string jsonStr(json); CopysetNode copysetNode(logicPoolID, copysetID, conf); @@ -590,7 +589,6 @@ TEST_F(CopysetNodeTest, error_test) { TEST_F(CopysetNodeTest, get_conf_change) { 
std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; @@ -794,7 +792,6 @@ TEST_F(CopysetNodeTest, get_conf_change) { TEST_F(CopysetNodeTest, get_hash) { std::shared_ptr fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); //NOLINT - const uint32_t kMaxChunkSize = 16 * 1024 * 1024; std::string rmCmd("rm -f "); rmCmd += kCurveConfEpochFilename; diff --git a/test/chunkserver/datastore/datastore_mock_unittest.cpp b/test/chunkserver/datastore/datastore_mock_unittest.cpp index 9d5e8444c6..f1668aa7a6 100644 --- a/test/chunkserver/datastore/datastore_mock_unittest.cpp +++ b/test/chunkserver/datastore/datastore_mock_unittest.cpp @@ -669,8 +669,8 @@ TEST_F(CSDataStore_test, WriteChunkTest1) { SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); @@ -733,6 +733,7 @@ TEST_F(CSDataStore_test, WriteChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -751,8 +752,8 @@ TEST_F(CSDataStore_test, WriteChunkTest2) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // snchunk.correctedsn EXPECT_EQ(CSErrorCode::BackwardRequestError, @@ -789,6 +790,7 @@ TEST_F(CSDataStore_test, WriteChunkTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -812,8 +814,8 @@ TEST_F(CSDataStore_test, WriteChunkTest3) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // sn>chunk.sn 
sn(NotNull()), 0, PAGE_SIZE)) .Times(1); @@ -982,6 +986,7 @@ TEST_F(CSDataStore_test, WriteChunkTest6) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1000,8 +1005,8 @@ TEST_F(CSDataStore_test, WriteChunkTest7) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -1079,6 +1084,7 @@ TEST_F(CSDataStore_test, WriteChunkTest7) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1096,8 +1102,8 @@ TEST_F(CSDataStore_test, WriteChunkTest9) { SequenceNum sn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will not create snapshot // will copy on write EXPECT_CALL(*lfs_, Read(1, NotNull(), PAGE_SIZE + offset, length)) @@ -1131,6 +1137,7 @@ TEST_F(CSDataStore_test, WriteChunkTest9) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1155,8 +1162,8 @@ TEST_F(CSDataStore_test, WriteChunkTest10) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will update metapage EXPECT_CALL(*lfs_, Write(1, Matcher(NotNull()), 0, PAGE_SIZE)) .Times(1); @@ -1184,6 +1191,7 @@ TEST_F(CSDataStore_test, WriteChunkTest10) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1208,8 +1216,8 @@ TEST_F(CSDataStore_test, WriteChunkTest11) { SequenceNum sn = 4; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // sn>chunk.sn, sn>chunk.correctedsn 
EXPECT_EQ(CSErrorCode::SnapshotConflictError, @@ -1231,6 +1239,7 @@ TEST_F(CSDataStore_test, WriteChunkTest11) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1255,8 +1264,8 @@ TEST_F(CSDataStore_test, WriteChunkTest13) { SequenceNum correctedSn = 0; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -1417,6 +1426,7 @@ TEST_F(CSDataStore_test, WriteChunkTest13) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1441,8 +1451,8 @@ TEST_F(CSDataStore_test, WriteChunkTest14) { SequenceNum correctedSn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -1612,6 +1622,7 @@ TEST_F(CSDataStore_test, WriteChunkTest14) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1646,8 +1657,8 @@ TEST_F(CSDataStore_test, WriteChunkTest15) { SequenceNum sn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will not create snapshot // will not copy on write EXPECT_CALL(*lfs_, Write(2, Matcher(NotNull()), _, _)) @@ -1671,6 +1682,7 @@ TEST_F(CSDataStore_test, WriteChunkTest15) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1705,8 +1717,8 @@ TEST_F(CSDataStore_test, WriteChunkTest16) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will not create snapshot // will not copy on write EXPECT_CALL(*lfs_, Write(2, Matcher(NotNull()), _, _)) @@ -1733,6 +1745,7 @@ TEST_F(CSDataStore_test, WriteChunkTest16) { .Times(1); 
EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1749,8 +1762,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest1) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -1817,6 +1830,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -1834,8 +1848,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest2) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -1879,6 +1893,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -1896,8 +1911,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest3) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + FileNameOperator::GenerateSnapshotName(id, 2); @@ -2011,6 +2026,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest3) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -2028,8 +2044,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest4) { SequenceNum sn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // will Open snapshot file, snap sn equals 2 string snapPath = string(baseDir) + "/" + 
FileNameOperator::GenerateSnapshotName(id, 2); @@ -2093,6 +2109,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest4) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -2109,8 +2126,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest5) { SequenceNum sn = 1; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // create new chunk and open it string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); @@ -2211,6 +2228,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest5) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /* @@ -2233,8 +2251,8 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest6) { SequenceNum correctedSn = 0; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -2333,6 +2351,7 @@ TEST_F(CSDataStore_test, WriteChunkErrorTest6) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /** @@ -2349,8 +2368,8 @@ TEST_F(CSDataStore_test, ReadChunkTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadChunk(id, @@ -2365,6 +2384,7 @@ TEST_F(CSDataStore_test, ReadChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2381,8 +2401,8 @@ TEST_F(CSDataStore_test, ReadChunkTest2) { SequenceNum sn = 2; off_t offset = CHUNK_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test read out of range EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->ReadChunk(id, @@ 
-2415,6 +2435,7 @@ TEST_F(CSDataStore_test, ReadChunkTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2431,8 +2452,8 @@ TEST_F(CSDataStore_test, ReadChunkTest3) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test chunk exists EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .Times(1); @@ -2449,6 +2470,7 @@ TEST_F(CSDataStore_test, ReadChunkTest3) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2502,8 +2524,8 @@ TEST_F(CSDataStore_test, ReadChunkTest4) { // case1: 读取未写过区域 off_t offset = 1 * PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[2 * length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[2 * length]; + memset(buf, 0, 2 * length); EXPECT_CALL(*lfs_, Read(_, _, _, _)) .Times(0); EXPECT_EQ(CSErrorCode::PageNerverWrittenError, @@ -2561,8 +2583,8 @@ TEST_F(CSDataStore_test, ReadChunkErrorTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test read chunk failed EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .WillOnce(Return(-UT_ERRNO)); @@ -2579,6 +2601,7 @@ TEST_F(CSDataStore_test, ReadChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2595,8 +2618,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test chunk not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadSnapshotChunk(id, @@ -2611,6 +2634,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, 
Close(3)) .Times(1); + delete[] buf; } /** @@ -2627,8 +2651,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest2) { SequenceNum sn = 2; off_t offset = CHUNK_SIZE; size_t length = 2 * PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test out of range EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->ReadSnapshotChunk(id, @@ -2672,6 +2696,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -2688,8 +2713,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest3) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE * 2; - char writeBuf[length]; // NOLINT - memset(writeBuf, 0, sizeof(writeBuf)); + char *writeBuf = new char[length]; + memset(writeBuf, 0, length); // data in [PAGE_SIZE, 2*PAGE_SIZE) will be cow EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .Times(1); @@ -2715,8 +2740,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest3) { sn = 1; offset = CHUNK_SIZE; length = PAGE_SIZE * 4; - char readBuf[length]; // NOLINT - memset(readBuf, 0, sizeof(readBuf)); + char *readBuf = new char[length]; + memset(readBuf, 0, length); EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->ReadSnapshotChunk(id, sn, @@ -2746,6 +2771,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest3) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] writeBuf; + delete[] readBuf; } /** @@ -2762,8 +2789,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest4) { SequenceNum sn = 3; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test sn not exists EXPECT_EQ(CSErrorCode::ChunkNotExistError, dataStore->ReadSnapshotChunk(id, @@ -2778,6 +2805,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkTest4) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ 
-2794,8 +2822,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest1) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = PAGE_SIZE * 2; - char writeBuf[length]; // NOLINT - memset(writeBuf, 0, sizeof(writeBuf)); + char *writeBuf = new char[length]; + memset(writeBuf, 0, length); // data in [PAGE_SIZE, 2*PAGE_SIZE) will be cow EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) .Times(1); @@ -2821,8 +2849,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest1) { sn = 1; offset = 0; length = PAGE_SIZE * 4; - char readBuf[length]; // NOLINT - memset(readBuf, 0, sizeof(readBuf)); + char *readBuf = new char[length]; + memset(readBuf, 0, length); // read chunk failed EXPECT_CALL(*lfs_, Read(1, NotNull(), PAGE_SIZE, PAGE_SIZE)) .WillOnce(Return(-UT_ERRNO)); @@ -2853,6 +2881,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] writeBuf; + delete[] readBuf; } /** @@ -2869,8 +2899,8 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest2) { SequenceNum sn = 2; off_t offset = PAGE_SIZE; size_t length = 2 * PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // test in range offset = PAGE_SIZE; EXPECT_CALL(*lfs_, Read(1, NotNull(), offset + PAGE_SIZE, length)) @@ -2888,6 +2918,7 @@ TEST_F(CSDataStore_test, ReadSnapshotChunkErrorTest2) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } /** @@ -3040,7 +3071,6 @@ TEST_F(CSDataStore_test, DeleteChunkTest4) { EXPECT_TRUE(dataStore->Initialize()); ChunkID id = 2; - SequenceNum sn = 2; // case1 { @@ -3811,8 +3841,8 @@ TEST_F(CSDataStore_test, PasteChunkTest1) { SequenceNum correctedSn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4013,6 +4043,7 @@ TEST_F(CSDataStore_test, 
PasteChunkTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /* @@ -4032,8 +4063,8 @@ TEST_F(CSDataStore_test, PasteChunkErrorTest1) { SequenceNum correctedSn = 2; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4118,6 +4149,7 @@ TEST_F(CSDataStore_test, PasteChunkErrorTest1) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } /* @@ -4156,8 +4188,6 @@ TEST_F(CSDataStore_test, GetHashErrorTest2) { ChunkID id = 1; std::string hash; - off_t offset = 0; - size_t length = PAGE_SIZE + CHUNK_SIZE; // test read chunk failed EXPECT_CALL(*lfs_, Read(1, NotNull(), 0, 4096)) .WillOnce(Return(-UT_ERRNO)); @@ -4203,10 +4233,9 @@ TEST_F(CSDataStore_test, CloneChunkUnAlignedTest) { ChunkID id = 3; SequenceNum sn = 2; SequenceNum correctedSn = 3; - off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4370,6 +4399,7 @@ TEST_F(CSDataStore_test, CloneChunkUnAlignedTest) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } TEST_F(CSDataStore_test, CloneChunkAlignedTest) { @@ -4382,8 +4412,8 @@ TEST_F(CSDataStore_test, CloneChunkAlignedTest) { SequenceNum correctedSn = 3; off_t offset = 0; size_t length = PAGE_SIZE; - char buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); CSChunkInfo info; // 创建 clone chunk { @@ -4468,6 +4498,7 @@ TEST_F(CSDataStore_test, CloneChunkAlignedTest) { .Times(1); EXPECT_CALL(*lfs_, Close(4)) .Times(1); + delete[] buf; } TEST_F(CSDataStore_test, NormalChunkAlignmentTest) { @@ -4477,10 +4508,9 @@ TEST_F(CSDataStore_test, NormalChunkAlignmentTest) { ChunkID id = 2; SequenceNum sn = 2; - off_t offset = 0; size_t length = 512; - char 
buf[length]; // NOLINT - memset(buf, 0, sizeof(buf)); + char *buf = new char[length]; + memset(buf, 0, length); // write unaligned test { @@ -4546,6 +4576,7 @@ TEST_F(CSDataStore_test, NormalChunkAlignmentTest) { .Times(1); EXPECT_CALL(*lfs_, Close(3)) .Times(1); + delete[] buf; } } // namespace chunkserver diff --git a/test/chunkserver/heartbeat_test_common.cpp b/test/chunkserver/heartbeat_test_common.cpp index 146d354ac9..20d6b444f8 100644 --- a/test/chunkserver/heartbeat_test_common.cpp +++ b/test/chunkserver/heartbeat_test_common.cpp @@ -26,7 +26,7 @@ uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT uint32_t chunk_size = 16 * 1024 * 1024; // NOLINT -static char* confPath[3] = { +static const char* confPath[3] = { "./8200/chunkserver.conf", "./8201/chunkserver.conf", "./8202/chunkserver.conf", @@ -74,10 +74,6 @@ void HeartbeatTestCommon::CleanPeer( std::string peersStr = info.peers(0).address(); - if (info.has_configchangeinfo()) { - const ConfigChangeInfo& cxInfo = info.configchangeinfo(); - } - { // answer with cleaning peer response CopySetConf* conf = resp->add_needupdatecopysets(); diff --git a/test/chunkserver/heartbeat_test_main.cpp b/test/chunkserver/heartbeat_test_main.cpp index d25cc3c518..9e9f848dfa 100644 --- a/test/chunkserver/heartbeat_test_main.cpp +++ b/test/chunkserver/heartbeat_test_main.cpp @@ -31,7 +31,7 @@ #include "test/chunkserver/heartbeat_test_common.h" #include "test/integration/common/config_generator.h" -static char *param[3][15] = { +static const char *param[3][15] = { { "heartbeat_test", "-chunkServerIp=127.0.0.1", @@ -135,7 +135,7 @@ int main(int argc, char *argv[]) { * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 */ return RunChunkServer(i, sizeof(param[i]) / sizeof(char *), - param[i]); + const_cast(param[i])); } } @@ -172,8 +172,8 @@ int main(int argc, char *argv[]) { /* * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 */ - ret = - RunChunkServer(1, sizeof(param[1]) / sizeof(char *), param[1]); + ret = RunChunkServer(1, 
sizeof(param[1]) / sizeof(char *), + const_cast(param[1])); return ret; } sleep(2); diff --git a/test/chunkserver/multiple_copysets_io_test.cpp b/test/chunkserver/multiple_copysets_io_test.cpp index d8163159c3..c19d4ed505 100644 --- a/test/chunkserver/multiple_copysets_io_test.cpp +++ b/test/chunkserver/multiple_copysets_io_test.cpp @@ -145,7 +145,7 @@ int64_t chunk_size = 0; int64_t nr_chunks = 0; int64_t nr_copysets = 0; int64_t chunks_per_copyset = 0; -ThreadInfo thread_infos[8] = {0}; +ThreadInfo thread_infos[8] = {}; LogicPoolID poolId = 10000; CopysetID copysetIdBase = 100; @@ -791,7 +791,7 @@ int main(int argc, char *argv[]) { clock_gettime(CLOCK_REALTIME, &t1); - ThreadInfo total_info = {0}; + ThreadInfo total_info = {}; total_info.io_time = time_diff(t0, t1); total_info.iodepth = FLAGS_iodepth; threads_stats(FLAGS_thread_num, &total_info); diff --git a/test/chunkserver/op_request_test.cpp b/test/chunkserver/op_request_test.cpp index 6fecb3535c..ac16c16466 100644 --- a/test/chunkserver/op_request_test.cpp +++ b/test/chunkserver/op_request_test.cpp @@ -735,11 +735,7 @@ TEST(ChunkOpRequestTest, OnApplyErrorTest) { TEST(ChunkOpRequestTest, OnApplyFromLogTest) { LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; - uint64_t chunkId = 12345; - size_t offset = 0; - uint32_t size = 16; uint64_t sn = 1; - uint64_t appliedIndex = 12; uint32_t followScanRpcTimeoutMs = 1000; Configuration conf; diff --git a/test/chunkserver/raftlog/test_curve_segment.cpp b/test/chunkserver/raftlog/test_curve_segment.cpp index da2f369dee..16e8999845 100644 --- a/test/chunkserver/raftlog/test_curve_segment.cpp +++ b/test/chunkserver/raftlog/test_curve_segment.cpp @@ -135,7 +135,7 @@ TEST_F(CurveSegmentTest, open_segment) { // create and open std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); ASSERT_EQ(0, seg1->create()); 
ASSERT_TRUE(seg1->is_open()); @@ -191,7 +191,7 @@ TEST_F(CurveSegmentTest, closed_segment) { // create and open std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); ASSERT_EQ(0, seg1->create()); ASSERT_TRUE(seg1->is_open()); diff --git a/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp b/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp index 253b124ac3..dfc9c7ab9e 100644 --- a/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp +++ b/test/chunkserver/raftlog/test_curve_segment_log_storage.cpp @@ -155,13 +155,13 @@ TEST_F(CurveSegmentLogStorageTest, basic_test) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 1000, 5); @@ -200,7 +200,7 @@ TEST_F(CurveSegmentLogStorageTest, basic_test) { // append path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145L); ASSERT_EQ(0, prepare_segment(path)); for (int i = 5001; i <= 7000; i++) { int64_t index = i; @@ -273,10 +273,10 @@ TEST_F(CurveSegmentLogStorageTest, append_close_load_append) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" 
CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 600, 5); ASSERT_EQ(countWalSegmentFile(), storage->GetStatus().walSegmentFileCount); @@ -293,7 +293,7 @@ TEST_F(CurveSegmentLogStorageTest, append_close_load_append) { // append entry path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097L); ASSERT_EQ(0, prepare_segment(path)); braft::IOMetric metric; for (int i = 600; i < 1000; i++) { @@ -349,7 +349,7 @@ TEST_F(CurveSegmentLogStorageTest, data_lost) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 100, 5); ASSERT_EQ(countWalSegmentFile(), storage->GetStatus().walSegmentFileCount); @@ -393,7 +393,7 @@ TEST_F(CurveSegmentLogStorageTest, compatibility) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 3001); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 3001L); ASSERT_EQ(0, prepare_segment(path)); braft::IOMetric metric; for (int i = 600; i < 1000; i++) { @@ -451,13 +451,13 @@ TEST_F(CurveSegmentLogStorageTest, basic_test_without_direct) { // append entry std::string path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 1L); ASSERT_EQ(0, prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 2049L); ASSERT_EQ(0, 
prepare_segment(path)); path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 4097L); ASSERT_EQ(0, prepare_segment(path)); append_entries(storage, 1000, 5); @@ -496,7 +496,7 @@ TEST_F(CurveSegmentLogStorageTest, basic_test_without_direct) { // append path = kRaftLogDataDir; - butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145); + butil::string_appendf(&path, "/" CURVE_SEGMENT_OPEN_PATTERN, 6145L); ASSERT_EQ(0, prepare_segment(path)); for (int i = 5001; i <= 7000; i++) { int64_t index = i; diff --git a/test/chunkserver/server.cpp b/test/chunkserver/server.cpp index 94c40d6368..88de6da291 100644 --- a/test/chunkserver/server.cpp +++ b/test/chunkserver/server.cpp @@ -35,32 +35,30 @@ #include "src/common/uri_parser.h" #include "src/chunkserver/raftsnapshot/curve_snapshot_storage.h" -using curve::chunkserver::CopysetNodeOptions; +using curve::chunkserver::ConcurrentApplyModule; using curve::chunkserver::Configuration; +using curve::chunkserver::CopysetID; using curve::chunkserver::CopysetNodeManager; -using curve::chunkserver::concurrent::ConcurrentApplyModule; -using curve::chunkserver::concurrent::ConcurrentApplyOption; +using curve::chunkserver::CopysetNodeOptions; using curve::chunkserver::FilePool; +using curve::chunkserver::FilePoolHelper; using curve::chunkserver::FilePoolOptions; -using curve::chunkserver::ConcurrentApplyModule; -using curve::common::UriParser; using curve::chunkserver::LogicPoolID; -using curve::chunkserver::CopysetID; -using curve::common::Peer; using curve::chunkserver::PeerId; +using curve::chunkserver::concurrent::ConcurrentApplyModule; +using curve::chunkserver::concurrent::ConcurrentApplyOption; +using curve::common::Peer; +using curve::common::UriParser; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using 
curve::chunkserver::FilePoolHelper; -DEFINE_string(ip, - "127.0.0.1", +DEFINE_string(ip, "127.0.0.1", "Initial configuration of the replication group"); DEFINE_int32(port, 8200, "Listen port of this peer"); DEFINE_string(copyset_dir, "local://./runlog/chunkserver_test0", - "copyset data dir"); -DEFINE_string(conf, - "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", + "copyset data dir"); +DEFINE_string(conf, "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0", "Initial configuration of the replication group"); DEFINE_int32(election_timeout_ms, 1000, "election timeout"); DEFINE_int32(snapshot_interval_s, 5, "snapshot interval"); @@ -72,8 +70,7 @@ DEFINE_bool(create_chunkfilepool, true, "create chunkfile pool"); butil::AtExitManager atExitManager; -void CreateChunkFilePool(const std::string& dirname, - uint64_t chunksize, +void CreateChunkFilePool(const std::string &dirname, uint64_t chunksize, std::shared_ptr fsptr) { std::string datadir = dirname + "/chunkfilepool"; std::string metapath = dirname + "/chunkfilepool.meta"; @@ -83,9 +80,8 @@ void CreateChunkFilePool(const std::string& dirname, memset(data, 0, 8192); fsptr->Mkdir(datadir); while (count <= 20) { - std::string filename = dirname + - "/chunkfilepool/" + - std::to_string(count); + std::string filename = + dirname + "/chunkfilepool/" + std::to_string(count); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); if (fd < 0) { LOG(ERROR) << "Create file failed!"; @@ -93,8 +89,8 @@ void CreateChunkFilePool(const std::string& dirname, } else { LOG(INFO) << filename.c_str() << " created!"; } - for (int i = 0; i <= chunksize/4096; i++) { - fsptr->Write(fd, data, i*4096, 4096); + for (int i = 0; i <= chunksize / 4096; i++) { + fsptr->Write(fd, data, i * 4096, 4096); } fsptr->Close(fd); count++; @@ -109,12 +105,8 @@ void CreateChunkFilePool(const std::string& dirname, memcpy(cpopt.filePoolDir, datadir.c_str(), datadir.size()); memcpy(cpopt.metaPath, metapath.c_str(), metapath.size()); - int ret = 
FilePoolHelper::PersistEnCodeMetaInfo( - fsptr, - chunksize, - 4096, - datadir, - metapath); + (void)FilePoolHelper::PersistEnCodeMetaInfo(fsptr, chunksize, 4096, datadir, + metapath); } int main(int argc, char *argv[]) { @@ -131,13 +123,13 @@ int main(int argc, char *argv[]) { curve::chunkserver::CurveSnapshotStorage::set_server_addr(addr); if (server.Start(FLAGS_port, NULL) != 0) { - LOG(ERROR) << "Fail to start Server: " - << errno << ", " << strerror(errno); + LOG(ERROR) << "Fail to start Server: " << errno << ", " + << strerror(errno); return -1; } - std::shared_ptr - fs(LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); + std::shared_ptr fs( + LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); const uint32_t kMaxChunkSize = 16 * 1024 * 1024; // TODO(yyk) 这部分实现不太优雅,后续进行重构 std::string copysetUri = FLAGS_copyset_dir + "/copysets"; @@ -161,8 +153,8 @@ int main(int argc, char *argv[]) { copysetNodeOptions.localFileSystem = fs; std::string chunkDataDir; - std::string - protocol = UriParser::ParseUri(FLAGS_copyset_dir, &chunkDataDir); + std::string protocol = + UriParser::ParseUri(FLAGS_copyset_dir, &chunkDataDir); if (protocol.empty()) { LOG(FATAL) << "not support chunk data uri's protocol" << " error chunkDataDir is: " << chunkDataDir; @@ -197,7 +189,7 @@ int main(int argc, char *argv[]) { ConcurrentApplyOption opt{2, 1, 2, 1}; LOG_IF(FATAL, false == copysetNodeOptions.concurrentapply->Init(opt)) - << "Failed to init concurrent apply module"; + << "Failed to init concurrent apply module"; curve::chunkserver::Configuration conf; if (conf.parse_from(FLAGS_conf) != 0) { @@ -215,16 +207,15 @@ int main(int argc, char *argv[]) { CopysetNodeManager::GetInstance().Init(copysetNodeOptions); CopysetNodeManager::GetInstance().Run(); - CopysetNodeManager::GetInstance().CreateCopysetNode(FLAGS_logic_pool_id, - FLAGS_copyset_id, - peers); + CopysetNodeManager::GetInstance().CreateCopysetNode( + FLAGS_logic_pool_id, FLAGS_copyset_id, peers); /* Wait until 'CTRL-C' is 
pressed. then Stop() and Join() the service */ server.RunUntilAskedToQuit(); LOG(INFO) << "server test service is going to quit"; - CopysetNodeManager::GetInstance().DeleteCopysetNode( - FLAGS_logic_pool_id, FLAGS_copyset_id); + CopysetNodeManager::GetInstance().DeleteCopysetNode(FLAGS_logic_pool_id, + FLAGS_copyset_id); return 0; } diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index 3c7975d571..3ddf32f27e 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) { "curve_log_inprogress_10088")) .WillOnce(Return(-1)); - //失败的情况下不应删除 + // 失败的情况下不应删除 EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555")) .Times(0); diff --git a/test/client/client_mdsclient_metacache_unittest.cpp b/test/client/client_mdsclient_metacache_unittest.cpp index 4296630965..7a7b5fcee6 100644 --- a/test/client/client_mdsclient_metacache_unittest.cpp +++ b/test/client/client_mdsclient_metacache_unittest.cpp @@ -55,10 +55,11 @@ uint32_t chunk_size = 4 * 1024 * 1024; uint32_t segment_size = 1 * 1024 * 1024 * 1024; -std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT -std::string configpath = "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:29104"; // NOLINT +std::string configpath = // NOLINT + "./test/client/configs/client_mdsclient_metacache.conf"; // NOLINT -extern curve::client::FileClient* globalclient; +extern curve::client::FileClient *globalclient; namespace curve { namespace client { @@ -190,7 +191,6 @@ TEST_F(MDSClientTest, Createfile) { TEST_F(MDSClientTest, MkDir) { std::string dirpath = "/1"; - size_t len = 4 * 1024 * 1024; // set response file exist ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileExists); @@ -244,7 +244,6 @@ TEST_F(MDSClientTest, MkDir) { TEST_F(MDSClientTest, Closefile) { std::string filename = "/1_userinfo_"; - 
size_t len = 4 * 1024 * 1024; // file not exist ::curve::mds::CloseFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kFileNotExists); @@ -289,7 +288,6 @@ TEST_F(MDSClientTest, Closefile) { TEST_F(MDSClientTest, Openfile) { std::string filename = "/1_userinfo_"; - size_t len = 4 * 1024 * 1024; /** * set openfile response */ @@ -635,7 +633,6 @@ TEST_F(MDSClientTest, Extendfile) { TEST_F(MDSClientTest, Deletefile) { LOG(INFO) << "Deletefile======================================="; std::string filename1 = "/1_userinfo_"; - uint64_t newsize = 10 * 1024 * 1024 * 1024ul; // set response file exist ::curve::mds::DeleteFileResponse response; @@ -723,7 +720,6 @@ TEST_F(MDSClientTest, Deletefile) { TEST_F(MDSClientTest, Rmdir) { std::string filename1 = "/1/"; - uint64_t newsize = 10 * 1024 * 1024 * 1024ul; // set response dir not exist ::curve::mds::DeleteFileResponse response; @@ -895,9 +891,9 @@ TEST_F(MDSClientTest, GetFileInfo) { curvefsservice.SetGetFileInfoFakeReturn(fakeret2); curvefsservice.CleanRetryTimes(); - ASSERT_EQ(LIBCURVE_ERROR::FAILED, - mdsclient_.GetFileInfo(filename.c_str(), userinfo, - finfo, &fEpoch)); + ASSERT_EQ( + LIBCURVE_ERROR::FAILED, + mdsclient_.GetFileInfo(filename.c_str(), userinfo, finfo, &fEpoch)); delete fakeret; delete fakeret2; @@ -912,16 +908,17 @@ TEST_F(MDSClientTest, GetOrAllocateSegment) { fi.chunksize = 4 * 1024 * 1024; fi.segmentsize = 1 * 1024 * 1024 * 1024ul; - std::chrono::system_clock::time_point start, end; - auto startTimer = [&start]() { start = std::chrono::system_clock::now(); }; - auto endTimer = [&end]() { end = std::chrono::system_clock::now(); }; - auto checkTimer = [&start, &end](uint64_t min, uint64_t max) { - auto elpased = - std::chrono::duration_cast(end - start) - .count(); - ASSERT_GE(elpased, min); - ASSERT_LE(elpased, max); - }; + // std::chrono::system_clock::time_point start, end; + // auto startTimer = [&start]() { start = std::chrono::system_clock::now(); + // }; auto endTimer = 
[&end]() { end = std::chrono::system_clock::now(); }; + // auto checkTimer = [&start, &end](uint64_t min, uint64_t max) { + // auto elpased = + // std::chrono::duration_cast(end - + // start) + // .count(); + // ASSERT_GE(elpased, min); + // ASSERT_LE(elpased, max); + // }; // TEST CASE: GetOrAllocateSegment failed, block until response ok // curve::mds::GetOrAllocateSegmentResponse resp; @@ -1102,8 +1099,8 @@ TEST_F(MDSClientTest, GetServerList) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation* cslocs; - ::curve::mds::topology::CopySetServerInfo* csinfo; + ::curve::common::ChunkServerLocation *cslocs; + ::curve::mds::topology::CopySetServerInfo *csinfo; for (int j = 0; j < 256; j++) { csinfo = response_1.add_csinfo(); csinfo->set_copysetid(j); @@ -1264,8 +1261,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { response_1.set_statuscode(0); uint32_t chunkserveridc = 1; - ::curve::common::ChunkServerLocation* cslocs; - ::curve::mds::topology::CopySetServerInfo* csinfo; + ::curve::common::ChunkServerLocation *cslocs; + ::curve::mds::topology::CopySetServerInfo *csinfo; csinfo = response_1.add_csinfo(); csinfo->set_copysetid(1234); for (int i = 0; i < 4; i++) { @@ -1707,7 +1704,6 @@ TEST_F(MDSClientTest, ListDir) { curvefsservice.SetListDir(fakeret); - int arrsize; std::vector filestatVec; int ret = globalclient->Listdir(filename1, userinfo, &filestatVec); ASSERT_EQ(ret, -1 * LIBCURVE_ERROR::NOTEXIST); @@ -1736,7 +1732,6 @@ TEST_F(MDSClientTest, ListDir) { curvefsservice.SetListDir(fakeret1); ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Listdir(filename1, userinfo, &filestatVec)); - int arraysize = 0; C_UserInfo_t cuserinfo; memcpy(cuserinfo.owner, "test", 5); FileStatInfo *filestat = new FileStatInfo[5]; @@ -1823,7 +1818,7 @@ TEST_F(MDSClientTest, ListDir) { TEST(LibcurveInterface, InvokeWithOutInit) { CurveAioContext aioctx; UserInfo_t userinfo; - C_UserInfo_t *ui; + C_UserInfo_t *ui = nullptr; FileClient fc; 
ASSERT_EQ(-LIBCURVE_ERROR::FAILED, fc.Create("", userinfo, 0)); @@ -1930,8 +1925,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { } } - GetLeaderResponse2 - MakeResponse(const curve::client::PeerAddr &addr) { + GetLeaderResponse2 MakeResponse(const curve::client::PeerAddr &addr) { GetLeaderResponse2 response; curve::common::Peer *peer = new curve::common::Peer(); peer->set_address(addr.ToString()); @@ -2003,8 +1997,7 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { // 测试第二次拉取新的leader,直接跳过第一个chunkserver,查找第2,3两个 int32_t currentLeaderIndex = 0; - curve::client::PeerAddr currentLeader = - internalAddrs[currentLeaderIndex]; + curve::client::PeerAddr currentLeader = internalAddrs[currentLeaderIndex]; response = MakeResponse(currentLeader); fakeret1 = FakeReturn(nullptr, static_cast(&response)); @@ -2369,8 +2362,8 @@ TEST_F(MDSClientRefreshSessionTest, NoStartDummyServerTest) { } // namespace client } // namespace curve -const std::vector clientConf { - std::string("mds.listen.addr=") + mdsMetaServerAddr, +const std::vector clientConf{ + std::string("mds.listen.addr=") + std::string(mdsMetaServerAddr), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), std::string("chunkserver.opMaxRetry=3"), @@ -2383,7 +2376,7 @@ const std::vector clientConf { std::string("throttle.enable=true"), }; -int main(int argc, char* argv[]) { +int main(int argc, char *argv[]) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); @@ -2393,8 +2386,8 @@ int main(int argc, char* argv[]) { std::unique_ptr cluster(new curve::CurveCluster()); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); return RUN_ALL_TESTS(); } diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 94e518bd51..e30175cbf6 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -159,8 +159,6 @@ 
TEST(ClientSession, LeaseTaskTest) { } } - auto iomanager = fileinstance.GetIOManager4File(); - curve::client::LeaseExecutor* lease = fileinstance.GetLeaseExecutor(); // 5. set refresh AuthFail diff --git a/test/client/fake/fakeMDS.cpp b/test/client/fake/fakeMDS.cpp index 8ec2ada4d9..65266ebb44 100644 --- a/test/client/fake/fakeMDS.cpp +++ b/test/client/fake/fakeMDS.cpp @@ -293,7 +293,7 @@ bool FakeMDS::StartService() { /** * set get snap allocate info */ - FakeReturn* snapfakeret = new FakeReturn(nullptr, static_cast(getallocateresponse)); // NOLINT + // FakeReturn* snapfakeret = new FakeReturn(nullptr, static_cast(getallocateresponse)); // NOLINT fakecurvefsservice_.SetGetSnapshotSegmentInfo(fakeret); /** @@ -328,7 +328,6 @@ bool FakeMDS::StartService() { /** * set list physical pool response */ - ListPhysicalPoolResponse* listphypoolresp = new ListPhysicalPoolResponse(); FakeReturn* fakeListPPRet = new FakeReturn(nullptr, response); faketopologyservice_.fakelistpoolret_ = fakeListPPRet; diff --git a/test/client/fake/fakeMDS.h b/test/client/fake/fakeMDS.h index a2c0d49ca2..e29f251c26 100644 --- a/test/client/fake/fakeMDS.h +++ b/test/client/fake/fakeMDS.h @@ -204,6 +204,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { LOG(INFO) << "request filename = " << request->filename(); ASSERT_EQ(request->filename()[0], '/'); }; + (void)checkFullpath; fiu_do_on("test/client/fake/fakeMDS.GetOrAllocateSegment", checkFullpath()); diff --git a/test/client/fake/mock_schedule.cpp b/test/client/fake/mock_schedule.cpp index 2495ec1bcf..b53a3b3444 100644 --- a/test/client/fake/mock_schedule.cpp +++ b/test/client/fake/mock_schedule.cpp @@ -43,9 +43,7 @@ int Schedule::ScheduleRequest( const std::vector& reqlist) { // LOG(INFO) << "ENTER MOCK ScheduleRequest"; char fakedate[10] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k'}; - curve::client::OpType type = curve::client::OpType::UNKNOWN; int processed = 0; - int totallength = 0; std::vector datavec; if 
(enableScheduleFailed) { @@ -71,9 +69,10 @@ int Schedule::ScheduleRequest( auto req = iter->done_->GetReqCtx(); if (iter->optype_ == curve::client::OpType::READ_SNAP) { - char buf[iter->rawlength_]; // NOLINT + char *buf = new char[iter->rawlength_]; memset(buf, fakedate[processed % 10], iter->rawlength_); iter->readData_.append(buf, iter->rawlength_); + delete[] buf; } if (iter->optype_ == curve::client::OpType::GET_CHUNK_INFO) { @@ -82,9 +81,10 @@ int Schedule::ScheduleRequest( } if (iter->optype_ == curve::client::OpType::READ) { - char buffer[iter->rawlength_]; // NOLINT + char *buffer = new char[iter->rawlength_]; memset(buffer, fakedate[processed % 10], iter->rawlength_); iter->readData_.append(buffer, iter->rawlength_); + delete[] buffer; // LOG(ERROR) << "request split" // << ", off = " << iter->offset_ @@ -96,7 +96,6 @@ int Schedule::ScheduleRequest( } if (iter->optype_ == curve::client::OpType::WRITE) { - type = curve::client::OpType::WRITE; writeData.append(iter->writeData_); } processed++; diff --git a/test/client/iotracker_splitor_unittest.cpp b/test/client/iotracker_splitor_unittest.cpp index 35a09b7f81..fd4df4eaed 100644 --- a/test/client/iotracker_splitor_unittest.cpp +++ b/test/client/iotracker_splitor_unittest.cpp @@ -339,7 +339,6 @@ TEST_F(IOTrackerSplitorTest, AsyncStartRead) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); CurveAioContext aioctx; @@ -374,7 +373,6 @@ TEST_F(IOTrackerSplitorTest, AsyncStartWrite) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); CurveAioContext aioctx; @@ -420,7 +418,6 @@ TEST_F(IOTrackerSplitorTest, StartRead) { mockschuler->DelegateToFake(); curve::client::IOManager4File* 
iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); uint64_t offset = 4 * 1024 * 1024 - 4 * 1024; @@ -451,7 +448,6 @@ TEST_F(IOTrackerSplitorTest, StartWrite) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); uint64_t offset = 4 * 1024 * 1024 - 4 * 1024; @@ -518,7 +514,6 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWrite) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -656,7 +651,6 @@ TEST_F(IOTrackerSplitorTest, ManagerStartRead) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -687,7 +681,6 @@ TEST_F(IOTrackerSplitorTest, ManagerStartWrite) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -781,7 +774,6 @@ TEST_F(IOTrackerSplitorTest, BoundaryTEST) { MockRequestScheduler* mockschuler = new MockRequestScheduler; mockschuler->DelegateToFake(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); auto ioctxmana = fileinstance_->GetIOManager4File(); ioctxmana->SetRequestScheduler(mockschuler); @@ -1179,7 +1171,6 @@ TEST_F(IOTrackerSplitorTest, StartReadNotAllocateSegment) { mockschuler->DelegateToFake(); curve::client::IOManager4File* 
iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); uint64_t offset = 1 * 1024 * 1024 * 1024 + 4 * 1024 * 1024 - 4 * 1024; @@ -1207,7 +1198,6 @@ TEST_F(IOTrackerSplitorTest, AsyncStartReadNotAllocateSegment) { mockschuler->DelegateToFake(); curve::client::IOManager4File* iomana = fileinstance_->GetIOManager4File(); - MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); iomana->SetRequestScheduler(mockschuler); CurveAioContext aioctx; diff --git a/test/client/libcbd_ext4_test.cpp b/test/client/libcbd_ext4_test.cpp index 704bbcb10a..a240a3fd77 100644 --- a/test/client/libcbd_ext4_test.cpp +++ b/test/client/libcbd_ext4_test.cpp @@ -42,7 +42,7 @@ TEST(TestLibcbdExt4, InitTest) { memset(&opt, 0, sizeof(opt)); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); @@ -67,7 +67,7 @@ TEST(TestLibcbdExt4, ReadWriteTest) { memset(&opt, 0, sizeof(opt)); memset(buf, 'a', BUFSIZE); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); @@ -122,7 +122,7 @@ TEST(TestLibcbdExt4, AioReadWriteTest) { memset(&opt, 0, sizeof(opt)); memset(buf, 'a', BUFSIZE); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); @@ -175,7 +175,7 @@ TEST(TestLibcbdExt4, IncreaseEpochTest) { memset(&opt, 0, sizeof(opt)); - opt.datahome = "."; + opt.datahome = const_cast("."); ret = cbd_lib_init(&opt); ASSERT_EQ(ret, 0); diff --git a/test/client/libcbd_libcurve_test.cpp b/test/client/libcbd_libcurve_test.cpp index 14e96e94e3..3f582b8a3c 100644 --- a/test/client/libcbd_libcurve_test.cpp +++ b/test/client/libcbd_libcurve_test.cpp @@ -133,7 +133,7 @@ TEST_F(TestLibcbdLibcurve, InitTest) { globalclientinited_ = false; memset(&opt, 0, sizeof(opt)); // testing with no conf specified - opt.conf = ""; + opt.conf = const_cast(""); ret = cbd_lib_init(&opt); 
ASSERT_NE(ret, 0); ret = cbd_lib_fini(); diff --git a/test/client/libcurve_interface_unittest.cpp b/test/client/libcurve_interface_unittest.cpp index 19edf9dfa4..c5df4982e9 100644 --- a/test/client/libcurve_interface_unittest.cpp +++ b/test/client/libcurve_interface_unittest.cpp @@ -113,7 +113,7 @@ TEST(TestLibcurveInterface, InterfaceTest) { ASSERT_EQ(GetClusterId(clusterId, 1), -LIBCURVE_ERROR::FAILED); // libcurve file operation - int temp = Create(filename.c_str(), &userinfo, FLAGS_test_disk_size); + (void)Create(filename.c_str(), &userinfo, FLAGS_test_disk_size); int fd = Open(filename.c_str(), &userinfo); @@ -895,7 +895,6 @@ TEST(TestLibcurveInterface, ResumeTimeoutBackoff) { ASSERT_NE(fd, -1); - CliServiceFake *cliservice = mds.GetCliService(); std::vector chunkservice = mds.GetFakeChunkService(); char *buffer = new char[8 * 1024]; diff --git a/test/client/mds_failover_test.cpp b/test/client/mds_failover_test.cpp index ebf8190e92..e95912f610 100644 --- a/test/client/mds_failover_test.cpp +++ b/test/client/mds_failover_test.cpp @@ -66,7 +66,6 @@ TEST(MDSChangeTest, MDSFailoverTest) { rpcexcutor.SetOption(metaopt.rpcRetryOpt); - int currentWorkMDSIndex = 1; int mds0RetryTimes = 0; int mds1RetryTimes = 0; int mds2RetryTimes = 0; diff --git a/test/client/metacache_test.cpp b/test/client/metacache_test.cpp index 725ac25f51..8f17b39d2b 100644 --- a/test/client/metacache_test.cpp +++ b/test/client/metacache_test.cpp @@ -91,7 +91,6 @@ TEST_F(MetaCacheTest, TestCleanChunksInSegment) { InsertMetaCache(fileLength, segmentSize, chunkSize); uint64_t totalChunks = fileLength / chunkSize; - uint64_t totalSegments = fileLength / segmentSize; uint64_t chunksInSegment = segmentSize / chunkSize; ASSERT_EQ(totalChunks, diff --git a/test/client/snapshot_service_unittest.cpp b/test/client/snapshot_service_unittest.cpp index b942b34604..56b1c9cd20 100644 --- a/test/client/snapshot_service_unittest.cpp +++ b/test/client/snapshot_service_unittest.cpp @@ -565,7 +565,6 @@ 
TEST(SnapInstance, DeleteChunkSnapshotTest) { SnapshotClient cl; ASSERT_TRUE(!cl.Init(opt)); - auto max_split_size_kb = 1024 * 64; MockRequestScheduler* mocksch = new MockRequestScheduler; mocksch->DelegateToFake(); diff --git a/test/common/count_down_event_test.cpp b/test/common/count_down_event_test.cpp index f31ebb6b9b..8bdc5c9681 100644 --- a/test/common/count_down_event_test.cpp +++ b/test/common/count_down_event_test.cpp @@ -85,7 +85,6 @@ TEST(CountDownEventTest, basic) { t1.join(); } { - int i = 0; CountDownEvent cond(0); cond.WaitFor(1000); } diff --git a/test/common/dlock_test.cpp b/test/common/dlock_test.cpp index f4354b4faa..cd62367a3a 100644 --- a/test/common/dlock_test.cpp +++ b/test/common/dlock_test.cpp @@ -46,7 +46,7 @@ class TestDLock : public ::testing::Test { system("rm -fr testDLock.etcd"); client_ = std::make_shared(); char endpoints[] = "127.0.0.1:2375"; - EtcdConf conf = { endpoints, strlen(endpoints), 1000 }; + EtcdConf conf = {endpoints, static_cast(strlen(endpoints)), 1000}; ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->Init(conf, 200, 3)); diff --git a/test/common/rw_lock_test.cpp b/test/common/rw_lock_test.cpp index 9801fcea45..ced1f06e48 100644 --- a/test/common/rw_lock_test.cpp +++ b/test/common/rw_lock_test.cpp @@ -80,6 +80,7 @@ TEST(RWLockTest, basic_test) { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); auto j = writeCnt + i; + (void)j; } }; { @@ -149,6 +150,7 @@ TEST(BthreadRWLockTest, basic_test) { for (uint64_t i = 0; i < 10000; ++i) { ReadLockGuard readLockGuard(rwlock); auto j = writeCnt + i; + (void)j; } }; { diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp index 0ac05897b6..cb44a36b09 100644 --- a/test/common/task_thread_pool_test.cpp +++ b/test/common/task_thread_pool_test.cpp @@ -35,11 +35,13 @@ using curve::common::CountDownEvent; void TestAdd1(int a, double b, CountDownEvent *cond) { double c = a + b; + (void)c; cond->Signal(); } int 
TestAdd2(int a, double b, CountDownEvent *cond) { double c = a + b; + (void)c; cond->Signal(); return 0; } diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index 42952560b2..934cc0e0fc 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -49,7 +49,7 @@ const ChunkSizeType CHUNK_SIZE = 16 * kMB; const char* kFakeMdsAddr = "127.0.0.1:9079"; -static char *chunkServerParams[1][16] = { +static const char *chunkServerParams[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", @@ -103,7 +103,7 @@ class ChunkServerIoTest : public testing::Test { ASSERT_TRUE(cg1_.Generate()); paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; - params_.push_back(chunkServerParams[0]); + params_.push_back(const_cast(chunkServerParams[0])); // 初始化chunkfilepool,这里会预先分配一些chunk lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); @@ -157,9 +157,7 @@ class ChunkServerIoTest : public testing::Test { void TestBasicIO(std::shared_ptr verify) { uint64_t chunkId = 1; - off_t offset = 0; int length = kOpRequestAlignSize; - int ret = 0; const SequenceNum sn1 = 1; std::string data(length * 4, 0); // Now we will zeroing chunk file, even though it fill '0' in start @@ -214,9 +212,7 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn1 = 1; const SequenceNum sn2 = 2; const SequenceNum sn3 = 3; - off_t offset = 0; int length = kOpRequestAlignSize; - int ret = 0; std::string data(length * 4, 0); std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index e3a4808626..e5b7ef5238 100644 --- 
a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -778,7 +778,6 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { // 1. chunk文件不存在 ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; - SequenceNum sn0 = 0; SequenceNum sn1 = 1; SequenceNum sn2 = 2; string sourceFile = CURVEFS_FILENAME; @@ -1006,7 +1005,6 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { // 1. 创建克隆文件 ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; - ChunkID cloneChunk2 = 340; SequenceNum sn2 = 2; SequenceNum sn3 = 3; SequenceNum sn4 = 4; @@ -1074,7 +1072,6 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { // 1. 创建克隆文件 ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; - ChunkID cloneChunk2 = 342; SequenceNum sn2 = 2; SequenceNum sn3 = 3; SequenceNum sn4 = 4; diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index beb0dc9b87..a5c8806e73 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -45,7 +45,7 @@ using curve::common::Thread; const char* kFakeMdsAddr = "127.0.0.1:9329"; -static char *chunkConcurrencyParams1[1][16] = { +static const char *chunkConcurrencyParams1[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -66,7 +66,7 @@ static char *chunkConcurrencyParams1[1][16] = { }, }; -static char *chunkConcurrencyParams2[1][16] = { +static const char *chunkConcurrencyParams2[1][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -121,7 +121,7 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer1)] = 0; - params.push_back(chunkConcurrencyParams1[0]); + params.push_back(const_cast(chunkConcurrencyParams1[0])); } virtual void TearDown() { std::string rmdir1("rm -fr "); @@ -192,7 +192,7 @@ class 
ChunkServerConcurrentFromFilePoolTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer1)] = 0; - params.push_back(chunkConcurrencyParams2[0]); + params.push_back(const_cast(chunkConcurrencyParams2[0])); // 初始化FilePool,这里会预先分配一些chunk lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); @@ -1401,7 +1401,6 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - const int sn = 1; // 1. 启动一个成员的复制组 PeerCluster cluster("InitShutdown-cluster", diff --git a/test/integration/chunkserver/datastore/datastore_restart_test.cpp b/test/integration/chunkserver/datastore/datastore_restart_test.cpp index 75f5b2f6fa..f7a9d9ae5a 100644 --- a/test/integration/chunkserver/datastore/datastore_restart_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_restart_test.cpp @@ -156,8 +156,8 @@ class ExecWrite : public ExecStep { } void Dump() override { - printf("WriteChunk, id = %llu, sn = %llu, offset = %llu, " - "size = %llu, data = %c.\n", + printf("WriteChunk, id = %lu, sn = %lu, offset = %lu, " + "size = %lu, data = %c.\n", id_, sn_, data_.offset, data_.length, data_.data); } @@ -182,8 +182,8 @@ class ExecPaste : public ExecStep { } void Dump() override { - printf("PasteChunk, id = %llu, offset = %llu, " - "size = %llu, data = %c.\n", + printf("PasteChunk, id = %lu, offset = %lu, " + "size = %lu, data = %c.\n", id_, data_.offset, data_.length, data_.data); } @@ -204,7 +204,7 @@ class ExecDelete : public ExecStep { } void Dump() override { - printf("DeleteChunk, id = %llu, sn = %llu.\n", id_, sn_); + printf("DeleteChunk, id = %lu, sn = %lu.\n", id_, sn_); } private: @@ -226,7 +226,7 @@ class ExecDeleteSnapshot : public ExecStep { void Dump() override { printf("DeleteSnapshotChunkOrCorrectSn, " - "id = %llu, correctedSn = %llu.\n", id_, correctedSn_); + "id = %lu, correctedSn = %lu.\n", id_, correctedSn_); } 
private: @@ -251,8 +251,8 @@ class ExecCreateClone : public ExecStep { } void Dump() override { - printf("CreateCloneChunk, id = %llu, sn = %llu, correctedSn = %llu, " - "chunk size = %llu, location = %s.\n", + printf("CreateCloneChunk, id = %lu, sn = %lu, correctedSn = %lu, " + "chunk size = %u, location = %s.\n", id_, sn_, correctedSn_, size_, location_.c_str()); } diff --git a/test/integration/chunkserver/datastore/datastore_stress_test.cpp b/test/integration/chunkserver/datastore/datastore_stress_test.cpp index 5f2af8086b..2364d61dd2 100644 --- a/test/integration/chunkserver/datastore/datastore_stress_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_stress_test.cpp @@ -64,8 +64,7 @@ TEST_F(StressTestSuit, StressTest) { auto RunStress = [&](int threadNum, int rwPercent, int ioNum) { uint64_t beginTime = TimeUtility::GetTimeofDayUs(); - const int kThreadNum = threadNum; - Thread threads[kThreadNum]; + Thread *threads = new Thread[threadNum]; int readThreadNum = threadNum * rwPercent / 100; int ioNumAvg = ioNum / threadNum; int idRange = 100; @@ -77,17 +76,18 @@ TEST_F(StressTestSuit, StressTest) { threads[i] = std::thread(RunWrite, idRange, ioNumAvg); } - for (auto& t : threads) { - t.join(); + for (int i = 0; i < threadNum; ++i) { + threads[i].join(); } uint64_t endTime = TimeUtility::GetTimeofDayUs(); uint64_t iops = ioNum * 1000000L / (endTime - beginTime); - printf("Total time used: %llu us\n", endTime - beginTime); + printf("Total time used: %lu us\n", endTime - beginTime); printf("Thread number: %d\n", threadNum); printf("read write percent: %d\n", rwPercent); printf("io num: %d\n", ioNum); - printf("iops: %llu\n", iops); + printf("iops: %lu\n", iops); + delete[] threads; }; printf("===============TEST WRITE==================\n"); diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp index f542449fe9..246b5ff801 100644 --- 
a/test/integration/client/unstable_chunkserver_exception_test.cpp +++ b/test/integration/client/unstable_chunkserver_exception_test.cpp @@ -311,9 +311,9 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { ::Create(filename.c_str(), &info, 10ull * 1024 * 1024 * 1024); int fd = ::Open(filename.c_str(), &info); - int ret = ::Read(fd, readBuff.get(), offset, length); + (void)::Read(fd, readBuff.get(), offset, length); LOG(INFO) << "Read finish, here"; - ret = ::Write(fd, readBuff.get(), offset, length); + (void)::Write(fd, readBuff.get(), offset, length); LOG(INFO) << "Write finish, here"; ::Close(fd); diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp index 1013711343..57bfb553a8 100644 --- a/test/integration/cluster_common/cluster.cpp +++ b/test/integration/cluster_common/cluster.cpp @@ -772,7 +772,6 @@ int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, addr.sin_port = htons(port); addr.sin_addr.s_addr = inet_addr(res[0].c_str()); - bool satisfy = false; uint64_t start = ::curve::common::TimeUtility::GetTimeofDayMs(); while (::curve::common::TimeUtility::GetTimeofDayMs() - start < timeoutMs) { int connectRes = diff --git a/test/integration/common/peer_cluster.cpp b/test/integration/common/peer_cluster.cpp index c448897b0b..f09db13283 100644 --- a/test/integration/common/peer_cluster.cpp +++ b/test/integration/common/peer_cluster.cpp @@ -52,8 +52,8 @@ PeerCluster::PeerCluster(const std::string &clusterName, clusterName_(clusterName), snapshotIntervalS_(1), electionTimeoutMs_(1000), - params_(params), paramsIndexs_(paramsIndexs), + params_(params), isFakeMdsStart_(false) { logicPoolID_ = logicPoolID; copysetID_ = copysetID; diff --git a/test/integration/raft/raft_config_change_test.cpp b/test/integration/raft/raft_config_change_test.cpp index a1704dba01..c585022469 100644 --- a/test/integration/raft/raft_config_change_test.cpp +++ b/test/integration/raft/raft_config_change_test.cpp @@ 
-44,7 +44,7 @@ const char kRaftConfigChangeTestLogDir[] = "./runlog/RaftConfigChange"; const char* kFakeMdsAddr = "127.0.0.1:9080"; const uint32_t kOpRequestAlignSize = 4096; -static char* raftConfigParam[5][16] = { +static const char* raftConfigParam[5][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -224,11 +224,11 @@ class RaftConfigChangeTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer4)] = 3; paramsIndexs[PeerCluster::PeerToId(peer5)] = 4; - params.push_back(raftConfigParam[0]); - params.push_back(raftConfigParam[1]); - params.push_back(raftConfigParam[2]); - params.push_back(raftConfigParam[3]); - params.push_back(raftConfigParam[4]); + params.push_back(const_cast(raftConfigParam[0])); + params.push_back(const_cast(raftConfigParam[1])); + params.push_back(const_cast(raftConfigParam[2])); + params.push_back(const_cast(raftConfigParam[3])); + params.push_back(const_cast(raftConfigParam[4])); } virtual void TearDown() { // wait for process exit @@ -1415,7 +1415,6 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; - const int kMaxLoop = 10; butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()); diff --git a/test/integration/raft/raft_log_replication_test.cpp b/test/integration/raft/raft_log_replication_test.cpp index a560b538bb..0a0b25cf54 100644 --- a/test/integration/raft/raft_log_replication_test.cpp +++ b/test/integration/raft/raft_log_replication_test.cpp @@ -46,7 +46,7 @@ const uint32_t kOpRequestAlignSize = 4096; const char kRaftLogRepTestLogDir[] = "./runlog/RaftLogRep"; const char* kFakeMdsAddr = "127.0.0.1:9070"; -static char* raftLogParam[5][16] = { +static const char* raftLogParam[5][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -224,11 +224,11 @@ class RaftLogReplicationTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer4)] = 
3; paramsIndexs[PeerCluster::PeerToId(peer5)] = 4; - params.push_back(raftLogParam[0]); - params.push_back(raftLogParam[1]); - params.push_back(raftLogParam[2]); - params.push_back(raftLogParam[3]); - params.push_back(raftLogParam[4]); + params.push_back(const_cast(raftLogParam[0])); + params.push_back(const_cast(raftLogParam[1])); + params.push_back(const_cast(raftLogParam[2])); + params.push_back(const_cast(raftLogParam[3])); + params.push_back(const_cast(raftLogParam[4])); } virtual void TearDown() { std::string rmdir1("rm -fr "); diff --git a/test/integration/raft/raft_snapshot_test.cpp b/test/integration/raft/raft_snapshot_test.cpp index b77d53f954..661dad2862 100644 --- a/test/integration/raft/raft_snapshot_test.cpp +++ b/test/integration/raft/raft_snapshot_test.cpp @@ -45,7 +45,7 @@ const char* kFakeMdsAddr = "127.0.0.1:9320"; const uint32_t kOpRequestAlignSize = 4096; -static char *raftVoteParam[4][16] = { +static const char *raftVoteParam[4][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -190,10 +190,10 @@ class RaftSnapshotTest : public testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer3_)] = 2; paramsIndexs_[PeerCluster::PeerToId(peer4_)] = 3; - params_.push_back(raftVoteParam[0]); - params_.push_back(raftVoteParam[1]); - params_.push_back(raftVoteParam[2]); - params_.push_back(raftVoteParam[3]); + params_.push_back(const_cast(raftVoteParam[0])); + params_.push_back(const_cast(raftVoteParam[1])); + params_.push_back(const_cast(raftVoteParam[2])); + params_.push_back(const_cast(raftVoteParam[3])); // 配置默认raft client option defaultCliOpt_.max_retry = 3; diff --git a/test/integration/raft/raft_vote_test.cpp b/test/integration/raft/raft_vote_test.cpp index f83c7a2d1d..032e5e397c 100644 --- a/test/integration/raft/raft_vote_test.cpp +++ b/test/integration/raft/raft_vote_test.cpp @@ -45,7 +45,7 @@ const char* kFakeMdsAddr = "127.0.0.1:9089"; const uint32_t kOpRequestAlignSize = 4096; -static char* raftVoteParam[3][16] = { +static const char* 
raftVoteParam[3][16] = { { "chunkserver", "-chunkServerIp=127.0.0.1", @@ -159,9 +159,9 @@ class RaftVoteTest : public testing::Test { paramsIndexs[PeerCluster::PeerToId(peer2)] = 1; paramsIndexs[PeerCluster::PeerToId(peer3)] = 2; - params.push_back(raftVoteParam[0]); - params.push_back(raftVoteParam[1]); - params.push_back(raftVoteParam[2]); + params.push_back(const_cast(raftVoteParam[0])); + params.push_back(const_cast(raftVoteParam[1])); + params.push_back(const_cast(raftVoteParam[2])); } virtual void TearDown() { std::string rmdir1("rm -fr "); diff --git a/test/kvstorageclient/etcdclient_test.cpp b/test/kvstorageclient/etcdclient_test.cpp index 12df8de8a6..80f3336f3e 100644 --- a/test/kvstorageclient/etcdclient_test.cpp +++ b/test/kvstorageclient/etcdclient_test.cpp @@ -54,7 +54,7 @@ class TestEtcdClinetImp : public ::testing::Test { client_ = std::make_shared(); char endpoints[] = "127.0.0.1:2377"; - EtcdConf conf = { endpoints, strlen(endpoints), 1000 }; + EtcdConf conf = {endpoints, static_cast(strlen(endpoints)), 1000}; ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->Init(conf, 200, 3)); @@ -204,13 +204,15 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } // 5. 
rename file: rename file9 ~ file10, file10本来不存在 - Operation op1{ OpType::OpDelete, const_cast(keyMap[9].c_str()), - const_cast(fileInfo9.c_str()), keyMap[9].size(), - fileInfo9.size() }; - Operation op2{ OpType::OpPut, const_cast(fileKey10.c_str()), - const_cast(fileInfo10.c_str()), fileKey10.size(), - fileInfo10.size() }; - std::vector ops{ op1, op2 }; + Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), + const_cast(fileInfo9.c_str()), + static_cast(keyMap[9].size()), + static_cast(fileInfo9.size())}; + Operation op2{OpType::OpPut, const_cast(fileKey10.c_str()), + const_cast(fileInfo10.c_str()), + static_cast(fileKey10.size()), + static_cast(fileInfo10.size())}; + std::vector ops{op1, op2}; ASSERT_EQ(EtcdErrCode::EtcdOK, client_->TxnN(ops)); // cannot get file9 std::string out; @@ -222,12 +224,14 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName10, fileinfo.filename()); // 6. snapshot of keyMap[6] - Operation op3{ OpType::OpPut, const_cast(keyMap[6].c_str()), - const_cast(fileInfo6.c_str()), keyMap[6].size(), - fileInfo6.size() }; - Operation op4{ OpType::OpPut, const_cast(snapshotKey6.c_str()), - const_cast(snapshotInfo6.c_str()), - snapshotKey6.size(), snapshotInfo6.size() }; + Operation op3{OpType::OpPut, const_cast(keyMap[6].c_str()), + const_cast(fileInfo6.c_str()), + static_cast(keyMap[6].size()), + static_cast(fileInfo6.size())}; + Operation op4{OpType::OpPut, const_cast(snapshotKey6.c_str()), + const_cast(snapshotInfo6.c_str()), + static_cast(snapshotKey6.size()), + static_cast(snapshotInfo6.size())}; ops.clear(); ops.emplace_back(op3); ops.emplace_back(op4); @@ -256,8 +260,9 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ("200", out); // 8. 
rename file: rename file7 ~ file8 - Operation op8{ OpType::OpDelete, const_cast(keyMap[7].c_str()), "", - keyMap[7].size(), 0 }; + Operation op8{OpType::OpDelete, const_cast(keyMap[7].c_str()), + const_cast(""), static_cast(keyMap[7].size()), + 0}; FileInfo newFileInfo7; newFileInfo7.CopyFrom(fileInfo7); newFileInfo7.set_parentid(fileInfo8.parentid()); @@ -267,10 +272,11 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { newFileInfo7.filename()); std::string encodeNewFileInfo7; ASSERT_TRUE(newFileInfo7.SerializeToString(&encodeNewFileInfo7)); - Operation op9{ OpType::OpPut, - const_cast(encodeNewFileInfo7Key.c_str()), - const_cast(encodeNewFileInfo7.c_str()), - encodeNewFileInfo7Key.size(), encodeNewFileInfo7.size() }; + Operation op9{OpType::OpPut, + const_cast(encodeNewFileInfo7Key.c_str()), + const_cast(encodeNewFileInfo7.c_str()), + static_cast(encodeNewFileInfo7Key.size()), + static_cast(encodeNewFileInfo7.size())}; ops.clear(); ops.emplace_back(op8); ops.emplace_back(op9); @@ -300,9 +306,10 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdDeadlineExceeded, client_->TxnN(ops)); client_->SetTimeout(5000); - Operation op5{ OpType(5), const_cast(snapshotKey6.c_str()), - const_cast(snapshotInfo6.c_str()), - snapshotKey6.size(), snapshotInfo6.size() }; + Operation op5{OpType(5), const_cast(snapshotKey6.c_str()), + const_cast(snapshotInfo6.c_str()), + static_cast(snapshotKey6.size()), + static_cast(snapshotInfo6.size())}; ops.clear(); ops.emplace_back(op3); ops.emplace_back(op5); @@ -384,7 +391,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { int dialtTimeout = 10000; int retryTimes = 3; char endpoints[] = "127.0.0.1:2377"; - EtcdConf conf = { endpoints, strlen(endpoints), 20000 }; + EtcdConf conf = {endpoints, static_cast(strlen(endpoints)), 20000}; std::string leaderName1("leader1"); std::string leaderName2("leader2"); uint64_t leaderOid; diff --git a/test/mds/nameserver2/chunk_allocator_test.cpp 
b/test/mds/nameserver2/chunk_allocator_test.cpp index 780cb090d9..332f3e28f8 100644 --- a/test/mds/nameserver2/chunk_allocator_test.cpp +++ b/test/mds/nameserver2/chunk_allocator_test.cpp @@ -28,13 +28,13 @@ #include "src/mds/nameserver2/chunk_allocator.h" #include "src/mds/common/mds_define.h" -using ::testing::Return; +using ::curve::mds::topology::CopysetIdInfo; +using ::curve::mds::topology::PoolIdType; using ::testing::_; +using ::testing::AtLeast; using ::testing::DoAll; +using ::testing::Return; using ::testing::SetArgPointee; -using ::testing::AtLeast; -using ::curve::mds::topology::CopysetIdInfo; -using ::curve::mds::topology::PoolIdType; namespace curve { namespace mds { @@ -42,7 +42,7 @@ namespace mds { const uint64_t DefaultChunkSize = 16 * kMB; const uint64_t DefaultSegmentSize = kGB * 1; -class ChunkAllocatorTest: public ::testing::Test { +class ChunkAllocatorTest : public ::testing::Test { protected: void SetUp() override { mockChunkIDGenerator_ = std::make_shared(); @@ -59,20 +59,25 @@ class ChunkAllocatorTest: public ::testing::Test { TEST_F(ChunkAllocatorTest, testcase1) { auto impl = std::make_shared( - mockTopologyChunkAllocator_, - mockChunkIDGenerator_); + mockTopologyChunkAllocator_, mockChunkIDGenerator_); // test segment pointer == nullptr ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, nullptr), false); + DefaultSegmentSize, DefaultChunkSize, + 0, nullptr), + false); // test offset not align with segmentsize PageFileSegment segment; ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 1, &segment), false); + DefaultSegmentSize, DefaultChunkSize, + 1, &segment), + false); // test chunkSize not align with segmentsize ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize - 1, 0, &segment), false); + DefaultSegmentSize, + DefaultChunkSize - 1, 0, &segment), + false); // test 
topologyAdmin_AllocateChunkRoundRobinInSingleLogicalPool // return false @@ -80,12 +85,14 @@ TEST_F(ChunkAllocatorTest, testcase1) { PageFileSegment segment; EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) .Times(1) .WillOnce(Return(false)); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + DefaultSegmentSize, + DefaultChunkSize, 0, &segment), + false); } // test topologyAdmin_ Allocate return size error @@ -94,13 +101,14 @@ TEST_F(ChunkAllocatorTest, testcase1) { std::vector copysetInfos; EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), Return(true))); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + DefaultSegmentSize, + DefaultChunkSize, 0, &segment), + false); } // test GenChunkID error @@ -108,23 +116,25 @@ TEST_F(ChunkAllocatorTest, testcase1) { PoolIdType logicalPoolID = 1; PageFileSegment segment; std::vector copysetInfos; - for (int i = 0; i != DefaultSegmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {logicalPoolID, i}; + for (int i = 0; i != DefaultSegmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {logicalPoolID, + static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), Return(true))); EXPECT_CALL(*mockChunkIDGenerator_, GenChunkID(_)) - .Times(1) - 
.WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + DefaultSegmentSize, + DefaultChunkSize, 0, &segment), + false); } // test ok @@ -132,46 +142,49 @@ TEST_F(ChunkAllocatorTest, testcase1) { PoolIdType logicalPoolID = 1; PageFileSegment segment; std::vector copysetInfos; - for (int i = 0; i != DefaultSegmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {logicalPoolID, i}; + for (int i = 0; i != DefaultSegmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {logicalPoolID, + static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), Return(true))); EXPECT_CALL(*mockChunkIDGenerator_, GenChunkID(_)) - .Times(1) - .WillOnce(Return(false)); + .Times(1) + .WillOnce(Return(false)); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - DefaultSegmentSize, DefaultChunkSize, 0, &segment), false); + DefaultSegmentSize, + DefaultChunkSize, 0, &segment), + false); } // test logicalid not same { PageFileSegment segment; - PoolIdType logicalPoolID = 1; std::vector copysetInfos; - uint64_t segmentSize = DefaultChunkSize*2; + uint64_t segmentSize = DefaultChunkSize * 2; - for (int i = 0; i != segmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {i, i}; + for (int i = 0; i != segmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {static_cast(i), + static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), - Return(true))); + 
.WillOnce(DoAll(SetArgPointee<3>(copysetInfos), Return(true))); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - segmentSize, DefaultChunkSize, 0, &segment), false); + segmentSize, DefaultChunkSize, 0, + &segment), + false); } @@ -181,39 +194,41 @@ TEST_F(ChunkAllocatorTest, testcase1) { PoolIdType logicalPoolID = 1; std::vector copysetInfos; - uint64_t segmentSize = DefaultChunkSize*2; + uint64_t segmentSize = DefaultChunkSize * 2; - for (int i = 0; i != segmentSize/DefaultChunkSize; i++) { - CopysetIdInfo info = {logicalPoolID, i}; + for (int i = 0; i != segmentSize / DefaultChunkSize; i++) { + CopysetIdInfo info = {logicalPoolID, + static_cast(i)}; copysetInfos.push_back(info); } EXPECT_CALL(*mockTopologyChunkAllocator_, - AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) + AllocateChunkRoundRobinInSingleLogicalPool(_, _, _, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<3>(copysetInfos), Return(true))); EXPECT_CALL(*mockChunkIDGenerator_, GenChunkID(_)) - .Times(AtLeast(segmentSize/DefaultChunkSize)) - .WillRepeatedly(DoAll(SetArgPointee<0>(1), Return(true))); + .Times(AtLeast(segmentSize / DefaultChunkSize)) + .WillRepeatedly(DoAll(SetArgPointee<0>(1), Return(true))); ASSERT_EQ(impl->AllocateChunkSegment(FileType::INODE_PAGEFILE, - segmentSize, DefaultChunkSize, 0, &segment), true); + segmentSize, DefaultChunkSize, 0, + &segment), + true); PageFileSegment expectSegment; expectSegment.set_chunksize(DefaultChunkSize); expectSegment.set_segmentsize(segmentSize); expectSegment.set_startoffset(0); expectSegment.set_logicalpoolid(logicalPoolID); - for (uint32_t i = 0; i < segmentSize/DefaultChunkSize ; i++) { - PageFileChunkInfo* chunkinfo = expectSegment.add_chunks(); + for (uint32_t i = 0; i < segmentSize / DefaultChunkSize; i++) { + PageFileChunkInfo *chunkinfo = expectSegment.add_chunks(); chunkinfo->set_chunkid(1); chunkinfo->set_copysetid(i); LOG(INFO) << "chunkid = " << 1 
<< ", copysetid = " << i; } ASSERT_EQ(segment.SerializeAsString(), - expectSegment.SerializeAsString()); + expectSegment.SerializeAsString()); } } } // namespace mds diff --git a/test/mds/nameserver2/curvefs_test.cpp b/test/mds/nameserver2/curvefs_test.cpp index e08ddada5e..795f60a4c7 100644 --- a/test/mds/nameserver2/curvefs_test.cpp +++ b/test/mds/nameserver2/curvefs_test.cpp @@ -1512,7 +1512,6 @@ TEST_F(CurveFSTest, testRenameFile) { // new file exist, rename success { - uint64_t fileId = 10; FileInfo fileInfo1; FileInfo fileInfo2; FileInfo fileInfo3; diff --git a/test/mds/schedule/leaderScheduler_test.cpp b/test/mds/schedule/leaderScheduler_test.cpp index e109647809..3be00637b0 100644 --- a/test/mds/schedule/leaderScheduler_test.cpp +++ b/test/mds/schedule/leaderScheduler_test.cpp @@ -132,7 +132,6 @@ TEST_F(TestLeaderSchedule, test_copySet_has_candidate) { PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -173,7 +172,6 @@ TEST_F(TestLeaderSchedule, test_cannot_get_chunkServerInfo) { PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -218,7 +216,6 @@ TEST_F(TestLeaderSchedule, test_no_need_tranferLeaderOut) { PeerInfo peer2(2, 2, 2, "192.168.10.2", 9000); PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = 
::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -265,7 +262,6 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_normal) { PeerInfo peer5(5, 5, 5, "192.168.10.5", 9000); PeerInfo peer6(6, 6, 6, "192.168.10.6", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -361,7 +357,6 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { PeerInfo peer5(5, 5, 5, "192.168.10.5", 9000); PeerInfo peer6(6, 6, 6, "192.168.10.6", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -452,7 +447,6 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); PeerInfo peer4(3, 4, 4, "192.168.10.4", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = ::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( @@ -535,7 +529,6 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { PeerInfo peer3(3, 3, 3, "192.168.10.3", 9000); PeerInfo peer4(3, 4, 4, "192.168.10.4", 9000); auto onlineState = ::curve::mds::topology::OnlineState::ONLINE; - auto offlineState = ::curve::mds::topology::OnlineState::OFFLINE; auto diskState = ::curve::mds::topology::DiskState::DISKNORMAL; auto statInfo = 
::curve::mds::heartbeat::ChunkServerStatisticInfo(); ChunkServerInfo csInfo1( diff --git a/test/mds/schedule/recoverScheduler_test.cpp b/test/mds/schedule/recoverScheduler_test.cpp index 036b21526f..c7c11b299e 100644 --- a/test/mds/schedule/recoverScheduler_test.cpp +++ b/test/mds/schedule/recoverScheduler_test.cpp @@ -214,7 +214,6 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { ChunkServerIdType id1 = 1; ChunkServerIdType id2 = 2; ChunkServerIdType id3 = 3; - ChunkServerIdType id4 = 4; Operator op; EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInLogicalPool(_)) .WillRepeatedly(Return(90)); diff --git a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp index f6fdfa107c..b8b3ddb148 100644 --- a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp +++ b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp @@ -44,16 +44,16 @@ using ::curve::mds::topology::MockTopology; -using ::curve::mds::topology::Server; using ::curve::mds::topology::ChunkServer; -using ::curve::mds::topology::CopySetInfo; using ::curve::mds::topology::ChunkServerState; +using ::curve::mds::topology::CopySetInfo; +using ::curve::mds::topology::Server; using ::curve::mds::topology::TopologyOption; -using ::curve::mds::topology::ZoneIdType; -using ::curve::mds::topology::ServerIdType; using ::curve::mds::topology::ChunkServerIdType; using ::curve::mds::topology::LogicalPoolType; +using ::curve::mds::topology::ServerIdType; +using ::curve::mds::topology::ZoneIdType; using ::curve::mds::topology::ChunkServerStatus; using ::curve::mds::topology::OnlineState; @@ -62,14 +62,14 @@ using ::curve::mds::topology::ChunkServerFilter; using ::curve::mds::topology::CopySetFilter; using ::curve::mds::copyset::ChunkServerInfo; -using ::curve::mds::copyset::CopysetPolicy; +using ::curve::mds::copyset::ClusterInfo; +using ::curve::mds::copyset::Copyset; +using ::curve::mds::copyset::CopysetConstrait; +using ::curve::mds::copyset::CopysetManager; using 
::curve::mds::copyset::CopysetPermutationPolicy; using ::curve::mds::copyset::CopysetPermutationPolicyNXX; +using ::curve::mds::copyset::CopysetPolicy; using ::curve::mds::copyset::CopysetZoneShufflePolicy; -using ::curve::mds::copyset::Copyset; -using ::curve::mds::copyset::ClusterInfo; -using ::curve::mds::copyset::CopysetManager; -using ::curve::mds::copyset::CopysetConstrait; namespace curve { namespace mds { namespace schedule { @@ -78,10 +78,10 @@ class FakeTopologyStat; class FakeTopo : public ::curve::mds::topology::TopologyImpl { public: - FakeTopo() : TopologyImpl( - std::make_shared(), - std::make_shared(), - std::make_shared()) {} + FakeTopo() + : TopologyImpl(std::make_shared(), + std::make_shared(), + std::make_shared()) {} void BuildMassiveTopo() { constexpr int serverNum = 9; @@ -91,18 +91,20 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { // gen server for (int i = 1; i <= serverNum; i++) { - std::string internalHostIP = "10.192.0." + std::to_string(i+1); - serverMap_[i]= Server(static_cast(i), "", - internalHostIP, 0, "", 0, i % zoneNum + 1, 1, ""); + std::string internalHostIP = "10.192.0." 
+ std::to_string(i + 1); + serverMap_[i] = + Server(static_cast(i), "", internalHostIP, 0, "", + 0, i % zoneNum + 1, 1, ""); } // gen chunkserver for (int i = 1; i <= serverNum; i++) { for (int j = 1; j <= diskNumPerServer; j++) { - ChunkServerIdType id = j + diskNumPerServer * (i-1); - ChunkServer chunkserver(static_cast(id), - "", "sata", i, serverMap_[i].GetInternalHostIp(), 9000+j, - "", ChunkServerStatus::READWRITE); + ChunkServerIdType id = j + diskNumPerServer * (i - 1); + ChunkServer chunkserver( + static_cast(id), "", "sata", i, + serverMap_[i].GetInternalHostIp(), 9000 + j, "", + ChunkServerStatus::READWRITE); chunkserver.SetOnlineState(OnlineState::ONLINE); chunkServerMap_[id] = chunkserver; } @@ -133,15 +135,16 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { ::curve::mds::topology::CopySetInfo info(0, id++); info.SetCopySetMembers(it.replicas); info.SetLeader(*it.replicas.begin()); - copySetMap_[info.GetCopySetKey()] = info; + copySetMap_[info.GetCopySetKey()] = info; } logicalPoolSet_.insert(0); } - std::vector GetLogicalPoolInCluster( - LogicalPoolFilter filter = [](const LogicalPool&) { - return true;}) const override { + std::vector + GetLogicalPoolInCluster(LogicalPoolFilter filter = [](const LogicalPool &) { + return true; + }) const override { std::vector ret; for (auto lid : logicalPoolSet_) { ret.emplace_back(lid); @@ -149,35 +152,34 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { return ret; } - std::vector GetChunkServerInCluster( - ChunkServerFilter filter = [](const ChunkServer&) { - return true;}) const override { + std::vector + GetChunkServerInCluster(ChunkServerFilter filter = [](const ChunkServer &) { + return true; + }) const override { std::vector ret; - for (auto it = chunkServerMap_.begin(); - it != chunkServerMap_.end(); - it++) { + for (auto it = chunkServerMap_.begin(); it != chunkServerMap_.end(); + it++) { ret.emplace_back(it->first); } return ret; } std::list GetChunkServerInLogicalPool( - 
PoolIdType id, - ChunkServerFilter filter = [](const ChunkServer&) { - return true;}) const override { + PoolIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + return true; + }) const override { std::list ret; - for (auto it = chunkServerMap_.begin(); - it != chunkServerMap_.end(); - it++) { + for (auto it = chunkServerMap_.begin(); it != chunkServerMap_.end(); + it++) { ret.emplace_back(it->first); } return ret; } std::list GetChunkServerInServer( - ServerIdType id, - ChunkServerFilter filter = [](const ChunkServer&) { - return true;}) const override { + ServerIdType id, ChunkServerFilter filter = [](const ChunkServer &) { + return true; + }) const override { std::list res; for (auto it : chunkServerMap_) { if (it.second.GetServerId() == id) { @@ -188,8 +190,9 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::vector GetCopySetsInCluster( - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { - return true;}) const override { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + return true; + }) const override { std::vector ret; for (auto it : copySetMap_) { ret.emplace_back(it.first); @@ -199,8 +202,9 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { std::vector GetCopySetsInChunkServer( ChunkServerIdType csId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { - return true;}) const override { + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + return true; + }) const override { std::vector ret; for (auto it : copySetMap_) { if (it.second.GetCopySetMembers().count(csId) > 0) { @@ -211,9 +215,11 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } std::vector<::curve::mds::topology::CopySetInfo> - GetCopySetInfosInLogicalPool(PoolIdType logicalPoolId, - CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo&) { - return true;}) const override { + GetCopySetInfosInLogicalPool( + PoolIdType 
logicalPoolId, + CopySetFilter filter = [](const ::curve::mds::topology::CopySetInfo &) { + return true; + }) const override { std::vector<::curve::mds::topology::CopySetInfo> ret; for (auto it : copySetMap_) { if (it.first.first == logicalPoolId) { @@ -234,7 +240,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } bool GetCopySet(::curve::mds::topology::CopySetKey key, - ::curve::mds::topology::CopySetInfo *out) const override { + ::curve::mds::topology::CopySetInfo *out) const override { auto it = copySetMap_.find(key); if (it != copySetMap_.end()) { *out = it->second; @@ -244,8 +250,8 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } } - bool GetChunkServer( - ChunkServerIdType chunkserverId, ChunkServer *out) const override { + bool GetChunkServer(ChunkServerIdType chunkserverId, + ChunkServer *out) const override { auto it = chunkServerMap_.find(chunkserverId); if (it != chunkServerMap_.end()) { *out = it->second; @@ -260,15 +266,15 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { rap.pageFileRAP.replicaNum = 3; rap.pageFileRAP.zoneNum = 3; - LogicalPool pool(0, "logicalpool-0", 1, LogicalPoolType::PAGEFILE, - rap, LogicalPool::UserPolicy{}, 0, true, true); + LogicalPool pool(0, "logicalpool-0", 1, LogicalPoolType::PAGEFILE, rap, + LogicalPool::UserPolicy{}, 0, true, true); pool.SetScatterWidth(100); *out = pool; return true; } - int UpdateChunkServerOnlineState( - const OnlineState &onlineState, ChunkServerIdType id) override { + int UpdateChunkServerOnlineState(const OnlineState &onlineState, + ChunkServerIdType id) override { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { return -1; @@ -279,7 +285,7 @@ class FakeTopo : public ::curve::mds::topology::TopologyImpl { } int UpdateChunkServerRwState(const ChunkServerStatus &rwStatus, - ChunkServerIdType id) { + ChunkServerIdType id) { auto it = chunkServerMap_.find(id); if (it == chunkServerMap_.end()) { return -1; @@ -312,13 +318,15 @@ 
class FakeTopo : public ::curve::mds::topology::TopologyImpl { class FakeTopologyServiceManager : public TopologyServiceManager { public: - FakeTopologyServiceManager() : - TopologyServiceManager(std::make_shared(), - std::static_pointer_cast( - std::make_shared( - std::make_shared())), nullptr, - std::make_shared( - ::curve::mds::copyset::CopysetOption{}), nullptr) {} + FakeTopologyServiceManager() + : TopologyServiceManager(std::make_shared(), + std::static_pointer_cast( + std::make_shared( + std::make_shared())), + nullptr, + std::make_shared( + ::curve::mds::copyset::CopysetOption{}), + nullptr) {} bool CreateCopysetNodeOnChunkServer( ChunkServerIdType csId, @@ -332,7 +340,7 @@ class FakeTopologyStat : public TopologyStat { explicit FakeTopologyStat(const std::shared_ptr &topo) : topo_(topo) {} void UpdateChunkServerStat(ChunkServerIdType csId, - const ChunkServerStat &stat) {} + const ChunkServerStat &stat) {} bool GetChunkServerStat(ChunkServerIdType csId, ChunkServerStat *stat) { if (!leaderCountOn) { @@ -352,8 +360,7 @@ class FakeTopologyStat : public TopologyStat { stat->leaderCount = leaderCount; return true; } - bool GetChunkPoolSize(PoolIdType pId, - uint64_t *chunkPoolSize) { + bool GetChunkPoolSize(PoolIdType pId, uint64_t *chunkPoolSize) { return true; } @@ -439,10 +446,9 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in online chunkserver###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << ", 最小值:(" << min << "," << minId << ")"; } @@ -480,10 +486,9 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in cluster###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 
最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << ", 最小值:(" << min << "," << minId << ")"; } @@ -523,17 +528,16 @@ class CopysetSchedulerPOC : public testing::Test { // 打印方差 float avg = static_cast(sumNumber) / - static_cast(numberMap.size()); + static_cast(numberMap.size()); float variance = 0; for (auto it : numberMap) { variance += std::pow(it.second - avg, 2); } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in online chunkserver###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << "), 最小值:(" << min << "," << minId << ")"; } @@ -559,18 +563,16 @@ class CopysetSchedulerPOC : public testing::Test { // 打印方差 float avg = static_cast(sumNumber) / - static_cast(numberMap.size()); + static_cast(numberMap.size()); float variance = 0; for (auto it : numberMap) { variance += std::pow(it.second - avg, 2); } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in cluster###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值: " << max - << ", 最小值:" << min; + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值: " << max << ", 最小值:" + << min; } void PrintLeaderCountInChunkServer(PoolIdType lid = 0) { @@ -598,22 +600,21 @@ class CopysetSchedulerPOC : public testing::Test { sumNumber += out.leaderCount; LOG(INFO) << "PRINT chunkserverid:" << it - << ", leader num:" << out.leaderCount; + << ", leader num:" << out.leaderCount; } } float avg = static_cast(sumNumber) / - static_cast(leaderDistribute.size()); + static_cast(leaderDistribute.size()); float variance = 0; for (auto it : leaderDistribute) { variance += std::pow(it.second - avg, 2); } variance /= 
leaderDistribute.size(); LOG(INFO) << "###print leader-num in cluster###\n" - << "均值:" << avg - << ", 方差:" << variance - << ", 标准差: " << std::sqrt(variance) - << ", 最大值:(" << max << "," << maxId << ")" + << "均值:" << avg << ", 方差:" << variance << ", 标准差: " + << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << ")" << "), 最小值:(" << min << "," << minId << ")"; } @@ -703,36 +704,33 @@ class CopysetSchedulerPOC : public testing::Test { } void BuildLeaderScheduler(int opConcurrent) { - topoAdapter_ = std::make_shared( + topoAdapter_ = std::make_shared( topo_, std::make_shared(), topoStat_); - opController_ = - std::make_shared( - opConcurrent, std::make_shared(topo_)); + opController_ = std::make_shared( + opConcurrent, std::make_shared(topo_)); - leaderScheduler_ = std::make_shared( - opt, topoAdapter_, opController_); + leaderScheduler_ = + std::make_shared(opt, topoAdapter_, opController_); } void BuildRapidLeaderScheduler(int opConcurrent) { - topoAdapter_ = std::make_shared( + topoAdapter_ = std::make_shared( topo_, std::make_shared(), topoStat_); - opController_ = - std::make_shared( - opConcurrent, std::make_shared(topo_)); + opController_ = std::make_shared( + opConcurrent, std::make_shared(topo_)); rapidLeaderScheduler_ = std::make_shared( opt, topoAdapter_, opController_, 0); } void BuilRecoverScheduler(int opConcurrent) { - topoAdapter_ = std::make_shared( + topoAdapter_ = std::make_shared( topo_, std::make_shared(), topoStat_); - opController_ = - std::make_shared( - opConcurrent, std::make_shared(topo_)); + opController_ = std::make_shared( + opConcurrent, std::make_shared(topo_)); recoverScheduler_ = std::make_shared( opt, topoAdapter_, opController_); @@ -743,8 +741,7 @@ class CopysetSchedulerPOC : public testing::Test { opt, topoAdapter_, opController_); } - void ApplyOperatorsInOpController( - const std::set &list) { + void ApplyOperatorsInOpController(const std::set &list) { std::vector keys; for (auto op : opController_->GetOperators()) { 
auto type = dynamic_cast(op.step.get()); @@ -860,9 +857,11 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_1) { // 均值:97.9556, 方差:11.5314, 标准差: 3.39579, 最大值:106, 最小值:88 // ###print copyset-num in cluster### // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // ==========================恢复之后的状态================================= //NOLINT + // ==========================恢复之后的状态================================= + // //NOLINT // ###print scatter-with in online chunkserver### - // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, 最小值:95 //NOLINT + // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, + // 最小值:95 //NOLINT // ###print scatter-with in cluster### // 均值:98.2667, 方差:64.2289, 标准差: 8.0143, 最大值:106, 最小值:0 // ###print copyset-num in online chunkserver### @@ -923,7 +922,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline // 1. 创建recoverScheduler - BuilRecoverScheduler(1); + BuilRecoverScheduler(1); // 2. 任意选择两个chunkserver处于offline状态 std::set idlist; @@ -948,7 +947,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { } } - ApplyOperatorsInOpController(idlist); + ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); // 4. 打印最终的scatter-with @@ -977,7 +976,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { // 测试20个chunkserver 接连 offline // 1. 创建recoverScheduler - BuilRecoverScheduler(1); + BuilRecoverScheduler(1); // 2. 任意选择两个chunkserver处于offline状态 std::set idlist; @@ -1002,7 +1001,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { } } - ApplyOperatorsInOpController(idlist); + ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); // 4. 
打印最终的scatter-with @@ -1037,7 +1036,8 @@ TEST_F(CopysetSchedulerPOC, test_chunkserver_offline_over_concurrency) { ASSERT_EQ(targetOpNum, opNum); } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { //NOLINT +TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_1) { // NOLINT // 测试一个chunkserver offline, 集群回迁的情况 // 1. 一个chunkserver offline后恢复 @@ -1084,7 +1084,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { //NOLIN PrintCopySetNumInLogicalPool(); LOG(INFO) << "offline one:" << choose; ASSERT_TRUE(GetChunkServerScatterwith(choose) <= - minScatterwidth_ * (1 + scatterwidthPercent_)); + minScatterwidth_ * (1 + scatterwidthPercent_)); ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); // ============================结果==================================== @@ -1095,7 +1095,8 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { //NOLIN // 均值:100, 方差:0.5, 标准差: 0.707107, 最大值: 101, 最小值:91 } -TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) { //NOLINT +TEST_F(CopysetSchedulerPOC, + DISABLED_test_scatterwith_after_copysetRebalance_2) { // NOLINT // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline // 集群回迁的情况 @@ -1146,7 +1147,8 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) int removeOne = 0; do { removeOne = copySetScheduler_->Schedule(); - ApplyOperatorsInOpController(std::set{removeOne}); + ApplyOperatorsInOpController(std::set{ + static_cast(removeOne)}); } while (removeOne > 0); PrintScatterWithInLogicalPool(); PrintCopySetNumInLogicalPool(); @@ -1158,7 +1160,8 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLINT +TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_3) { // NOLINT // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver 
offline // 回迁的情况 @@ -1187,7 +1190,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLIN } } - ApplyOperatorsInOpController(idlist); + ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); @@ -1228,7 +1231,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLIN for (auto choose : idlist) { ASSERT_TRUE(GetChunkServerScatterwith(choose) <= - minScatterwidth_ * (1 + scatterwidthPercent_)); + minScatterwidth_ * (1 + scatterwidthPercent_)); ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); } @@ -1240,8 +1243,10 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { //NOLIN // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_4) { //NOLINT - // set one chunkserver status from online to pendding, and the copyset on it will schedule out //NOLINT +TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_4) { // NOLINT + // set one chunkserver status from online to pendding, and the copyset on it + // will schedule out //NOLINT // set one chunkserver status to pendding auto chunkserverlist = topo_->GetChunkServerInServer(1); @@ -1257,8 +1262,7 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_4) { //NOLIN removeOne = copySetScheduler_->Schedule(); opNum += removeOne; if (removeOne > 0) { - ApplyOperatorsInOpController( - std::set{target}); + ApplyOperatorsInOpController(std::set{target}); } } while (removeOne > 0); ASSERT_EQ(opNum, targetOpNum); @@ -1267,8 +1271,10 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_4) { //NOLIN PrintCopySetNumInOnlineChunkServer(); } -TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_5) { //NOLINT - // set two chunkserver status from online to pendding, and the copyset on it will schedule out //NOLINT 
+TEST_F(CopysetSchedulerPOC, + test_scatterwith_after_copysetRebalance_5) { // NOLINT + // set two chunkserver status from online to pendding, and the copyset on it + // will schedule out //NOLINT // set two chunkserver status to pendding auto chunkserverlist = topo_->GetChunkServerInServer(1); diff --git a/test/mds/server/mds_test.cpp b/test/mds/server/mds_test.cpp index 93c2207cfa..dbe2df60f7 100644 --- a/test/mds/server/mds_test.cpp +++ b/test/mds/server/mds_test.cpp @@ -64,7 +64,7 @@ class MDSTest : public ::testing::Test { } // 一定时间内尝试init直到etcd完全起来 auto client = std::make_shared(); - EtcdConf conf = { kEtcdAddr, strlen(kEtcdAddr), 1000 }; + EtcdConf conf = {kEtcdAddr, static_cast(strlen(kEtcdAddr)), 1000}; uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool initSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 5) { @@ -176,7 +176,7 @@ TEST_F(MDSTest, common) { request1.set_token("123"); request1.set_ip("127.0.0.1"); request1.set_port(8888); - heartbeat::DiskState* diskState = new heartbeat::DiskState(); + heartbeat::DiskState *diskState = new heartbeat::DiskState(); diskState->set_errtype(0); diskState->set_errmsg(""); request1.set_allocated_diskstate(diskState); diff --git a/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp b/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp index d2f62b6d9e..40d785c371 100644 --- a/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp +++ b/test/mds/snapshotcloneclient/test_snapshotclone_client.cpp @@ -90,7 +90,6 @@ TEST_F(TestSnapshotCloneClient, TestInitSuccess) { } TEST_F(TestSnapshotCloneClient, TestInitFalse) { - uint32_t port = listenAddr_.port; option.snapshotCloneAddr = ""; client_->Init(option); ASSERT_FALSE(client_->GetInitStatus()); @@ -107,7 +106,6 @@ TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseNotInit) { } TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseConnectFail) { - uint32_t port = listenAddr_.port; 
option.snapshotCloneAddr = "aa"; client_->Init(option); ASSERT_TRUE(client_->GetInitStatus()); @@ -122,7 +120,6 @@ TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseConnectFail) { } TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseCallFail) { - uint32_t port = listenAddr_.port; option.snapshotCloneAddr = "127.0.0.1:" + std::to_string(0); client_->Init(option); ASSERT_TRUE(client_->GetInitStatus()); @@ -216,7 +213,6 @@ TEST_F(TestSnapshotCloneClient, TestGetCloneRefStatusFalseInvalidStatus) { butil::IOBufBuilder os; Json::Value mainObj; mainObj[kCodeStr] = std::to_string(kErrCodeSuccess); - CloneRefStatus refStatus = CloneRefStatus::kNoRef; mainObj[kRefStatusStr] = 4; os << mainObj.toStyledString(); os.move_to(bcntl->response_attachment()); diff --git a/test/mds/topology/test_topology_service_manager.cpp b/test/mds/topology/test_topology_service_manager.cpp index 3f7e2cfa92..ebecb69a44 100644 --- a/test/mds/topology/test_topology_service_manager.cpp +++ b/test/mds/topology/test_topology_service_manager.cpp @@ -840,7 +840,6 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_PhysicalPoolNotFound) { TEST_F(TestTopologyServiceManager, test_RegistServer_ByNamePhysicalPoolNotFound) { - ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "PhysicalPool1"); @@ -885,7 +884,6 @@ TEST_F(TestTopologyServiceManager, test_RegistServer_ZoneNotFound) { } TEST_F(TestTopologyServiceManager, test_RegistServer_ByNameZoneNotFound) { - ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "PhysicalPool1"); @@ -946,7 +944,6 @@ TEST_F(TestTopologyServiceManager, TEST_F(TestTopologyServiceManager, test_RegistServer_InvalidParamMissingPhysicalPoolIdAndName) { - ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "PhysicalPool1"); @@ -968,7 +965,6 @@ 
TEST_F(TestTopologyServiceManager, } TEST_F(TestTopologyServiceManager, test_RegistServer_AllocateIdFail) { - ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); @@ -1476,7 +1472,6 @@ TEST_F(TestTopologyServiceManager, test_CreateZone_success) { TEST_F(TestTopologyServiceManager, test_CreateZone_AllocateIdFail) { PoolIdType physicalPoolId = 0x11; - ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId, "poolname1"); ZoneRequest request; diff --git a/test/mds/topology/test_topology_stat.cpp b/test/mds/topology/test_topology_stat.cpp index 9b930f5fcc..29af525410 100644 --- a/test/mds/topology/test_topology_stat.cpp +++ b/test/mds/topology/test_topology_stat.cpp @@ -114,7 +114,6 @@ TEST_F(TestTopologyStat, TestUpdateAndGetChunkServerStat) { cstat3.writeIOPS = 3; stat3.copysetStats.push_back(cstat3); - PoolIdType pPid = 2; EXPECT_CALL(*topology_, GetBelongPhysicalPoolId(_, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(2), Return(kTopoErrCodeSuccess))); diff --git a/test/snapshotcloneserver/test_clone_core.cpp b/test/snapshotcloneserver/test_clone_core.cpp index 25265ecb6a..14c238fba6 100644 --- a/test/snapshotcloneserver/test_clone_core.cpp +++ b/test/snapshotcloneserver/test_clone_core.cpp @@ -1223,7 +1223,6 @@ void TestCloneCoreImpl::MockCloneMetaSuccess( std::shared_ptr task) { uint32_t chunksize = 1024 * 1024; uint64_t segmentsize = 2 * chunksize; - uint64_t filelength = 1 * segmentsize; SegmentInfo segInfoOut; segInfoOut.segmentsize = segmentsize; segInfoOut.chunksize = chunksize; diff --git a/test/snapshotcloneserver/test_snapshot_core.cpp b/test/snapshotcloneserver/test_snapshot_core.cpp index 014f776d5d..a30db66c7f 100644 --- a/test/snapshotcloneserver/test_snapshot_core.cpp +++ b/test/snapshotcloneserver/test_snapshot_core.cpp @@ -989,7 +989,7 @@ TEST_F(TestSnapshotCoreImpl, std::vector snapInfos; SnapshotInfo info2(uuid2, user, fileName, desc2); info.SetSeqNum(seqNum); - 
info2.SetSeqNum(seqNum - 1); //上一个快照 + info2.SetSeqNum(seqNum - 1); // 上一个快照 info2.SetStatus(Status::done); snapInfos.push_back(info); snapInfos.push_back(info2); diff --git a/test/tools/mds_client_test.cpp b/test/tools/mds_client_test.cpp index 4d8b465f68..35c594561c 100644 --- a/test/tools/mds_client_test.cpp +++ b/test/tools/mds_client_test.cpp @@ -28,36 +28,36 @@ #include "test/tools/mock/mock_topology_service.h" #include "test/tools/mock/mock_schedule_service.h" -using curve::mds::topology::DiskState; -using curve::mds::topology::OnlineState; +using curve::mds::schedule::QueryChunkServerRecoverStatusRequest; +using curve::mds::schedule::QueryChunkServerRecoverStatusResponse; +using curve::mds::schedule::RapidLeaderScheduleResponse; using curve::mds::topology::AllocateStatus; -using curve::mds::topology::LogicalPoolType; -using curve::mds::topology::ListPhysicalPoolRequest; -using curve::mds::topology::ListPhysicalPoolResponse; -using curve::mds::topology::ListLogicalPoolRequest; -using curve::mds::topology::ListLogicalPoolResponse; +using curve::mds::topology::DiskState; using curve::mds::topology::GetChunkServerListInCopySetsRequest; using curve::mds::topology::GetChunkServerListInCopySetsResponse; +using curve::mds::topology::GetCopysetRequest; +using curve::mds::topology::GetCopysetResponse; using curve::mds::topology::GetCopySetsInChunkServerRequest; using curve::mds::topology::GetCopySetsInChunkServerResponse; using curve::mds::topology::GetCopySetsInClusterRequest; using curve::mds::topology::GetCopySetsInClusterResponse; -using curve::mds::topology::GetCopysetRequest; -using curve::mds::topology::GetCopysetResponse; -using curve::mds::topology::SetCopysetsAvailFlagRequest; -using curve::mds::topology::SetCopysetsAvailFlagResponse; +using curve::mds::topology::ListLogicalPoolRequest; +using curve::mds::topology::ListLogicalPoolResponse; +using curve::mds::topology::ListPhysicalPoolRequest; +using curve::mds::topology::ListPhysicalPoolResponse; using 
curve::mds::topology::ListUnAvailCopySetsRequest; using curve::mds::topology::ListUnAvailCopySetsResponse; +using curve::mds::topology::LogicalPoolType; +using curve::mds::topology::OnlineState; +using curve::mds::topology::SetCopysetsAvailFlagRequest; +using curve::mds::topology::SetCopysetsAvailFlagResponse; using curve::mds::topology::SetLogicalPoolScanStateRequest; using curve::mds::topology::SetLogicalPoolScanStateResponse; -using curve::mds::schedule::RapidLeaderScheduleResponse; -using curve::mds::schedule::QueryChunkServerRecoverStatusRequest; -using curve::mds::schedule::QueryChunkServerRecoverStatusResponse; using ::testing::_; -using ::testing::Return; -using ::testing::Invoke; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; using ::testing::SetArgPointee; DECLARE_string(mdsDummyPort); @@ -67,11 +67,9 @@ namespace tool { const char mdsAddr[] = "127.0.0.1:9191,127.0.0.1:9192"; -template -void callback(RpcController* controller, - const Req* request, - Resp* response, - Closure* done) { +template +void callback(RpcController *controller, const Req *request, Resp *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); } @@ -84,11 +82,11 @@ class ToolMDSClientTest : public ::testing::Test { topoService = new curve::mds::topology::MockTopologyService(); scheduleService = new curve::mds::schedule::MockScheduleService(); ASSERT_EQ(0, server->AddService(nameService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->AddService(topoService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->AddService(scheduleService, - brpc::SERVER_DOESNT_OWN_SERVICE)); + brpc::SERVER_DOESNT_OWN_SERVICE)); ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); brpc::StartDummyServerAt(9193); @@ -96,13 +94,13 @@ class ToolMDSClientTest : public ::testing::Test { curve::mds::topology::ListPhysicalPoolResponse response; 
response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), + .WillOnce(DoAll( + SetArgPointee<2>(response), Invoke([](RpcController *controller, const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + ListPhysicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, mdsClient.Init(mdsAddr, "9194,9193")); } void TearDown() { @@ -118,7 +116,7 @@ class ToolMDSClientTest : public ::testing::Test { scheduleService = nullptr; } - void GetFileInfoForTest(uint64_t id, FileInfo* fileInfo) { + void GetFileInfoForTest(uint64_t id, FileInfo *fileInfo) { fileInfo->set_id(id); fileInfo->set_filename("test"); fileInfo->set_parentid(0); @@ -129,12 +127,11 @@ class ToolMDSClientTest : public ::testing::Test { fileInfo->set_ctime(1573546993000000); } - void GetCopysetInfoForTest(CopySetServerInfo* info, - int num, uint32_t copysetId = 1) { + void GetCopysetInfoForTest(CopySetServerInfo *info, int num, + uint32_t copysetId = 1) { info->Clear(); for (int i = 0; i < num; ++i) { - curve::common::ChunkServerLocation *csLoc = - info->add_cslocs(); + curve::common::ChunkServerLocation *csLoc = info->add_cslocs(); csLoc->set_chunkserverid(i); csLoc->set_hostip("127.0.0.1"); csLoc->set_port(9191 + i); @@ -142,21 +139,21 @@ class ToolMDSClientTest : public ::testing::Test { info->set_copysetid(copysetId); } - void GetSegmentForTest(PageFileSegment* segment) { + void GetSegmentForTest(PageFileSegment *segment) { segment->set_logicalpoolid(1); segment->set_segmentsize(DefaultSegmentSize); segment->set_chunksize(kChunkSize); segment->set_startoffset(0); } - void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo* pool) { + void GetPhysicalPoolInfoForTest(PoolIdType id, PhysicalPoolInfo *pool) { pool->set_physicalpoolid(id); pool->set_physicalpoolname("testPool"); 
pool->set_desc("physical pool for test"); } void GetLogicalPoolForTest(PoolIdType id, - curve::mds::topology::LogicalPoolInfo *lpInfo) { + curve::mds::topology::LogicalPoolInfo *lpInfo) { lpInfo->set_logicalpoolid(id); lpInfo->set_logicalpoolname("defaultLogicalPool"); lpInfo->set_physicalpoolid(1); @@ -207,10 +204,10 @@ class ToolMDSClientTest : public ::testing::Test { csInfo->set_diskcapacity(1024); csInfo->set_diskused(512); } - brpc::Server* server; - curve::mds::MockNameService* nameService; - curve::mds::topology::MockTopologyService* topoService; - curve::mds::schedule::MockScheduleService* scheduleService; + brpc::Server *server; + curve::mds::MockNameService *nameService; + curve::mds::topology::MockTopologyService *topoService; + curve::mds::schedule::MockScheduleService *scheduleService; MDSClient mdsClient; const uint64_t kChunkSize = 16777216; const uint64_t DefaultSegmentSize = 1024 * 1024 * 1024; @@ -237,42 +234,40 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + const curve::mds::GetFileInfoRequest *request, + curve::mds::GetFileInfoResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); // 返回码不为OK curve::mds::GetFileInfoResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure 
*done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetFileInfoRequest *request, + curve::mds::GetFileInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); // 正常情况 - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo *info = new curve::mds::FileInfo; GetFileInfoForTest(1, info); response.set_allocated_fileinfo(info); response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetFileInfoRequest *request, - curve::mds::GetFileInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetFileInfoRequest *request, + curve::mds::GetFileInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetFileInfo(filename, &outFileInfo)); ASSERT_EQ(info->DebugString(), outFileInfo.DebugString()); } @@ -283,48 +278,48 @@ TEST_F(ToolMDSClientTest, GetAllocatedSize) { // 发送RPC失败 EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::GetAllocatedSizeRequest *request, + curve::mds::GetAllocatedSizeResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + 
cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); // 返回码不为OK curve::mds::GetAllocatedSizeResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetAllocatedSizeRequest *request, + curve::mds::GetAllocatedSizeResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); // 正常情况 response.set_allocatedsize(DefaultSegmentSize * 3); for (int i = 1; i <= 3; ++i) { - response.mutable_allocsizemap()->insert({i, DefaultSegmentSize}); + response.mutable_allocsizemap()->insert( + {static_cast<::google::protobuf::uint32>(i), DefaultSegmentSize}); } response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetAllocatedSizeRequest *request, - curve::mds::GetAllocatedSizeResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetAllocatedSizeRequest *request, + curve::mds::GetAllocatedSizeResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); AllocMap allocMap; ASSERT_EQ(0, mdsClient.GetAllocatedSize(filename, &allocSize, &allocMap)); ASSERT_EQ(DefaultSegmentSize * 3, allocSize); - AllocMap expected = {{1, DefaultSegmentSize}, {2, DefaultSegmentSize}, + AllocMap expected = {{1, DefaultSegmentSize}, + {2, 
DefaultSegmentSize}, {3, DefaultSegmentSize}}; ASSERT_EQ(expected, allocMap); } @@ -336,28 +331,27 @@ TEST_F(ToolMDSClientTest, ListDir) { // 发送RPC失败 EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ListDirRequest *request, + curve::mds::ListDirResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); // 返回码不为OK curve::mds::ListDirResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListDir(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListDirRequest *request, + curve::mds::ListDirResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); // 正常情况 response.set_statuscode(curve::mds::StatusCode::kOK); @@ -366,13 +360,12 @@ TEST_F(ToolMDSClientTest, ListDir) { GetFileInfoForTest(i, fileInfo); } EXPECT_CALL(*nameService, ListDir(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListDirRequest *request, - curve::mds::ListDirResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + 
SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListDirRequest *request, + curve::mds::ListDirResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListDir(fileName, &fileInfoVec)); for (int i = 0; i < 5; i++) { FileInfo expected; @@ -389,72 +382,69 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { // 发送RPC失败 EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(GetSegmentRes::kOtherError, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // segment不存在 curve::mds::GetOrAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); 
ASSERT_EQ(GetSegmentRes::kSegmentNotAllocated, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // 文件不存在 response.set_statuscode(curve::mds::StatusCode::kFileNotExists); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kFileNotExists, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // 其他错误 response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOtherError, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); // 正常情况 - PageFileSegment* segment = new PageFileSegment(); + PageFileSegment *segment = new PageFileSegment(); GetSegmentForTest(segment); response.set_statuscode(curve::mds::StatusCode::kOK); 
response.set_allocated_pagefilesegment(segment); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::GetOrAllocateSegmentRequest *request, - curve::mds::GetOrAllocateSegmentResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::GetOrAllocateSegmentRequest *request, + curve::mds::GetOrAllocateSegmentResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kOK, - mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); + mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); ASSERT_EQ(segment->DebugString(), outSegment.DebugString()); } @@ -464,89 +454,85 @@ TEST_F(ToolMDSClientTest, DeleteFile) { // 发送RPC失败 EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::DeleteFileRequest *request, + curve::mds::DeleteFileResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); // 返回码不为OK curve::mds::DeleteFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done){ - brpc::ClosureGuard 
doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::DeleteFileRequest *request, + curve::mds::DeleteFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); // 正常情况 response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::DeleteFileRequest *request, - curve::mds::DeleteFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::DeleteFileRequest *request, + curve::mds::DeleteFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.DeleteFile(fileName)); } TEST_F(ToolMDSClientTest, CreateFile) { std::string fileName = "/test"; uint64_t length = 10 * DefaultSegmentSize; - uint64_t stripeUnit = 32 * 1024 *1024; + uint64_t stripeUnit = 32 * 1024 * 1024; uint64_t stripeCount = 32; // 发送RPC失败 EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); - ASSERT_EQ(-1, mdsClient.CreateFile(fileName, length, - stripeUnit, stripeCount)); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::CreateFileRequest *request, + curve::mds::CreateFileResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); + ASSERT_EQ(-1, + mdsClient.CreateFile(fileName, length, stripeUnit, 
stripeCount)); // 返回码不为OK curve::mds::CreateFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(-1, mdsClient.CreateFile(fileName, length, - stripeUnit, stripeCount)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::CreateFileRequest *request, + curve::mds::CreateFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(-1, + mdsClient.CreateFile(fileName, length, stripeUnit, stripeCount)); // 正常情况 response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::CreateFileRequest *request, - curve::mds::CreateFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(0, mdsClient.CreateFile(fileName, length, - stripeUnit, stripeCount)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::CreateFileRequest *request, + curve::mds::CreateFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(0, + mdsClient.CreateFile(fileName, length, stripeUnit, stripeCount)); } TEST_F(ToolMDSClientTest, ExtendVolume_success) { @@ -555,13 +541,12 @@ TEST_F(ToolMDSClientTest, ExtendVolume_success) { curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - 
curve::mds::ExtendFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ExtendFileRequest *request, + curve::mds::ExtendFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ExtendVolume(fileName, length)); } @@ -572,15 +557,15 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { // 发送RPC失败 EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ExtendFileRequest *request, + curve::mds::ExtendFileResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); return; @@ -589,13 +574,12 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ExtendFileRequest *request, - curve::mds::ExtendFileResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ExtendFileRequest *request, + curve::mds::ExtendFileResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ExtendVolume(fileName, length)); } @@ -607,31 +591,30 @@ 
TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { // 发送rpc失败 EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); - ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet( - logicalPoolId, copysetId, &csLocs)); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); + ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, + copysetId, &csLocs)); // 返回码不为OK GetChunkServerListInCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet( - logicalPoolId, copysetId, &csLocs)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, + copysetId, &csLocs)); // 正常情况 response.set_statuscode(kTopoErrCodeSuccess); @@ -640,15 +623,14 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { auto infoPtr = 
response.add_csinfo(); infoPtr->CopyFrom(csInfo); EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySet( - logicalPoolId, copysetId, &csLocs)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, + &csLocs)); ASSERT_EQ(csInfo.cslocs_size(), csLocs.size()); for (uint32_t i = 0; i < csLocs.size(); ++i) { ASSERT_EQ(csInfo.cslocs(i).DebugString(), csLocs[i].DebugString()); @@ -668,15 +650,14 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { std::vector copysets = {100, 101, 102}; std::vector csServerInfos; EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetChunkServerListInCopySetsRequest *request, - GetChunkServerListInCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); - ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySets( - logicalPoolId, copysets, &csServerInfos)); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetChunkServerListInCopySetsRequest *request, + GetChunkServerListInCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); + ASSERT_EQ(0, mdsClient.GetChunkServerListInCopySets(logicalPoolId, copysets, + &csServerInfos)); ASSERT_EQ(expected.size(), csServerInfos.size()); for (uint32_t i = 0; i < expected.size(); ++i) { 
ASSERT_EQ(expected[i].DebugString(), csServerInfos[i].DebugString()); @@ -689,28 +670,28 @@ TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { // 发送rpc失败 EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const ListPhysicalPoolRequest *request, + ListPhysicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); // 返回码不为OK ListPhysicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListPhysicalPoolRequest *request, + ListPhysicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); // 正常情况 @@ -720,13 +701,13 @@ TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { GetPhysicalPoolInfoForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListPhysicalPoolRequest *request, - ListPhysicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + 
DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListPhysicalPoolRequest *request, + ListPhysicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, mdsClient.ListPhysicalPoolsInCluster(&pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -743,28 +724,27 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { // 发送rpc失败 EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, const ListLogicalPoolRequest *request, + ListLogicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); // 返回码不为OK ListLogicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListLogicalPoolRequest *request, + ListLogicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); // 正常情况 @@ -774,13 +754,13 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { GetLogicalPoolForTest(i, poolInfo); } EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, 
_)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListLogicalPoolRequest *request, - ListLogicalPoolResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce( + DoAll(SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListLogicalPoolRequest *request, + ListLogicalPoolResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + }))); ASSERT_EQ(0, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); ASSERT_EQ(3, pools.size()); for (int i = 0; i < 3; ++i) { @@ -796,28 +776,28 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { // 发送rpc失败 EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::topology::ListPoolZoneRequest *request, + curve::mds::topology::ListPoolZoneResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); // 返回码不为OK curve::mds::topology::ListPoolZoneResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const 
curve::mds::topology::ListPoolZoneRequest *request, + curve::mds::topology::ListPoolZoneResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); // 正常情况 response.set_statuscode(kTopoErrCodeSuccess); @@ -826,13 +806,12 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { GetZoneInfoForTest(i, zoneInfo); } EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListPoolZoneRequest *request, - curve::mds::topology::ListPoolZoneResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::topology::ListPoolZoneRequest *request, + curve::mds::topology::ListPoolZoneResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); ASSERT_EQ(3, zones.size()); for (int i = 0; i < 3; ++i) { @@ -849,28 +828,29 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { // 发送rpc失败 EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::topology::ListZoneServerRequest *request, + curve::mds::topology::ListZoneServerResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); // 返回码不为OK 
curve::mds::topology::ListZoneServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListZoneServerRequest *request, + curve::mds::topology::ListZoneServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); // 正常情况 @@ -880,13 +860,13 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { GetServerInfoForTest(i, serverInfo); } EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListZoneServerRequest *request, - curve::mds::topology::ListZoneServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListZoneServerRequest *request, + curve::mds::topology::ListZoneServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListServersInZone(zoneId, &servers)); ASSERT_EQ(3, servers.size()); for (int i = 0; i < 3; ++i) { @@ -903,28 +883,29 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { // 发送rpc失败 EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - 
brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::topology::ListChunkServerRequest *request, + curve::mds::topology::ListChunkServerResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); // 返回码不为OK curve::mds::topology::ListChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListChunkServerRequest *request, + curve::mds::topology::ListChunkServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); // 正常情况,两个chunkserver正常,一个chunkserver retired @@ -934,13 +915,13 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { GetChunkServerInfoForTest(i, csInfo, i == 2); } EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::ListChunkServerRequest *request, - curve::mds::topology::ListChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::ListChunkServerRequest *request, + curve::mds::topology::ListChunkServerResponse 
*response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); ASSERT_EQ(2, chunkservers.size()); for (int i = 0; i < 2; ++i) { @@ -958,15 +939,16 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { // 发送rpc失败 EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(12) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done){ + .WillRepeatedly(Invoke( + [](RpcController *controller, + const curve::mds::topology::GetChunkServerInfoRequest *request, + curve::mds::topology::GetChunkServerInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); @@ -975,30 +957,32 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::GetChunkServerInfoRequest + *request, + curve::mds::topology::GetChunkServerInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); // 正常情况 
response.set_statuscode(kTopoErrCodeSuccess); - ChunkServerInfo* csInfo = new ChunkServerInfo(); + ChunkServerInfo *csInfo = new ChunkServerInfo(); GetChunkServerInfoForTest(1, csInfo); response.set_allocated_chunkserverinfo(csInfo); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::topology::GetChunkServerInfoRequest *request, - curve::mds::topology::GetChunkServerInfoResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke( + [](RpcController *controller, + const curve::mds::topology::GetChunkServerInfoRequest + *request, + curve::mds::topology::GetChunkServerInfoResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(0, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); ChunkServerInfo expected; @@ -1022,15 +1006,15 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { // 发送rpc失败 EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(12) - .WillRepeatedly(Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const GetCopySetsInChunkServerRequest *request, + GetCopySetsInChunkServerResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, ©sets)); @@ -1039,13 +1023,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { 
response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInChunkServerRequest *request, + GetCopySetsInChunkServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, ©sets)); @@ -1058,13 +1041,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { } EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(2) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInChunkServerRequest *request, - GetCopySetsInChunkServerResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInChunkServerRequest *request, + GetCopySetsInChunkServerResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1089,15 +1071,15 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { // 发送rpc失败 EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + 
.WillRepeatedly( + Invoke([](RpcController *controller, + const GetCopySetsInClusterRequest *request, + GetCopySetsInClusterResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(©sets)); // 返回码不为OK @@ -1105,13 +1087,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInClusterRequest *request, + GetCopySetsInClusterResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(©sets)); // 正常情况 @@ -1123,13 +1104,12 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { } EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(1) - .WillRepeatedly(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const GetCopySetsInClusterRequest *request, - GetCopySetsInClusterResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillRepeatedly(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const GetCopySetsInClusterRequest *request, + GetCopySetsInClusterResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.GetCopySetsInCluster(©sets)); ASSERT_EQ(5, copysets.size()); copysets.clear(); @@ -1141,13 +1121,11 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { TEST_F(ToolMDSClientTest, GetCopyset) { auto succCallback = callback; - auto 
failCallback = [](RpcController* controller, - const GetCopysetRequest* request, - GetCopysetResponse* response, - Closure* done) { + auto failCallback = [](RpcController *controller, + const GetCopysetRequest *request, + GetCopysetResponse *response, Closure *done) { brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); + brpc::Controller *cntl = dynamic_cast(controller); cntl->SetFailed("fail"); }; @@ -1168,8 +1146,8 @@ TEST_F(ToolMDSClientTest, GetCopyset) { CopysetInfo copysetInfo; EXPECT_CALL(*topoService, GetCopyset(_, _, _, _)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(succResp), - Invoke(failCallback))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(succResp), Invoke(failCallback))); ASSERT_EQ(mdsClient.GetCopyset(1, 1, ©setInfo), -1); } @@ -1177,8 +1155,7 @@ TEST_F(ToolMDSClientTest, GetCopyset) { { CopysetInfo copysetInfo; EXPECT_CALL(*topoService, GetCopyset(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(failResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(failResp), Invoke(succCallback))); ASSERT_EQ(mdsClient.GetCopyset(1, 1, ©setInfo), -1); } @@ -1186,8 +1163,7 @@ TEST_F(ToolMDSClientTest, GetCopyset) { { CopysetInfo copysetInfo; EXPECT_CALL(*topoService, GetCopyset(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(succResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(succResp), Invoke(succCallback))); ASSERT_EQ(mdsClient.GetCopyset(1, 1, ©setInfo), 0); ASSERT_EQ(copysetInfo.logicalpoolid(), 1); ASSERT_EQ(copysetInfo.copysetid(), 1); @@ -1201,15 +1177,15 @@ TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { // 发送rpc失败 EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + 
.WillRepeatedly( + Invoke([](RpcController *controller, + const RapidLeaderScheduleRequst *request, + RapidLeaderScheduleResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); // 返回码不为OK @@ -1217,25 +1193,23 @@ TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { response.set_statuscode( curve::mds::schedule::kScheduleErrCodeInvalidLogicalPool); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const RapidLeaderScheduleRequst *request, + RapidLeaderScheduleResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); // 成功 response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const RapidLeaderScheduleRequst *request, - RapidLeaderScheduleResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const RapidLeaderScheduleRequst *request, + RapidLeaderScheduleResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.RapidLeaderSchedule(1)); } @@ -1251,9 +1225,9 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { // CASE 1: Send rpc failed { auto failCallback = [](RpcController *controller, - const SetLogicalPoolScanStateRequest* request, - SetLogicalPoolScanStateResponse* response, - Closure* 
done) { + const SetLogicalPoolScanStateRequest *request, + SetLogicalPoolScanStateResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); brpc::Controller *cntl = dynamic_cast(controller); @@ -1261,24 +1235,22 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { }; EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) .Times(6) - .WillRepeatedly(DoAll(SetArgPointee<2>(succResp), - Invoke(failCallback))); + .WillRepeatedly( + DoAll(SetArgPointee<2>(succResp), Invoke(failCallback))); ASSERT_EQ(-1, mdsClient.SetLogicalPoolScanState(1, true)); } // CASE 2: Logical pool not found { EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(failResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(failResp), Invoke(succCallback))); ASSERT_EQ(-1, mdsClient.SetLogicalPoolScanState(1, true)); } // CASE 3: Set success { EXPECT_CALL(*topoService, SetLogicalPoolScanState(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(succResp), - Invoke(succCallback))); + .WillOnce(DoAll(SetArgPointee<2>(succResp), Invoke(succCallback))); ASSERT_EQ(0, mdsClient.SetLogicalPoolScanState(1, true)); } } @@ -1288,44 +1260,42 @@ TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { // 发送rpc失败 EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly(Invoke( + [](RpcController *controller, + const QueryChunkServerRecoverStatusRequest *request, + QueryChunkServerRecoverStatusResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, 
mdsClient.QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + std::vector{}, &statusMap)); // 1. QueryChunkServerRecoverStatus失败的情况 QueryChunkServerRecoverStatusResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrInvalidQueryChunkserverID); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const QueryChunkServerRecoverStatusRequest *request, + QueryChunkServerRecoverStatusResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + std::vector{}, &statusMap)); // 2. QueryChunkServerRecoverStatus成功的情况 response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const QueryChunkServerRecoverStatusRequest *request, - QueryChunkServerRecoverStatusResponse *response, - Closure *done) { - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const QueryChunkServerRecoverStatusRequest *request, + QueryChunkServerRecoverStatusResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.QueryChunkServerRecoverStatus( - std::vector{}, &statusMap)); + std::vector{}, &statusMap)); } TEST_F(ToolMDSClientTest, GetMetric) { @@ -1365,14 +1335,14 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { std::map onlineStatus; // 9180在线,9999不在线 
value.set_value("{\"conf_name\":\"mds.listen.addr\"," - "\"conf_value\":\"127.0.0.1:9192\"}"); + "\"conf_value\":\"127.0.0.1:9192\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); std::map expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", true}}; ASSERT_EQ(expected, onlineStatus); // 9180的服务端口不一致 value.set_value("{\"conf_name\":\"mds.listen.addr\"," - "\"conf_value\":\"127.0.0.1:9188\"}"); + "\"conf_value\":\"127.0.0.1:9188\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; ASSERT_EQ(expected, onlineStatus); @@ -1389,28 +1359,27 @@ TEST_F(ToolMDSClientTest, ListClient) { // 发送rpc失败 EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ListClientRequest *request, + curve::mds::ListClientResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); // 返回码不为OK curve::mds::ListClientResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListClient(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListClientRequest *request, + curve::mds::ListClientResponse *response, + Closure *done) { brpc::ClosureGuard 
doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); // 正常情况 @@ -1421,19 +1390,18 @@ TEST_F(ToolMDSClientTest, ListClient) { clientInfo->set_port(8888 + i); } EXPECT_CALL(*nameService, ListClient(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListClientRequest *request, - curve::mds::ListClientResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListClientRequest *request, + curve::mds::ListClientResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListClient(&clientAddrs)); ASSERT_EQ(response.clientinfos_size(), clientAddrs.size()); for (int i = 0; i < 5; i++) { - const auto& clientInfo = response.clientinfos(i); - std::string expected = clientInfo.ip() + ":" + - std::to_string(clientInfo.port()); + const auto &clientInfo = response.clientinfos(i); + std::string expected = + clientInfo.ip() + ":" + std::to_string(clientInfo.port()); ASSERT_EQ(expected, clientAddrs[i]); } } @@ -1445,28 +1413,28 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { // send rpc fail EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const curve::mds::ListVolumesOnCopysetsRequest *request, + curve::mds::ListVolumesOnCopysetsResponse *response, + Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, 
mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); // return code not ok curve::mds::ListVolumesOnCopysetsResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListVolumesOnCopysetsRequest *request, + curve::mds::ListVolumesOnCopysetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); // normal @@ -1476,13 +1444,12 @@ TEST_F(ToolMDSClientTest, ListVolumesOnCopyset) { *fileName = "file" + std::to_string(i); } EXPECT_CALL(*nameService, ListVolumesOnCopysets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const curve::mds::ListVolumesOnCopysetsRequest *request, - curve::mds::ListVolumesOnCopysetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const curve::mds::ListVolumesOnCopysetsRequest *request, + curve::mds::ListVolumesOnCopysetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListVolumesOnCopyset(copysets, &fileNames)); ASSERT_EQ(response.filenames_size(), fileNames.size()); for (int i = 0; i < 5; i++) { @@ -1500,40 +1467,38 @@ TEST_F(ToolMDSClientTest, SetCopysetsAvailFlag) { // send rpc fail EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - 
SetCopysetsAvailFlagResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const SetCopysetsAvailFlagRequest *request, + SetCopysetsAvailFlagResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); // return code not ok SetCopysetsAvailFlagResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeStorgeFail); EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const SetCopysetsAvailFlagRequest *request, + SetCopysetsAvailFlagResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.SetCopysetsAvailFlag(copysets, false)); // normal response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, SetCopysetsAvailFlag(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const SetCopysetsAvailFlagRequest *request, - SetCopysetsAvailFlagResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const SetCopysetsAvailFlagRequest *request, + SetCopysetsAvailFlagResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.SetCopysetsAvailFlag(copysets, false)); } @@ -1542,28 +1507,27 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { // 
send rpc fail EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) .Times(6) - .WillRepeatedly(Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - brpc::Controller *cntl = - dynamic_cast<brpc::Controller *>(controller); - cntl->SetFailed("test"); - })); + .WillRepeatedly( + Invoke([](RpcController *controller, + const ListUnAvailCopySetsRequest *request, + ListUnAvailCopySetsResponse *response, Closure *done) { + brpc::ClosureGuard doneGuard(done); + brpc::Controller *cntl = + dynamic_cast<brpc::Controller *>(controller); + cntl->SetFailed("test"); + })); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(&copysets)); // return code not ok ListUnAvailCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeStorgeFail); EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListUnAvailCopySetsRequest *request, + ListUnAvailCopySetsResponse *response, + Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListUnAvailCopySets(&copysets)); // normal @@ -1574,13 +1538,12 @@ TEST_F(ToolMDSClientTest, ListUnAvailCopySets) { cp->set_copysetid(i); } EXPECT_CALL(*topoService, ListUnAvailCopySets(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<2>(response), - Invoke([](RpcController *controller, - const ListUnAvailCopySetsRequest *request, - ListUnAvailCopySetsResponse *response, - Closure *done){ - brpc::ClosureGuard doneGuard(done); - }))); + .WillOnce(DoAll( + SetArgPointee<2>(response), + Invoke([](RpcController *controller, + const ListUnAvailCopySetsRequest *request, + ListUnAvailCopySetsResponse *response, + 
Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, mdsClient.ListUnAvailCopySets(&copysets)); }