RocketMQ 4.7 Source Code Analysis, Part 5: Message Storage


Table of Contents

  • Recovering the consume queue and index files
    • Loading CommitLog files
    • Loading ConsumeQueue files
    • Loading index files
    • Choosing a recovery strategy based on whether the Broker stopped normally
      • Recovering all ConsumeQueue files
      • CommitLog recovery after a normal shutdown
      • CommitLog recovery after an abnormal shutdown
  • Storage flow
    • Validating the message
    • Storing messages asynchronously (default)
    • Storing messages synchronously
      • CommitLog flushing
        • Synchronous flush
        • Asynchronous flush
  • Updating the consume queue and index files in real time
    • Updating the ConsumeQueue from a message
      • Creating files asynchronously
      • Creating files synchronously (default)
    • Updating the Index file from a message
      • Getting or creating an IndexFile
      • Adding to the hash index file
  • Expired-file deletion mechanism
  • Summary

Recovering the consume queue and index files

Suppose the broker crashed for some reason after its last start, leaving the CommitLog, ConsumeQueue, and IndexFile data inconsistent. On its next start the broker therefore loads these files and attempts to repair the damage so that the data reaches eventual consistency.

Loading the store files

  1. public boolean load() {
  2. boolean result = true;
  3. try {
  4. //判断上一次退出是否正常。
  5. /*
  6. 其实现机制是Broker 在启动时创建${ROCKET_HOME}/store/abort 文件,
  7. 在退出时通过注册JVM 钩子函数删除abort 文件。
  8. 如果下一次启动时存在abort文件。说明Broker 是异常退出的,
  9. Commitlog 与Consumequeue数据有可能不一致,需要进行修复。
  10. */
  11. boolean lastExitOK = !this.isTempFileExist();
  12. log.info("last shutdown {}", lastExitOK ? "normally" : "abnormally");
  13. //加载延迟队列, RocketMQ 定时消息相关
  14. if (null != scheduleMessageService) {
  15. result = result && this.scheduleMessageService.load();
  16. }
  17. // load Commit Log 加载Commitlog 文件
  18. result = result && this.commitLog.load();
  19. // load Consume Queue 加载消息消费队列
  20. result = result && this.loadConsumeQueue();
  21. if (result) {
  22. //加载存储检测点,检测点主要记录commitlog 文件、Consumequeue 文件、
  23. //Index 索引文件的刷盘点,将在下文的文件刷盘机制中再次提及。
  24. this.storeCheckpoint =
  25. new StoreCheckpoint(StorePathConfigHelper.getStoreCheckpoint(this.messageStoreConfig.getStorePathRootDir()));
  26. //加载索引文件
  27. this.indexService.load(lastExitOK);
  28. //根据Broker 是否是正常停止执行不同的恢复策略
  29. this.recover(lastExitOK);
  30. log.info("load over, and the max phy offset = {}", this.getMaxPhyOffset());
  31. }
  32. } catch (Exception e) {
  33. log.error("load exception", e);
  34. result = false;
  35. }
  36. //如果没能加载成功
  37. if (!result) {
  38. //关闭分配请求
  39. this.allocateMappedFileService.shutdown();
  40. }
  41. return result;
  42. }

Determining whether the last shutdown was normal

  1. private boolean isTempFileExist() {
  2. String fileName = StorePathConfigHelper.getAbortFile(this.messageStoreConfig.getStorePathRootDir());
  3. File file = new File(fileName);
  4. return file.exists();
  5. }

That is, the abort file under the store root directory (the original post shows a screenshot here).
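As a minimal illustration of this abort-file convention (not the Broker's actual code; the path below is illustrative, the real one comes from StorePathConfigHelper.getAbortFile): the file is created at startup and deleted by a JVM shutdown hook on a clean exit, so finding it on the next boot means the last shutdown was abnormal.

    import java.io.File;
    import java.io.IOException;

    public class AbortFileSketch {
        public static void main(String[] args) throws IOException {
            // Illustrative location; the broker derives it from its store root directory.
            File abortFile = new File(System.getProperty("user.home"), "store/abort");

            // Same check as isTempFileExist(): the file surviving a restart means an abnormal exit.
            boolean lastExitOK = !abortFile.exists();
            System.out.println("last shutdown " + (lastExitOK ? "normally" : "abnormally"));

            // Created on startup ...
            abortFile.getParentFile().mkdirs();
            abortFile.createNewFile();

            // ... and removed by a JVM shutdown hook on a clean exit; after a crash the hook never runs.
            Runtime.getRuntime().addShutdownHook(new Thread(abortFile::delete));
        }
    }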

Loading CommitLog files

  1. public boolean load() {
  2. boolean result = this.mappedFileQueue.load();
  3. log.info("load commit log " + (result ? "OK" : "Failed"));
  4. return result;
  5. }
  6. public boolean load() {
  7. //加载${ROCKET_HOME}/store/commitlog 目录下所有文件
  8. File dir = new File(this.storePath);
  9. File[] files = dir.listFiles();
  10. if (files != null) {
  11. // ascending order
  12. //按照文件名排序
  13. Arrays.sort(files);
  14. for (File file : files) {
  15. //如果文件大小与配置的单个文件大小不一致,则终止加载并返回false,需人工检查
  16. if (file.length() != this.mappedFileSize) {
  17. log.warn(file + "\t" + file.length()
  18. + " length not matched message store config value, please check it manually");
  19. return false;
  20. }
  21. try {
  22. //创建MappedFile 对象
  23. MappedFile mappedFile = new MappedFile(file.getPath(), mappedFileSize);
  24. //将wrotePosition 、flushedPosition ,
  25. //committedPosition 三个指针都设置为文件大小。
  26. mappedFile.setWrotePosition(this.mappedFileSize);
  27. mappedFile.setFlushedPosition(this.mappedFileSize);
  28. mappedFile.setCommittedPosition(this.mappedFileSize);
  29. this.mappedFiles.add(mappedFile);
  30. log.info("load " + file.getPath() + " OK");
  31. } catch (IOException e) {
  32. log.error("load file " + file + " error", e);
  33. return false;
  34. }
  35. }
  36. }
  37. return true;
  38. }

Loading ConsumeQueue files

  1. private boolean loadConsumeQueue() {
  2. File dirLogic = new File(StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()));
  3. File[] fileTopicList = dirLogic.listFiles();
  4. if (fileTopicList != null) {
  5. //遍历消息消费队列根目录
  6. for (File fileTopic : fileTopicList) {
  7. //获取目录名
  8. String topic = fileTopic.getName();
  9. //加载每个消息消费队列下的文件,
  10. File[] fileQueueIdList = fileTopic.listFiles();
  11. if (fileQueueIdList != null) {
  12. for (File fileQueueId : fileQueueIdList) {
  13. int queueId;
  14. try {
  15. //获取文件名,就是queueId
  16. queueId = Integer.parseInt(fileQueueId.getName());
  17. } catch (NumberFormatException e) {
  18. continue;
  19. }
  20. //构建ConsumeQueue 对象
  21. //初始化ConsumeQueue 的topic 、queueId 、storePath 、mappedFileSize 属性
  22. ConsumeQueue logic = new ConsumeQueue(
  23. topic,
  24. queueId,
  25. StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()),
  26. this.getMessageStoreConfig().getMappedFileSizeConsumeQueue(),
  27. this);
  28. //存入consumeQueueTable
  29. this.putConsumeQueue(topic, queueId, logic);
  30. //加载ConsumeQueue
  31. if (!logic.load()) {
  32. return false;
  33. }
  34. }
  35. }
  36. }
  37. }
  38. log.info("load logics queue all over, OK");
  39. return true;
  40. }
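For reference, the directory tree this method walks has one level per topic and one per queue id, with fixed-size ConsumeQueue segments inside. A sketch of the layout (topic names and file names are illustrative; the segment size shown assumes the default of 300000 entries of 20 bytes each):

    // ${storePathRootDir}/consumequeue/
    //     TopicTest/                  <- one directory per topic (fileTopic)
    //         0/                      <- one directory per queue id (fileQueueId)
    //             00000000000000000000    <- ConsumeQueue segments, named by their starting offset
    //             00000000000006000000
    //         1/
    //             00000000000000000000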

Loading index files

IndexService

  1. //加载索引文件
  2. public boolean load(final boolean lastExitOK) {
  3. File dir = new File(this.storePath);
  4. File[] files = dir.listFiles();
  5. if (files != null) {
  6. // ascending order
  7. Arrays.sort(files);
  8. for (File file : files) {
  9. try {
  10. IndexFile f = new IndexFile(file.getPath(), this.hashSlotNum, this.indexNum, 0, 0);
  11. //加载索引文件
  12. f.load();
  13. //如果上次异常退出
  14. if (!lastExitOK) {
  15. //且索引文件上次刷盘时间小于该索引文件最大的消息时间戳该文件将立即销毁。
  16. if (f.getEndTimestamp() > this.defaultMessageStore.getStoreCheckpoint()
  17. .getIndexMsgTimestamp()) {
  18. f.destroy(0);
  19. continue;
  20. }
  21. }
  22. log.info("load index file OK, " + f.getFileName());
  23. //加入索引文件集合
  24. this.indexFileList.add(f);
  25. } catch (IOException e) {
  26. log.error("load file {} error", file, e);
  27. return false;
  28. } catch (NumberFormatException e) {
  29. log.error("load file {} error", file, e);
  30. }
  31. }
  32. }
  33. return true;
  34. }

Loading the index header
IndexFile

  1. public void load() {
  2. this.indexHeader.load();
  3. }

IndexHeader

  1. public void load() {
  2. //分别解析文件头部信息
  3. this.beginTimestamp.set(byteBuffer.getLong(beginTimestampIndex));
  4. this.endTimestamp.set(byteBuffer.getLong(endTimestampIndex));
  5. this.beginPhyOffset.set(byteBuffer.getLong(beginPhyoffsetIndex));
  6. this.endPhyOffset.set(byteBuffer.getLong(endPhyoffsetIndex));
  7. this.hashSlotCount.set(byteBuffer.getInt(hashSlotcountIndex));
  8. this.indexCount.set(byteBuffer.getInt(indexCountIndex));
  9. if (this.indexCount.get() <= 0) {
  10. this.indexCount.set(1);
  11. }
  12. }
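The header parsed above occupies the first 40 bytes of an IndexFile. A sketch of its layout in the order the fields are read (the byte offsets are inferred from that order, not quoted from the source):

    // Assumed 40-byte IndexFile header layout:
    //   bytes  0-7   beginTimestamp  (long)  store timestamp of the first indexed message
    //   bytes  8-15  endTimestamp    (long)  store timestamp of the last indexed message
    //   bytes 16-23  beginPhyOffset  (long)  commitlog offset of the first indexed message
    //   bytes 24-31  endPhyOffset    (long)  commitlog offset of the last indexed message
    //   bytes 32-35  hashSlotCount   (int)   number of hash slots in use
    //   bytes 36-39  indexCount      (int)   number of index entries written (kept >= 1)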

Choosing a recovery strategy based on whether the Broker stopped normally

DefaultMessageStore

  1. private void recover(final boolean lastExitOK) {
  2. //恢复所有ConsumeQueue文件,返回ConsumeQueue中记录的最大commitlog偏移量
  3. long maxPhyOffsetOfConsumeQueue = this.recoverConsumeQueue();
  4. if (lastExitOK) {
  5. //正常停止commitLog文件恢复
  6. this.commitLog.recoverNormally(maxPhyOffsetOfConsumeQueue);
  7. } else {
  8. //异常停止commitLog文件恢复
  9. this.commitLog.recoverAbnormally(maxPhyOffsetOfConsumeQueue);
  10. }
  11. this.recoverTopicQueueTable();
  12. }

Recovering all ConsumeQueue files

This returns the largest commitlog offset recorded across all ConsumeQueue files.

  1. //恢复所有ConsumeQueue文件
  2. private long recoverConsumeQueue() {
  3. long maxPhysicOffset = -1;
  4. for (ConcurrentMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
  5. //遍历每个ConsumeQueue
  6. for (ConsumeQueue logic : maps.values()) {
  7. //恢复
  8. logic.recover();
  9. if (logic.getMaxPhysicOffset() > maxPhysicOffset) {
  10. maxPhysicOffset = logic.getMaxPhysicOffset();
  11. }
  12. }
  13. }
  14. //返回ConsumeQueue中记录的最大commitlog偏移量
  15. return maxPhysicOffset;
  16. }

Recovering a ConsumeQueue file
ConsumeQueue

  1. public void recover() {
  2. final List<MappedFile> mappedFiles = this.mappedFileQueue.getMappedFiles();
  3. if (!mappedFiles.isEmpty()) {
  4. //从倒数第三个文件开始进行恢复
  5. int index = mappedFiles.size() - 3;
  6. //不足3 个文件,则从第一个文件开始恢复。
  7. if (index < 0)
  8. index = 0;
  9. int mappedFileSizeLogics = this.mappedFileSize;
  10. MappedFile mappedFile = mappedFiles.get(index);
  11. ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
  12. //获取该文件的初始偏移量
  13. long processOffset = mappedFile.getFileFromOffset();
  14. //当前文件已校验通过的offset
  15. long mappedFileOffset = 0;
  16. long maxExtAddr = 1;
  17. while (true) {
  18. for (int i = 0; i < mappedFileSizeLogics; i += CQ_STORE_UNIT_SIZE) {
  19. //每次获取一条消息
  20. long offset = byteBuffer.getLong();
  21. int size = byteBuffer.getInt();
  22. long tagsCode = byteBuffer.getLong();
  23. if (offset >= 0 && size > 0) {
  24. mappedFileOffset = i + CQ_STORE_UNIT_SIZE;
  25. //更新最大物理偏移 = 消息在commitlog中的偏移量 + 消息长度
  26. this.maxPhysicOffset = offset + size;
  27. if (isExtAddr(tagsCode)) {
  28. maxExtAddr = tagsCode;
  29. }
  30. } else {
  31. //说明遍历到没数据了
  32. log.info("recover current consume queue file over, " + mappedFile.getFileName() + " "
  33. + offset + " " + size + " " + tagsCode);
  34. break;
  35. }
  36. }
  37. //说明恢复到了结尾
  38. if (mappedFileOffset == mappedFileSizeLogics) {
  39. //重新计算下个遍历文件索引
  40. index++;
  41. if (index >= mappedFiles.size()) {
  42. //遍历到最后一个,则结束遍历
  43. log.info("recover last consume queue file over, last mapped file "
  44. + mappedFile.getFileName());
  45. break;
  46. } else {
  47. //恢复下一个文件
  48. mappedFile = mappedFiles.get(index);
  49. byteBuffer = mappedFile.sliceByteBuffer();
  50. processOffset = mappedFile.getFileFromOffset();
  51. mappedFileOffset = 0;
  52. log.info("recover next consume queue file, " + mappedFile.getFileName());
  53. }
  54. } else {
  55. //恢复消息队列结束
  56. log.info("recover current consume queue queue over " + mappedFile.getFileName() + " "
  57. + (processOffset + mappedFileOffset));
  58. break;
  59. }
  60. }
  61. //记录该文件的恢复的物理偏移量
  62. processOffset += mappedFileOffset;
  63. //设置刷盘指针
  64. this.mappedFileQueue.setFlushedWhere(processOffset);
  65. //当前数据提交指针
  66. this.mappedFileQueue.setCommittedWhere(processOffset);
  67. //删除offset 之后的所有文件
  68. this.mappedFileQueue.truncateDirtyFiles(processOffset);
  69. if (isExtReadEnable()) {
  70. this.consumeQueueExt.recover();
  71. log.info("Truncate consume queue extend file by max {}", maxExtAddr);
  72. this.consumeQueueExt.truncateByMaxAddress(maxExtAddr);
  73. }
  74. }
  75. }
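Each entry read in the loop above is a fixed 20-byte store unit (CQ_STORE_UNIT_SIZE). A minimal stand-alone sketch of that layout and of the reads performed per entry:

    import java.nio.ByteBuffer;

    // One ConsumeQueue store unit: 8-byte commitlog offset + 4-byte size + 8-byte tags code.
    final class CqUnitSketch {
        static final int CQ_STORE_UNIT_SIZE = 20;

        static void readOne(ByteBuffer buf) {
            long offset = buf.getLong();   // physical offset of the message in the commitlog
            int size = buf.getInt();       // total length of the message entry in the commitlog
            long tagsCode = buf.getLong(); // tag hash code, or an extend-file address if isExtAddr(tagsCode)
            System.out.printf("offset=%d size=%d tagsCode=%d%n", offset, size, tagsCode);
        }
    }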

Deleting all files beyond the given offset
MappedFileQueue

  1. //删除offset 之后的所有文件
  2. public void truncateDirtyFiles(long offset) {
  3. List<MappedFile> willRemoveFiles = new ArrayList<MappedFile>();
  4. //遍历目录下文件
  5. for (MappedFile file : this.mappedFiles) {
  6. //获取文件尾部偏移
  7. long fileTailOffset = file.getFileFromOffset() + this.mappedFileSize;
  8. //若文件尾部偏移>offset
  9. if (fileTailOffset > offset) {
  10. //若offset >= 文件起始偏移
  11. //说明有效偏移落在当前文件内
  12. if (offset >= file.getFileFromOffset()) {
  13. //分别设置当前文件的刷盘,提交,写入指针.
  14. file.setWrotePosition((int) (offset % this.mappedFileSize));
  15. file.setCommittedPosition((int) (offset % this.mappedFileSize));
  16. file.setFlushedPosition((int) (offset % this.mappedFileSize));
  17. } else {
  18. //说明该文件是有效文件后面创建的
  19. //释放MappedFile 占用的内存资源(内存映射与内存通道等)
  20. file.destroy(1000);
  21. //加入待删除集合
  22. willRemoveFiles.add(file);
  23. }
  24. }
  25. }
  26. //删除文件
  27. this.deleteExpiredFile(willRemoveFiles);
  28. }

Destroying a MappedFile
MappedFile

  1. public boolean destroy(final long intervalForcibly) {
  2. //关闭MappedFile
  3. this.shutdown(intervalForcibly);
  4. //判断是否清理完成
  5. if (this.isCleanupOver()) {
  6. try {
  7. //关闭通道
  8. this.fileChannel.close();
  9. log.info("close file channel " + this.fileName + " OK");
  10. long beginTime = System.currentTimeMillis();
  11. //删除整个物理文件
  12. boolean result = this.file.delete();
  13. log.info("delete file[REF:" + this.getRefCount() + "] " + this.fileName
  14. + (result ? " OK, " : " Failed, ") + "W:" + this.getWrotePosition() + " M:"
  15. + this.getFlushedPosition() + ", "
  16. + UtilAll.computeElapsedTimeMilliseconds(beginTime));
  17. } catch (Exception e) {
  18. log.warn("close file channel " + this.fileName + " Failed. ", e);
  19. }
  20. return true;
  21. } else {
  22. log.warn("destroy mapped file[REF:" + this.getRefCount() + "] " + this.fileName
  23. + " Failed. cleanupOver: " + this.cleanupOver);
  24. }
  25. return false;
  26. }

Shutting down a MappedFile
ReferenceResource

  1. public void shutdown(final long intervalForcibly) {
  2. //默认true
  3. if (this.available) {
  4. //初次调用时available 为true ,设置available为false
  5. this.available = false;
  6. //设置初次关闭的时间戳
  7. this.firstShutdownTimestamp = System.currentTimeMillis();
  8. //释放资源,引用次数小于1 的情况下才会释放资源
  9. this.release();
  10. } else if (this.getRefCount() > 0) {
  11. //如果引用次数大于0
  12. //对比当前时间与firstShutdownTimestamp ,如果已经超过了其最大拒绝存活期,每执行
  13. //一次,将引用数减少1000 ,直到引用数小于0 时通过执行release方法释放资源。
  14. if ((System.currentTimeMillis() - this.firstShutdownTimestamp) >= intervalForcibly) {
  15. this.refCount.set(-1000 - this.getRefCount());
  16. this.release();
  17. }
  18. }
  19. }

Releasing the reference and its resources
ReferenceResource

  1. public void release() {
  2. //引用减1
  3. long value = this.refCount.decrementAndGet();
  4. if (value > 0)
  5. return;
  6. synchronized (this) {
  7. //释放堆外内存
  8. this.cleanupOver = this.cleanup(value);
  9. }
  10. }

Releasing the mapped (off-heap) memory
MappedFile

  1. public boolean cleanup(final long currentRef) {
  2. //如果available为true ,表示MappedFile当前可用,无须清理,
  3. if (this.isAvailable()) {
  4. log.error("this file[REF:" + currentRef + "] " + this.fileName
  5. + " have not shutdown, stop unmapping.");
  6. return false;
  7. }
  8. //如果资源已经被清除,返回true
  9. if (this.isCleanupOver()) {
  10. log.error("this file[REF:" + currentRef + "] " + this.fileName
  11. + " have cleanup, do not do it again.");
  12. return true;
  13. }
  14. //如果是堆外内存,调用堆外内存的cleanup 方法清除
  15. clean(this.mappedByteBuffer);
  16. //维护虚拟内存
  17. TOTAL_MAPPED_VIRTUAL_MEMORY.addAndGet(this.fileSize * (-1));
  18. //对象个数-1
  19. TOTAL_MAPPED_FILES.decrementAndGet();
  20. log.info("unmap file[REF:" + currentRef + "] " + this.fileName + " OK");
  21. return true;
  22. }

Deleting expired files
MappedFileQueue

  1. void deleteExpiredFile(List<MappedFile> files) {
  2. if (!files.isEmpty()) {
  3. Iterator<MappedFile> iterator = files.iterator();
  4. while (iterator.hasNext()) {
  5. MappedFile cur = iterator.next();
  6. //mappedFiles,不包含,就跳过.
  7. if (!this.mappedFiles.contains(cur)) {
  8. iterator.remove();
  9. log.info("This mappedFile {} is not contained by mappedFiles, so skip it.", cur.getFileName());
  10. }
  11. }
  12. try {
  13. //从mappedFiles删除所有
  14. if (!this.mappedFiles.removeAll(files)) {
  15. log.error("deleteExpiredFile remove failed.");
  16. }
  17. } catch (Exception e) {
  18. log.error("deleteExpiredFile has exception.", e);
  19. }
  20. }
  21. }

CommitLog recovery after a normal shutdown

CommitLog

  1. public void recoverNormally(long maxPhyOffsetOfConsumeQueue) {
  2. //在进行文件恢复时查找消息时是否验证CRC
  3. boolean checkCRCOnRecover = this.defaultMessageStore.getMessageStoreConfig().isCheckCRCOnRecover();
  4. final List<MappedFile> mappedFiles = this.mappedFileQueue.getMappedFiles();
  5. if (!mappedFiles.isEmpty()) {
  6. // Began to recover from the last third file
  7. //从倒数第三个文件开始进行恢复
  8. int index = mappedFiles.size() - 3;
  9. //如果不足3个文件,则从第一个文件开始恢复。
  10. if (index < 0)
  11. index = 0;
  12. MappedFile mappedFile = mappedFiles.get(index);
  13. ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
  14. //Commitlog 文件已确认的物理偏移量,等于mappedFile.getFileFromOffset 加上mappedFileOffset 。
  15. long processOffset = mappedFile.getFileFromOffset();
  16. //当前文件已校验通过的offset ,
  17. long mappedFileOffset = 0;
  18. while (true) {
  19. DispatchRequest dispatchRequest = this.checkMessageAndReturnSize(byteBuffer, checkCRCOnRecover);
  20. //取出一条消息
  21. int size = dispatchRequest.getMsgSize();
  22. // Normal data
  23. //查找结果为true 并且消息的长度大于0 表示消息正确,
  24. if (dispatchRequest.isSuccess() && size > 0) {
  25. //mappedFileOffset 指针向前移动本条消息的长度
  26. mappedFileOffset += size;
  27. }
  28. // Come the end of the file, switch to the next file Since the
  29. // return 0 representatives met last hole,
  30. // this can not be included in truncate offset
  31. else if (dispatchRequest.isSuccess() && size == 0) {
  32. //如果查找结果为true 并且消息的长度等于0 ,表示已到该文件的末尾
  33. index++;
  34. //没有文件则退出
  35. if (index >= mappedFiles.size()) {
  36. // Current branch can not happen
  37. log.info("recover last 3 physics file over, last mapped file " + mappedFile.getFileName());
  38. break;
  39. } else {
  40. //若还有文件
  41. mappedFile = mappedFiles.get(index);
  42. //重置变量,继续遍历下个文件,并重新进入循环
  43. byteBuffer = mappedFile.sliceByteBuffer();
  44. processOffset = mappedFile.getFileFromOffset();
  45. mappedFileOffset = 0;
  46. log.info("recover next physics file, " + mappedFile.getFileName());
  47. }
  48. }
  49. // Intermediate file read error
  50. else if (!dispatchRequest.isSuccess()) {
  51. //读取消息错误,直接结束
  52. log.info("recover physics file end, " + mappedFile.getFileName());
  53. break;
  54. }
  55. }
  56. processOffset += mappedFileOffset;
  57. this.mappedFileQueue.setFlushedWhere(processOffset);
  58. this.mappedFileQueue.setCommittedWhere(processOffset);
  59. //删除offset 之后的所有文件
  60. this.mappedFileQueue.truncateDirtyFiles(processOffset);
  61. // Clear ConsumeQueue redundant data
  62. //若ConsumeQueue记录的最大commitlog偏移 >= commitlog恢复出的最大有效偏移
  63. if (maxPhyOffsetOfConsumeQueue >= processOffset) {
  64. log.warn("maxPhyOffsetOfConsumeQueue({}) >= processOffset({}), truncate dirty logic files", maxPhyOffsetOfConsumeQueue, processOffset);
  65. //删除processOffset之后存储的ConsumeQueue脏数据文件
  66. this.defaultMessageStore.truncateDirtyLogicFiles(processOffset);
  67. }
  68. } else {
  69. // Commitlog case files are deleted
  70. log.warn("The commitlog files are deleted, and delete the consume queue files");
  71. //Commitlog文件不存在
  72. this.mappedFileQueue.setFlushedWhere(0);
  73. this.mappedFileQueue.setCommittedWhere(0);
  74. //销毁所有ConsumeQueue文件
  75. this.defaultMessageStore.destroyLogics();
  76. }
  77. }

Deleting ConsumeQueue dirty data stored beyond processOffset

  1. public void truncateDirtyLogicFiles(long phyOffset) {
  2. ConcurrentMap<String, ConcurrentMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;
  3. for (ConcurrentMap<Integer, ConsumeQueue> maps : tables.values()) {
  4. //遍历每个队列目录
  5. for (ConsumeQueue logic : maps.values()) {
  6. //每个目录执行删除
  7. logic.truncateDirtyLogicFiles(phyOffset);
  8. }
  9. }
  10. }

Truncating ConsumeQueue entries beyond the given physical offset (phyOffet)
ConsumeQueue

  1. public void truncateDirtyLogicFiles(long phyOffet) {
  2. //获取逻辑文件大小
  3. int logicFileSize = this.mappedFileSize;
  4. //设置commitlog最大偏移
  5. this.maxPhysicOffset = phyOffet;
  6. long maxExtAddr = 1;
  7. while (true) {
  8. //获取最后一个文件
  9. MappedFile mappedFile = this.mappedFileQueue.getLastMappedFile();
  10. if (mappedFile != null) {
  11. ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
  12. //清空刷盘,写入,提交位置
  13. mappedFile.setWrotePosition(0);
  14. mappedFile.setCommittedPosition(0);
  15. mappedFile.setFlushedPosition(0);
  16. for (int i = 0; i < logicFileSize; i += CQ_STORE_UNIT_SIZE) {
  17. //获取每条消息
  18. long offset = byteBuffer.getLong();
  19. int size = byteBuffer.getInt();
  20. long tagsCode = byteBuffer.getLong();
  21. //说明为该文件第一条消息
  22. if (0 == i) {
  23. //该文件记录第一条的commitlog偏移>=phyOffet
  24. //则说明该文件记录消息无效
  25. if (offset >= phyOffet) {
  26. //删除文件
  27. this.mappedFileQueue.deleteLastMappedFile();
  28. break;
  29. } else {
  30. //继续遍历下条消息
  31. int pos = i + CQ_STORE_UNIT_SIZE;
  32. //重新设置刷盘,写入,提交位置
  33. mappedFile.setWrotePosition(pos);
  34. mappedFile.setCommittedPosition(pos);
  35. mappedFile.setFlushedPosition(pos);
  36. //设置刷入commitlog偏移
  37. this.maxPhysicOffset = offset + size;
  38. // This maybe not take effect, when not every consume queue has extend file.
  39. if (isExtAddr(tagsCode)) {
  40. maxExtAddr = tagsCode;
  41. }
  42. }
  43. } else {
  44. //不是第一条消息的处理
  45. //说明消息有效
  46. if (offset >= 0 && size > 0) {
  47. if (offset >= phyOffet) {
  48. //这里直接返回,而不是删除文件
  49. //是因为该文件记录的之前消息是有效的
  50. //为什么不清空后面的消息了,这个采用后续消息覆盖解决
  51. //而为了保证消息刷盘,写入,以及提交的位置正确,在前一次执行消息解析的时候
  52. //已经存储了
  53. return;
  54. }
  55. //继续遍历下条消息
  56. int pos = i + CQ_STORE_UNIT_SIZE;
  57. //重新设置刷盘,写入,提交位置
  58. mappedFile.setWrotePosition(pos);
  59. mappedFile.setCommittedPosition(pos);
  60. mappedFile.setFlushedPosition(pos);
  61. //设置刷入commitlog偏移
  62. this.maxPhysicOffset = offset + size;
  63. if (isExtAddr(tagsCode)) {
  64. maxExtAddr = tagsCode;
  65. }
  66. //遍历到最后一条消息,则返回
  67. if (pos == logicFileSize) {
  68. return;
  69. }
  70. } else {
  71. //遍历到文件无效消息,则返回
  72. return;
  73. }
  74. }
  75. }
  76. } else {
  77. //没有文件,直接退出
  78. break;
  79. }
  80. }
  81. if (isExtReadEnable()) {
  82. this.consumeQueueExt.truncateByMaxAddress(maxExtAddr);
  83. }
  84. }

Destroying all ConsumeQueue files
DefaultMessageStore

  1. public void destroyLogics() {
  2. for (ConcurrentMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
  3. //遍历每一个ConsumeQueue目录
  4. for (ConsumeQueue logic : maps.values()) {
  5. //销毁每个目录所有ConsumeQueue文件
  6. logic.destroy();
  7. }
  8. }
  9. }

ConsumeQueue

  1. public void destroy() {
  2. this.maxPhysicOffset = -1;
  3. this.minLogicOffset = 0;
  4. //将消息消费队列目录下的所有文件全部删除。
  5. this.mappedFileQueue.destroy();
  6. if (isExtReadEnable()) {
  7. this.consumeQueueExt.destroy();
  8. }
  9. }
  10. public void destroy() {
  11. //遍历目录下每个consumeQueue文件
  12. for (MappedFile mf : this.mappedFiles) {
  13. //销毁通道,物理文件
  14. mf.destroy(1000 * 3);
  15. }
  16. //清除集合
  17. this.mappedFiles.clear();
  18. this.flushedWhere = 0;
  19. // delete parent directory
  20. //删除上级目录
  21. File file = new File(storePath);
  22. if (file.isDirectory()) {
  23. file.delete();
  24. }
  25. }

CommitLog recovery after an abnormal shutdown

CommitLog

  1. //Broker 异常停止文件恢复
  2. @Deprecated
  3. public void recoverAbnormally(long maxPhyOffsetOfConsumeQueue) {
  4. // recover by the minimum time stamp
  5. boolean checkCRCOnRecover = this.defaultMessageStore.getMessageStoreConfig().isCheckCRCOnRecover();
  6. final List<MappedFile> mappedFiles = this.mappedFileQueue.getMappedFiles();
  7. if (!mappedFiles.isEmpty()) {
  8. // Looking beginning to recover from which file
  9. int index = mappedFiles.size() - 1;
  10. MappedFile mappedFile = null;
  11. //从最后一个文件,往前遍历
  12. for (; index >= 0; index--) {
  13. mappedFile = mappedFiles.get(index);
  14. //判断一个消息文件是一个正确的文件
  15. if (this.isMappedFileMatchedRecover(mappedFile)) {
  16. log.info("recover from this mapped file " + mappedFile.getFileName());
  17. break;
  18. }
  19. }
  20. //往前遍历到第一个文件仍未找到符合条件的文件,则从第一个文件开始恢复
  21. if (index < 0) {
  22. index = 0;
  23. mappedFile = mappedFiles.get(index);
  24. }
  25. ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
  26. long processOffset = mappedFile.getFileFromOffset();
  27. long mappedFileOffset = 0;
  28. //遍历消息
  29. while (true) {
  30. //从result 返回的ByteBuffer 中循环读取消息,一次读取一条,
  31. //反序列化并创建DispatchRequest对象,主要记录一条消息数据
  32. DispatchRequest dispatchRequest = this.checkMessageAndReturnSize(byteBuffer, checkCRCOnRecover);
  33. int size = dispatchRequest.getMsgSize();
  34. if (dispatchRequest.isSuccess()) {
  35. // Normal data
  36. //说明有数据
  37. if (size > 0) {
  38. mappedFileOffset += size;
  39. //是否开启消息重复(duplicationEnable)
  40. if (this.defaultMessageStore.getMessageStoreConfig().isDuplicationEnable()) {
  41. //仅当消息物理偏移量小于CommitLog的确认偏移(confirmOffset)时才转发
  42. if (dispatchRequest.getCommitLogOffset() < this.defaultMessageStore.getConfirmOffset()) {
  43. //调用文件转发请求,分别同步index和queue文件
  44. this.defaultMessageStore.doDispatch(dispatchRequest);
  45. }
  46. } else {
  47. this.defaultMessageStore.doDispatch(dispatchRequest);
  48. }
  49. }
  50. // Come the end of the file, switch to the next file
  51. // Since the return 0 representatives met last hole, this can
  52. // not be included in truncate offset
  53. else if (size == 0) {
  54. //无效数据
  55. //遍历下一个文件
  56. index++;
  57. //遍历到了最后一个文件,直接退出
  58. if (index >= mappedFiles.size()) {
  59. // The current branch under normal circumstances should
  60. // not happen
  61. log.info("recover physics file over, last mapped file " + mappedFile.getFileName());
  62. break;
  63. } else {
  64. mappedFile = mappedFiles.get(index);
  65. byteBuffer = mappedFile.sliceByteBuffer();
  66. processOffset = mappedFile.getFileFromOffset();
  67. mappedFileOffset = 0;
  68. log.info("recover next physics file, " + mappedFile.getFileName());
  69. }
  70. }
  71. } else {
  72. //解析数据不成功,直接结束
  73. log.info("recover physics file end, " + mappedFile.getFileName() + " pos=" + byteBuffer.position());
  74. break;
  75. }
  76. }
  77. processOffset += mappedFileOffset;
  78. this.mappedFileQueue.setFlushedWhere(processOffset);
  79. this.mappedFileQueue.setCommittedWhere(processOffset);
  80. //删除offset 之后的所有文件
  81. this.mappedFileQueue.truncateDirtyFiles(processOffset);
  82. // Clear ConsumeQueue redundant data
  83. if (maxPhyOffsetOfConsumeQueue >= processOffset) {
  84. log.warn("maxPhyOffsetOfConsumeQueue({}) >= processOffset({}), truncate dirty logic files", maxPhyOffsetOfConsumeQueue, processOffset);
  85. this.defaultMessageStore.truncateDirtyLogicFiles(processOffset);
  86. }
  87. }
  88. // Commitlog case files are deleted
  89. else {
  90. log.warn("The commitlog files are deleted, and delete the consume queue files");
  91. //尚未找到文件
  92. //设置commitlog 目录的flushedWhere 、committedWhere指针都为0
  93. this.mappedFileQueue.setFlushedWhere(0);
  94. this.mappedFileQueue.setCommittedWhere(0);
  95. //销毁消息消费队列文件。
  96. this.defaultMessageStore.destroyLogics();
  97. }
  98. }

Determining whether a CommitLog file is intact and valid for recovery
CommitLog

  1. //判断一个消息文件是一个正确的文件
  2. private boolean isMappedFileMatchedRecover(final MappedFile mappedFile) {
  3. ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
  4. //通过魔数判断该文件是否符合commitlog 消息文件的存储格式。
  5. int magicCode = byteBuffer.getInt(MessageDecoder.MESSAGE_MAGIC_CODE_POSTION);
  6. if (magicCode != MESSAGE_MAGIC_CODE) {
  7. return false;
  8. }
  9. int sysFlag = byteBuffer.getInt(MessageDecoder.SYSFLAG_POSITION);
  10. int bornhostLength = (sysFlag & MessageSysFlag.BORNHOST_V6_FLAG) == 0 ? 8 : 20;
  11. int msgStoreTimePos = 4 + 4 + 4 + 4 + 4 + 8 + 8 + 4 + 8 + bornhostLength;
  12. //若存储时间为0,说明该消息存储文件中未存储任何消息。
  13. long storeTimestamp = byteBuffer.getLong(msgStoreTimePos);
  14. if (0 == storeTimestamp) {
  15. return false;
  16. }
  17. //如果messagelndexEnable 为true , 表示索引文件的刷盘时间点也参与计算。
  18. if (this.defaultMessageStore.getMessageStoreConfig().isMessageIndexEnable()
  19. && this.defaultMessageStore.getMessageStoreConfig().isMessageIndexSafe()) {
  20. if (storeTimestamp <= this.defaultMessageStore.getStoreCheckpoint().getMinTimestampIndex()) {
  21. log.info("find check timestamp, {} {}",
  22. storeTimestamp,
  23. UtilAll.timeMillisToHumanString(storeTimestamp));
  24. return true;
  25. }
  26. } else {
  27. //文件第一条消息的时间戳小于文件检测点说明该文件部分消息是可靠的,
  28. if (storeTimestamp <= this.defaultMessageStore.getStoreCheckpoint().getMinTimestamp()) {
  29. log.info("find check timestamp, {} {}",
  30. storeTimestamp,
  31. UtilAll.timeMillisToHumanString(storeTimestamp));
  32. return true;
  33. }
  34. }
  35. return false;
  36. }

The index file's flush timestamp also participates in the calculation
StoreCheckpoint

  1. public long getMinTimestampIndex() {
  2. //取三者最小值: commitlog文件刷盘时间点、消息消费队列文件刷盘时间点、索引文件刷盘时间点(getMinTimestamp已取前两者较小值)
  3. return Math.min(this.getMinTimestamp(), this.indexMsgTimestamp);
  4. }
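For context, the checkpoint tracks three flush timestamps, and getMinTimestamp() already takes the smaller of the commitlog and consume-queue ones, so the value above is effectively the minimum of all three. A rough sketch of the computation (ignoring any safety margin the real implementation may subtract):

    // Sketch only: what getMinTimestampIndex() effectively computes.
    final class CheckpointSketch {
        static long minTimestampIndex(long physicMsgTimestamp,   // last commitlog flush time
                                      long logicsMsgTimestamp,   // last consume-queue flush time
                                      long indexMsgTimestamp) {  // last index-file flush time
            long minTimestamp = Math.min(physicMsgTimestamp, logicsMsgTimestamp);
            return Math.min(minTimestamp, indexMsgTimestamp);
        }
    }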

(The original post shows the storage architecture diagram here.)
1) CommitLog: the message store file. Messages of every topic are stored in CommitLog files.
2) ConsumeQueue: the consume queue. After a message reaches the CommitLog it is asynchronously dispatched to the consume queues, from which consumers consume it.
3) IndexFile: the message index file, which mainly stores the mapping between message keys and offsets.
4) Transaction state service: stores the transaction state of each message.
5) Scheduled message service: each delay level corresponds to one consume queue, which stores the pull progress of the delay queue.

RocketMQ stores the messages of all topics in the same file (the CommitLog), which ensures sequential file writes as messages are sent.

RocketMQ therefore introduces ConsumeQueue files: each topic contains multiple consume queues, and each consume queue has its own files.

The IndexFile is designed to speed up message retrieval: it allows a message to be looked up quickly in the CommitLog by its properties (such as the message key).
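For orientation, an IndexFile is essentially an on-disk hash table that maps a key's hash to commitlog offsets. The following layout sketch reflects the commonly documented 4.x structure and should be read as an approximation, not quoted constants:

    // IndexFile body (after the 40-byte header described earlier):
    //   hash slots  : hashSlotNum x 4 bytes; each slot stores the sequence number of the most
    //                 recent index entry whose key hashed into that slot
    //   index items : indexNum x 20 bytes, each entry holding
    //                   4 bytes  key hash
    //                   8 bytes  commitlog physical offset
    //                   4 bytes  time diff (seconds) relative to the header's beginTimestamp
    //                   4 bytes  index of the previous entry in the same slot (the hash chain)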

Storage flow

SendMessageProcessor

  1. private CompletableFuture<RemotingCommand> asyncSendMessage(ChannelHandlerContext ctx, RemotingCommand request,
  2. SendMessageContext mqtraceContext,
  3. SendMessageRequestHeader requestHeader) {
  4. //校验topic,以及创建topic配置文件
  5. final RemotingCommand response = preSend(ctx, request, requestHeader);
  6. final SendMessageResponseHeader responseHeader = (SendMessageResponseHeader)response.readCustomHeader();
  7. if (response.getCode() != -1) {
  8. return CompletableFuture.completedFuture(response);
  9. }
  10. final byte[] body = request.getBody();
  11. //获取topic配置
  12. int queueIdInt = requestHeader.getQueueId();
  13. TopicConfig topicConfig = this.brokerController.getTopicConfigManager().selectTopicConfig(requestHeader.getTopic());
  14. if (queueIdInt < 0) {
  15. queueIdInt = randomQueueId(topicConfig.getWriteQueueNums());
  16. }
  17. //创建消息扩展,主要封装一些其他参数
  18. MessageExtBrokerInner msgInner = new MessageExtBrokerInner();
  19. msgInner.setTopic(requestHeader.getTopic());
  20. msgInner.setQueueId(queueIdInt);
  21. if (!handleRetryAndDLQ(requestHeader, response, request, msgInner, topicConfig)) {
  22. return CompletableFuture.completedFuture(response);
  23. }
  24. msgInner.setBody(body);
  25. msgInner.setFlag(requestHeader.getFlag());
  26. MessageAccessor.setProperties(msgInner, MessageDecoder.string2messageProperties(requestHeader.getProperties()));
  27. msgInner.setPropertiesString(requestHeader.getProperties());
  28. msgInner.setBornTimestamp(requestHeader.getBornTimestamp());
  29. msgInner.setBornHost(ctx.channel().remoteAddress());
  30. msgInner.setStoreHost(this.getStoreHost());
  31. msgInner.setReconsumeTimes(requestHeader.getReconsumeTimes() == null ? 0 : requestHeader.getReconsumeTimes());
  32. String clusterName = this.brokerController.getBrokerConfig().getBrokerClusterName();
  33. MessageAccessor.putProperty(msgInner, MessageConst.PROPERTY_CLUSTER, clusterName);
  34. msgInner.setPropertiesString(MessageDecoder.messageProperties2String(msgInner.getProperties()));
  35. CompletableFuture<PutMessageResult> putMessageResult = null;
  36. Map<String, String> origProps = MessageDecoder.string2messageProperties(requestHeader.getProperties());
  37. String transFlag = origProps.get(MessageConst.PROPERTY_TRANSACTION_PREPARED);
  38. if (transFlag != null && Boolean.parseBoolean(transFlag)) {
  39. if (this.brokerController.getBrokerConfig().isRejectTransactionMessage()) {
  40. response.setCode(ResponseCode.NO_PERMISSION);
  41. response.setRemark(
  42. "the broker[" + this.brokerController.getBrokerConfig().getBrokerIP1()
  43. + "] sending transaction message is forbidden");
  44. return CompletableFuture.completedFuture(response);
  45. }
  46. putMessageResult = this.brokerController.getTransactionalMessageService().asyncPrepareMessage(msgInner);
  47. } else {
  48. //异步存储消息
  49. putMessageResult = this.brokerController.getMessageStore().asyncPutMessage(msgInner);
  50. }
  51. return handlePutMessageResultFuture(putMessageResult, response, request, msgInner, responseHeader, mqtraceContext, ctx, queueIdInt);
  52. }

Validating the message

SendMessageProcessor

  1. private RemotingCommand preSend(ChannelHandlerContext ctx, RemotingCommand request,
  2. SendMessageRequestHeader requestHeader) {
  3. final RemotingCommand response = RemotingCommand.createResponseCommand(SendMessageResponseHeader.class);
  4. response.setOpaque(request.getOpaque());
  5. response.addExtField(MessageConst.PROPERTY_MSG_REGION, this.brokerController.getBrokerConfig().getRegionId());
  6. response.addExtField(MessageConst.PROPERTY_TRACE_SWITCH, String.valueOf(this.brokerController.getBrokerConfig().isTraceOn()));
  7. log.debug("Receive SendMessage request command {}", request);
  8. final long startTimestamp = this.brokerController.getBrokerConfig().getStartAcceptSendRequestTimeStamp();
  9. if (this.brokerController.getMessageStore().now() < startTimestamp) {
  10. response.setCode(ResponseCode.SYSTEM_ERROR);
  11. response.setRemark(String.format("broker unable to service, until %s", UtilAll.timeMillisToHumanString2(startTimestamp)));
  12. return response;
  13. }
  14. response.setCode(-1);
  15. //校验消息
  16. super.msgCheck(ctx, requestHeader, response);
  17. if (response.getCode() != -1) {
  18. return response;
  19. }
  20. return response;
  21. }

AbstractSendMessageProcessor

  1. protected RemotingCommand msgCheck(final ChannelHandlerContext ctx,
  2. final SendMessageRequestHeader requestHeader, final RemotingCommand response) {
  3. //检查该Broker 是否有写权限。
  4. if (!PermName.isWriteable(this.brokerController.getBrokerConfig().getBrokerPermission())
  5. && this.brokerController.getTopicConfigManager().isOrderTopic(requestHeader.getTopic())) {
  6. response.setCode(ResponseCode.NO_PERMISSION);
  7. response.setRemark("the broker[" + this.brokerController.getBrokerConfig().getBrokerIP1()
  8. + "] sending message is forbidden");
  9. return response;
  10. }
  11. //验证topic长度以及命名格式
  12. if (!TopicValidator.validateTopic(requestHeader.getTopic(), response)) {
  13. return response;
  14. }
  15. //检测该topic是否可以发送消息,默认主题不能发送,仅仅供路由查找。
  16. if (TopicValidator.isNotAllowedSendTopic(requestHeader.getTopic(), response)) {
  17. return response;
  18. }
  19. //获取该topic对应topicConfig
  20. TopicConfig topicConfig =
  21. this.brokerController.getTopicConfigManager().selectTopicConfig(requestHeader.getTopic());
  22. if (null == topicConfig) {
  23. int topicSysFlag = 0;
  24. if (requestHeader.isUnitMode()) {
  25. if (requestHeader.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
  26. topicSysFlag = TopicSysFlag.buildSysFlag(false, true);
  27. } else {
  28. topicSysFlag = TopicSysFlag.buildSysFlag(true, false);
  29. }
  30. }
  31. log.warn("the topic {} not exist, producer: {}", requestHeader.getTopic(), ctx.channel().remoteAddress());
  32. //创建topicConfig
  33. topicConfig = this.brokerController.getTopicConfigManager().createTopicInSendMessageMethod(
  34. requestHeader.getTopic(),
  35. requestHeader.getDefaultTopic(),
  36. RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
  37. requestHeader.getDefaultTopicQueueNums(), topicSysFlag);
  38. if (null == topicConfig) {
  39. if (requestHeader.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
  40. topicConfig =
  41. this.brokerController.getTopicConfigManager().createTopicInSendMessageBackMethod(
  42. requestHeader.getTopic(), 1, PermName.PERM_WRITE | PermName.PERM_READ,
  43. topicSysFlag);
  44. }
  45. }
  46. if (null == topicConfig) {
  47. response.setCode(ResponseCode.TOPIC_NOT_EXIST);
  48. response.setRemark("topic[" + requestHeader.getTopic() + "] not exist, apply first please!"
  49. + FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL));
  50. return response;
  51. }
  52. }
  53. int queueIdInt = requestHeader.getQueueId();
  54. int idValid = Math.max(topicConfig.getWriteQueueNums(), topicConfig.getReadQueueNums());
  55. if (queueIdInt >= idValid) {
  56. String errorInfo = String.format("request queueId[%d] is illegal, %s Producer: %s",
  57. queueIdInt,
  58. topicConfig.toString(),
  59. RemotingHelper.parseChannelRemoteAddr(ctx.channel()));
  60. log.warn(errorInfo);
  61. response.setCode(ResponseCode.SYSTEM_ERROR);
  62. response.setRemark(errorInfo);
  63. return response;
  64. }
  65. return response;
  66. }

TopicConfigManager

  1. public TopicConfig createTopicInSendMessageMethod(final String topic, final String defaultTopic,
  2. final String remoteAddress, final int clientDefaultTopicQueueNums, final int topicSysFlag) {
  3. TopicConfig topicConfig = null;
  4. boolean createNew = false;
  5. try {
  6. if (this.lockTopicConfigTable.tryLock(LOCK_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) {
  7. try {
  8. topicConfig = this.topicConfigTable.get(topic);
  9. //再次获取topic配置文件
  10. if (topicConfig != null)
  11. return topicConfig;
  12. //获取默认topic TBW102配置
  13. TopicConfig defaultTopicConfig = this.topicConfigTable.get(defaultTopic);
  14. if (defaultTopicConfig != null) {
  15. //isAutoCreateTopicEnable=true才能发现默认topic
  16. if (defaultTopic.equals(TopicValidator.AUTO_CREATE_TOPIC_KEY_TOPIC)) {
  17. if (!this.brokerController.getBrokerConfig().isAutoCreateTopicEnable()) {
  18. defaultTopicConfig.setPerm(PermName.PERM_READ | PermName.PERM_WRITE);
  19. }
  20. }
  21. //是否允许继承defaultTopicConfig属性
  22. if (PermName.isInherited(defaultTopicConfig.getPerm())) {
  23. topicConfig = new TopicConfig(topic);
  24. int queueNums =
  25. clientDefaultTopicQueueNums > defaultTopicConfig.getWriteQueueNums() ? defaultTopicConfig
  26. .getWriteQueueNums() : clientDefaultTopicQueueNums;
  27. if (queueNums < 0) {
  28. queueNums = 0;
  29. }
  30. topicConfig.setReadQueueNums(queueNums);
  31. topicConfig.setWriteQueueNums(queueNums);
  32. int perm = defaultTopicConfig.getPerm();
  33. perm &= ~PermName.PERM_INHERIT;
  34. topicConfig.setPerm(perm);
  35. topicConfig.setTopicSysFlag(topicSysFlag);
  36. topicConfig.setTopicFilterType(defaultTopicConfig.getTopicFilterType());
  37. } else {
  38. log.warn("Create new topic failed, because the default topic[{}] has no perm [{}] producer:[{}]",
  39. defaultTopic, defaultTopicConfig.getPerm(), remoteAddress);
  40. }
  41. } else {
  42. log.warn("Create new topic failed, because the default topic[{}] not exist. producer:[{}]",
  43. defaultTopic, remoteAddress);
  44. }
  45. if (topicConfig != null) {
  46. log.info("Create new topic by default topic:[{}] config:[{}] producer:[{}]",
  47. defaultTopic, topicConfig, remoteAddress);
  48. this.topicConfigTable.put(topic, topicConfig);
  49. this.dataVersion.nextVersion();
  50. //标记创建新topic
  51. createNew = true;
  52. //创建topic文件
  53. this.persist();
  54. }
  55. } finally {
  56. this.lockTopicConfigTable.unlock();
  57. }
  58. }
  59. } catch (InterruptedException e) {
  60. log.error("createTopicInSendMessageMethod exception", e);
  61. }
  62. //若创建新的topic成功
  63. if (createNew) {
  65. //向NameServer注册该Broker的所有topic信息
  65. this.brokerController.registerBrokerAll(false, true, true);
  66. }
  67. return topicConfig;
  68. }

Storing messages asynchronously (default)

DefaultMessageStore

  1. @Override
  2. public CompletableFuture<PutMessageResult> asyncPutMessage(MessageExtBrokerInner msg) {
  3. //是否可写入
  4. PutMessageStatus checkStoreStatus = this.checkStoreStatus();
  5. if (checkStoreStatus != PutMessageStatus.PUT_OK) {
  6. return CompletableFuture.completedFuture(new PutMessageResult(checkStoreStatus, null));
  7. }
  8. //校验消息
  9. PutMessageStatus msgCheckStatus = this.checkMessage(msg);
  10. if (msgCheckStatus == PutMessageStatus.MESSAGE_ILLEGAL) {
  11. return CompletableFuture.completedFuture(new PutMessageResult(msgCheckStatus, null));
  12. }
  13. long beginTime = this.getSystemClock().now();
  14. //写入消息到commitLog
  15. CompletableFuture<PutMessageResult> putResultFuture = this.commitLog.asyncPutMessage(msg);
  16. putResultFuture.thenAccept((result) -> {
  17. long elapsedTime = this.getSystemClock().now() - beginTime;
  18. if (elapsedTime > 500) {
  19. log.warn("putMessage not in lock elapsed time(ms)={}, bodyLength={}", elapsedTime, msg.getBody().length);
  20. }
  21. this.storeStatsService.setPutMessageEntireTimeMax(elapsedTime);
  22. if (null == result || !result.isOk()) {
  23. this.storeStatsService.getPutMessageFailedTimes().incrementAndGet();
  24. }
  25. });
  26. return putResultFuture;
  27. }

Checking whether the store is writable

  1. private PutMessageStatus checkStoreStatus() {
  2. //若消息存储关闭
  3. if (this.shutdown) {
  4. log.warn("message store has shutdown, so putMessage is forbidden");
  5. return PutMessageStatus.SERVICE_NOT_AVAILABLE;
  6. }
  7. //Broker 为SLAVE 角色
  8. if (BrokerRole.SLAVE == this.messageStoreConfig.getBrokerRole()) {
  9. long value = this.printTimes.getAndIncrement();
  10. if ((value % 50000) == 0) {
  11. log.warn("message store has shutdown, so putMessage is forbidden");
  12. }
  13. return PutMessageStatus.SERVICE_NOT_AVAILABLE;
  14. }
  15. //当前消息存储不可写时,拒绝消息写入
  16. if (!this.runningFlags.isWriteable()) {
  17. long value = this.printTimes.getAndIncrement();
  18. if ((value % 50000) == 0) {
  19. log.warn("message store has shutdown, so putMessage is forbidden");
  20. }
  21. return PutMessageStatus.SERVICE_NOT_AVAILABLE;
  22. } else {
  23. this.printTimes.set(0);
  24. }
  25. if (this.isOSPageCacheBusy()) {
  26. return PutMessageStatus.OS_PAGECACHE_BUSY;
  27. }
  28. return PutMessageStatus.PUT_OK;
  29. }

Validating the message size limits

  1. private PutMessageStatus checkMessage(MessageExtBrokerInner msg) {
  2. //消息主题长度超过127个字符(Byte.MAX_VALUE),拒绝该消息写入
  3. if (msg.getTopic().length() > Byte.MAX_VALUE) {
  4. log.warn("putMessage message topic length too long " + msg.getTopic().length());
  5. return PutMessageStatus.MESSAGE_ILLEGAL;
  6. }
  7. //消息属性长度超过32767个字符(Short.MAX_VALUE),拒绝写入
  8. if (msg.getPropertiesString() != null && msg.getPropertiesString().length() > Short.MAX_VALUE) {
  9. log.warn("putMessage message properties length too long " + msg.getPropertiesString().length());
  10. return PutMessageStatus.MESSAGE_ILLEGAL;
  11. }
  12. return PutMessageStatus.PUT_OK;
  13. }

CommitLog
Writing the message into the CommitLog

  1. public CompletableFuture<PutMessageResult> asyncPutMessage(final MessageExtBrokerInner msg) {
  2. // Set the storage time
  3. //记录存储时间
  4. msg.setStoreTimestamp(System.currentTimeMillis());
  5. // Set the message body BODY CRC (consider the most appropriate setting
  6. // on the client)
  7. msg.setBodyCRC(UtilAll.crc32(msg.getBody()));
  8. // Back to Results
  9. AppendMessageResult result = null;
  10. StoreStatsService storeStatsService = this.defaultMessageStore.getStoreStatsService();
  11. String topic = msg.getTopic();
  12. int queueId = msg.getQueueId();
  13. final int tranType = MessageSysFlag.getTransactionValue(msg.getSysFlag());
  14. if (tranType == MessageSysFlag.TRANSACTION_NOT_TYPE
  15. || tranType == MessageSysFlag.TRANSACTION_COMMIT_TYPE) {
  16. // Delay Delivery 若消息的延迟级别大于0
  17. if (msg.getDelayTimeLevel() > 0) {
  18. if (msg.getDelayTimeLevel() > this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel()) {
  19. msg.setDelayTimeLevel(this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel());
  20. }
  21. topic = TopicValidator.RMQ_SYS_SCHEDULE_TOPIC;
  22. queueId = ScheduleMessageService.delayLevel2QueueId(msg.getDelayTimeLevel());
  23. // Backup real topic, queueId
  24. MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_TOPIC, msg.getTopic());
  25. MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_QUEUE_ID, String.valueOf(msg.getQueueId()));
  26. msg.setPropertiesString(MessageDecoder.messageProperties2String(msg.getProperties()));
  27. msg.setTopic(topic);
  28. msg.setQueueId(queueId);
  29. }
  30. }
  31. long elapsedTimeInLock = 0;
  32. MappedFile unlockMappedFile = null;
  33. //获取上次写入的文件
  34. MappedFile mappedFile = this.mappedFileQueue.getLastMappedFile();
  35. putMessageLock.lock(); //spin or ReentrantLock ,depending on store config
  36. try {
  37. long beginLockTimestamp = this.defaultMessageStore.getSystemClock().now();
  38. this.beginTimeInLock = beginLockTimestamp;
  39. // Here settings are stored timestamp, in order to ensure an orderly
  40. // global 设置存储时间,保证全局有序
  41. msg.setStoreTimestamp(beginLockTimestamp);
  42. //若没有获取到最后写入的文件,或者该文件已写满
  43. if (null == mappedFile || mappedFile.isFull()) {
  44. //创建新的文件,偏移地址为0
  45. mappedFile = this.mappedFileQueue.getLastMappedFile(0); // Mark: NewFile may be cause noise
  46. }
  47. //若新建失败,则异常.可能磁盘不足或空间不够
  48. if (null == mappedFile) {
  49. log.error("create mapped file1 error, topic: " + msg.getTopic() + " clientAddr: " + msg.getBornHostString());
  50. beginTimeInLock = 0;
  51. return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.CREATE_MAPEDFILE_FAILED, null));
  52. }
  53. //将消息追加到MappedFile中
  54. result = mappedFile.appendMessage(msg, this.appendMessageCallback);
  55. switch (result.getStatus()) {
  56. case PUT_OK:
  57. break;
  58. case END_OF_FILE:
  59. unlockMappedFile = mappedFile;
  60. // Create a new file, re-write the message
  61. mappedFile = this.mappedFileQueue.getLastMappedFile(0);
  62. if (null == mappedFile) {
  63. // XXX: warn and notify me
  64. log.error("create mapped file2 error, topic: " + msg.getTopic() + " clientAddr: " + msg.getBornHostString());
  65. beginTimeInLock = 0;
  66. return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.CREATE_MAPEDFILE_FAILED, result));
  67. }
  68. result = mappedFile.appendMessage(msg, this.appendMessageCallback);
  69. break;
  70. case MESSAGE_SIZE_EXCEEDED:
  71. case PROPERTIES_SIZE_EXCEEDED:
  72. beginTimeInLock = 0;
  73. return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, result));
  74. case UNKNOWN_ERROR:
  75. beginTimeInLock = 0;
  76. return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, result));
  77. default:
  78. beginTimeInLock = 0;
  79. return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, result));
  80. }
  81. elapsedTimeInLock = this.defaultMessageStore.getSystemClock().now() - beginLockTimestamp;
  82. beginTimeInLock = 0;
  83. } finally {
  84. putMessageLock.unlock();
  85. }
  86. if (elapsedTimeInLock > 500) {
  87. log.warn("[NOTIFYME]putMessage in lock cost time(ms)={}, bodyLength={} AppendMessageResult={}", elapsedTimeInLock, msg.getBody().length, result);
  88. }
  89. if (null != unlockMappedFile && this.defaultMessageStore.getMessageStoreConfig().isWarmMapedFileEnable()) {
  90. this.defaultMessageStore.unlockMappedFile(unlockMappedFile);
  91. }
  92. PutMessageResult putMessageResult = new PutMessageResult(PutMessageStatus.PUT_OK, result);
  93. // Statistics
  94. storeStatsService.getSinglePutMessageTopicTimesTotal(msg.getTopic()).incrementAndGet();
  95. storeStatsService.getSinglePutMessageTopicSizeTotal(topic).addAndGet(result.getWroteBytes());
  96. CompletableFuture<PutMessageStatus> flushResultFuture = submitFlushRequest(result, putMessageResult, msg);
  97. CompletableFuture<PutMessageStatus> replicaResultFuture = submitReplicaRequest(result, putMessageResult, msg);
  98. return flushResultFuture.thenCombine(replicaResultFuture, (flushStatus, replicaStatus) -> {
  99. if (flushStatus != PutMessageStatus.PUT_OK) {
  100. putMessageResult.setPutMessageStatus(PutMessageStatus.FLUSH_DISK_TIMEOUT);
  101. }
  102. if (replicaStatus != PutMessageStatus.PUT_OK) {
  103. putMessageResult.setPutMessageStatus(replicaStatus);
  104. }
  105. return putMessageResult;
  106. });
  107. }
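The returned future is completed by the flush and replication services. The synchronous/asynchronous flush sections listed in the table of contents cover submitFlushRequest in detail; as a hypothetical sketch of the decision it makes (the helper methods here are illustrative, not RocketMQ signatures):

    import java.util.concurrent.CompletableFuture;

    // Hypothetical sketch of the flush decision taken after a message is appended.
    final class FlushDecisionSketch {
        enum FlushDiskType { SYNC_FLUSH, ASYNC_FLUSH }
        enum PutMessageStatus { PUT_OK, FLUSH_DISK_TIMEOUT }

        static CompletableFuture<PutMessageStatus> submitFlush(FlushDiskType type, boolean waitStoreMsgOK) {
            if (type == FlushDiskType.SYNC_FLUSH && waitStoreMsgOK) {
                // Synchronous flush: the future completes only after the group-commit
                // thread has flushed up to this message's offset (or the flush times out).
                return awaitGroupCommit();
            }
            // Asynchronous flush: wake the background flush/commit thread and report PUT_OK at once.
            wakeBackgroundFlushThread();
            return CompletableFuture.completedFuture(PutMessageStatus.PUT_OK);
        }

        static CompletableFuture<PutMessageStatus> awaitGroupCommit() { return new CompletableFuture<>(); }
        static void wakeBackgroundFlushThread() { /* placeholder for the flush service's wakeup() */ }
    }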

MappedFileQueue
Getting the last mapped file

  1. public MappedFile getLastMappedFile() {
  2. MappedFile mappedFileLast = null;
  3. while (!this.mappedFiles.isEmpty()) {
  4. try {
  5. ///获取尾端文件
  6. mappedFileLast = this.mappedFiles.get(this.mappedFiles.size() - 1);
  7. break;
  8. } catch (IndexOutOfBoundsException e) {
  9. //continue;
  10. } catch (Exception e) {
  11. log.error("getLastMappedFile has exception.", e);
  12. break;
  13. }
  14. }
  15. return mappedFileLast;
  16. }

Creating mapped files

  1. public MappedFile getLastMappedFile(final long startOffset) {
  2. return getLastMappedFile(startOffset, true);
  3. }
  4. public MappedFile getLastMappedFile(final long startOffset, boolean needCreate) {
  5. long createOffset = -1;
  6. MappedFile mappedFileLast = getLastMappedFile();
  7. //创建的偏移必须是mappedFileSize设置的倍数
  8. if (mappedFileLast == null) {
  9. createOffset = startOffset - (startOffset % this.mappedFileSize);
  10. }
  11. if (mappedFileLast != null && mappedFileLast.isFull()) {
  12. createOffset = mappedFileLast.getFileFromOffset() + this.mappedFileSize;
  13. }
  14. if (createOffset != -1 && needCreate) {
  15. //利用偏移为文件名
  16. String nextFilePath = this.storePath + File.separator + UtilAll.offset2FileName(createOffset);
  17. String nextNextFilePath = this.storePath + File.separator
  18. + UtilAll.offset2FileName(createOffset + this.mappedFileSize);
  19. MappedFile mappedFile = null;
  20. if (this.allocateMappedFileService != null) {
  21. //分别异步创建2个文件
  22. mappedFile = this.allocateMappedFileService.putRequestAndReturnMappedFile(nextFilePath,
  23. nextNextFilePath, this.mappedFileSize);
  24. } else {
  25. try {
  26. mappedFile = new MappedFile(nextFilePath, this.mappedFileSize);
  27. } catch (IOException e) {
  28. log.error("create mappedFile exception", e);
  29. }
  30. }
  31. if (mappedFile != null) {
  32. //若mappedFiles队列为空
  33. if (this.mappedFiles.isEmpty()) {
  34. //设置是MappedFileQueue 队列中第一个文件
  35. mappedFile.setFirstCreateInQueue(true);
  36. }
  37. //添加mapppedFiles集合
  38. this.mappedFiles.add(mappedFile);
  39. }
  40. return mappedFile;
  41. }
  42. return mappedFileLast;
  43. }
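Segment files are named after their starting offset, zero-padded to 20 digits by UtilAll.offset2FileName, so the next file name can be derived from the previous one plus mappedFileSize. A functionally equivalent sketch (1 GB is the default commitlog segment size; adjust to your configuration):

    // Sketch: commitlog segments are named by the zero-padded starting offset of each file.
    public class FileNameSketch {
        static String offset2FileName(long offset) {            // intended to match UtilAll.offset2FileName
            return String.format("%020d", offset);
        }

        public static void main(String[] args) {
            long mappedFileSize = 1024L * 1024 * 1024;           // default commitlog segment size (1 GB)
            System.out.println(offset2FileName(0));              // 00000000000000000000
            System.out.println(offset2FileName(mappedFileSize)); // 00000000001073741824
        }
    }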

Appending the message
MappedFile

  1. public AppendMessageResult appendMessage(final MessageExtBrokerInner msg, final AppendMessageCallback cb) {
  2. return appendMessagesInner(msg, cb);
  3. }

Pay attention to the expression writeBuffer != null ? writeBuffer.slice() : this.mappedByteBuffer.slice(). It means:

writeBuffer: a ByteBuffer borrowed from the transient store pool (direct, off-heap memory rather than heap memory). If it is not null, data is first written into this buffer and later committed to the memory-mapped buffer of the MappedFile. Note: it is only non-null when transientStorePoolEnable is true.

mappedByteBuffer: the memory-mapped buffer of the physical file.
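A rough sketch of the two write paths this implies (the class and method names mentioned in the comments are RocketMQ's; the flow itself is a simplification):

    // transientStorePoolEnable = true  (only effective for an async-flush master):
    //   append -> writeBuffer (direct memory from TransientStorePool)
    //          -> commit() copies the data into the fileChannel
    //          -> flush() forces the fileChannel to disk
    // transientStorePoolEnable = false (default):
    //   append -> mappedByteBuffer (page cache via mmap)
    //          -> flush() calls mappedByteBuffer.force()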

  1. //将消息追加到MappedFile 中
  2. public AppendMessageResult appendMessagesInner(final MessageExt messageExt, final AppendMessageCallback cb) {
  3. assert messageExt != null;
  4. assert cb != null;
  5. //获取文件当前写位置
  6. int currentPos = this.wrotePosition.get();
  7. //若写的位置小于文件大小
  8. if (currentPos < this.fileSize) {
  9. //创建一个与MappedFile 的共享内存区
  10. ByteBuffer byteBuffer = writeBuffer != null ? writeBuffer.slice() : this.mappedByteBuffer.slice();
  11. //设置缓冲区写入位置
  12. byteBuffer.position(currentPos);
  13. AppendMessageResult result;
  14. if (messageExt instanceof MessageExtBrokerInner) {
  15. //序列化msg
  16. result = cb.doAppend(this.getFileFromOffset(), byteBuffer, this.fileSize - currentPos, (MessageExtBrokerInner) messageExt);
  17. } else if (messageExt instanceof MessageExtBatch) {
  18. result = cb.doAppend(this.getFileFromOffset(), byteBuffer, this.fileSize - currentPos, (MessageExtBatch) messageExt);
  19. } else {
  20. return new AppendMessageResult(AppendMessageStatus.UNKNOWN_ERROR);
  21. }
  22. //记录当前文件写的指针
  23. this.wrotePosition.addAndGet(result.getWroteBytes());
  24. //记录写的时间戳
  25. this.storeTimestamp = result.getStoreTimestamp();
  26. return result;
  27. }
  28. //写指针已达到文件大小,说明文件已写满,记录错误并返回UNKNOWN_ERROR
  29. log.error("MappedFile.appendMessage return null, wrotePosition: {} fileSize: {}", currentPos, this.fileSize);
  30. return new AppendMessageResult(AppendMessageStatus.UNKNOWN_ERROR);
  31. }

CommitLog.DefaultAppendMessageCallback

  1. public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer byteBuffer, final int maxBlank,
  2. final MessageExtBrokerInner msgInner) {
  3. // STORETIMESTAMP + STOREHOSTADDRESS + OFFSET <br>
  4. // PHY OFFSET 计算消息的物理偏移量 = 文件起始偏移 + 缓冲区当前写位置
  5. long wroteOffset = fileFromOffset + byteBuffer.position();
  6. int sysflag = msgInner.getSysFlag();
  7. int bornHostLength = (sysflag & MessageSysFlag.BORNHOST_V6_FLAG) == 0 ? 4 + 4 : 16 + 4;
  8. int storeHostLength = (sysflag & MessageSysFlag.STOREHOSTADDRESS_V6_FLAG) == 0 ? 4 + 4 : 16 + 4;
  9. ByteBuffer bornHostHolder = ByteBuffer.allocate(bornHostLength);
  10. ByteBuffer storeHostHolder = ByteBuffer.allocate(storeHostLength);
  11. //重置可读的位置
  12. this.resetByteBuffer(storeHostHolder, storeHostLength);
  13. String msgId;
  14. //创建全局唯一消息ID 4字节ip和4字节端口号,8字节消息偏移量
  15. if ((sysflag & MessageSysFlag.STOREHOSTADDRESS_V6_FLAG) == 0) {
  16. msgId = MessageDecoder.createMessageId(this.msgIdMemory, msgInner.getStoreHostBytes(storeHostHolder), wroteOffset);
  17. } else {
  18. msgId = MessageDecoder.createMessageId(this.msgIdV6Memory, msgInner.getStoreHostBytes(storeHostHolder), wroteOffset);
  19. }
  20. // Record ConsumeQueue information
  21. //记录ConsumeQueue信息
  22. keyBuilder.setLength(0);
  23. keyBuilder.append(msgInner.getTopic());
  24. keyBuilder.append('-');
  25. keyBuilder.append(msgInner.getQueueId());
  26. // topicName-queueId组成
  27. String key = keyBuilder.toString();
  28. //通过key获得偏移
  29. Long queueOffset = CommitLog.this.topicQueueTable.get(key);
  30. if (null == queueOffset) {
  31. queueOffset = 0L;
  32. //当前所有消息队列的当前待写入偏移量。
  33. CommitLog.this.topicQueueTable.put(key, queueOffset);
  34. }
  35. // Transaction messages that require special handling
  36. final int tranType = MessageSysFlag.getTransactionValue(msgInner.getSysFlag());
  37. switch (tranType) {
  38. // Prepared and Rollback message is not consumed, will not enter the
  39. // consumer queuec
  40. case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
  41. case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
  42. queueOffset = 0L;
  43. break;
  44. case MessageSysFlag.TRANSACTION_NOT_TYPE:
  45. case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
  46. default:
  47. break;
  48. }
  49. /**
  50. * Serialize message 序列化消息
  51. */
  52. final byte[] propertiesData =
  53. msgInner.getPropertiesString() == null ? null : msgInner.getPropertiesString().getBytes(MessageDecoder.CHARSET_UTF8);
  54. final int propertiesLength = propertiesData == null ? 0 : propertiesData.length;
  55. if (propertiesLength > Short.MAX_VALUE) {
  56. log.warn("putMessage message properties length too long. length={}", propertiesData.length);
  57. return new AppendMessageResult(AppendMessageStatus.PROPERTIES_SIZE_EXCEEDED);
  58. }
  59. final byte[] topicData = msgInner.getTopic().getBytes(MessageDecoder.CHARSET_UTF8);
  60. final int topicLength = topicData.length;
  61. final int bodyLength = msgInner.getBody() == null ? 0 : msgInner.getBody().length;
  62. //根据消息体的长度、主题的长度、属性的长度结合消息存储格式计算消息的总长度。
  63. final int msgLen = calMsgLength(msgInner.getSysFlag(), bodyLength, topicLength, propertiesLength);
  64. // Exceeds the maximum message 超过最大的单个消息大小,则异常
  65. if (msgLen > this.maxMessageSize) {
  66. CommitLog.log.warn("message size exceeded, msg total size: " + msgLen + ", msg body size: " + bodyLength
  67. + ", maxMessageSize: " + this.maxMessageSize);
  68. return new AppendMessageResult(AppendMessageStatus.MESSAGE_SIZE_EXCEEDED);
  69. }
  70. // Determines whether there is sufficient free space
  71. /*
  72. 如果消息长度+END_FILE_MIN_BLANK_LENGTH 大于CommitLog 文件
  73. 的空闲空间Broker 会重新创建一个新的CommitLog文件来存储该消息。
  74. */
  75. if ((msgLen + END_FILE_MIN_BLANK_LENGTH) > maxBlank) {
  76. this.resetByteBuffer(this.msgStoreItemMemory, maxBlank);
  77. //每个CommitLog 文件最少会空闲8 个字节,
  78. // 高4 字节存储当前文件剩余空间,
  79. // 低4 字节存储魔数: CommitLog.BLANK_MAGIC_CODE 。
  80. // 1 TOTALSIZE
  81. this.msgStoreItemMemory.putInt(maxBlank);
  82. // 2 MAGICCODE
  83. this.msgStoreItemMemory.putInt(CommitLog.BLANK_MAGIC_CODE);
  84. // 3 The remaining space may be any value
  85. // Here the length of the specially set maxBlank
  86. final long beginTimeMills = CommitLog.this.defaultMessageStore.now();
  87. byteBuffer.put(this.msgStoreItemMemory.array(), 0, maxBlank);
  88. //返回AppendMessageStatus.END_OF_FILE
  89. return new AppendMessageResult(AppendMessageStatus.END_OF_FILE, wroteOffset, maxBlank, msgId, msgInner.getStoreTimestamp(),
  90. queueOffset, CommitLog.this.defaultMessageStore.now() - beginTimeMills);
  91. }
  92. // Initialization of storage space
  93. this.resetByteBuffer(msgStoreItemMemory, msgLen);
  94. // 1 TOTALSIZE
  95. this.msgStoreItemMemory.putInt(msgLen);
  96. // 2 MAGICCODE
  97. this.msgStoreItemMemory.putInt(CommitLog.MESSAGE_MAGIC_CODE);
  98. // 3 BODYCRC
  99. this.msgStoreItemMemory.putInt(msgInner.getBodyCRC());
  100. // 4 QUEUEID
  101. this.msgStoreItemMemory.putInt(msgInner.getQueueId());
  102. // 5 FLAG
  103. this.msgStoreItemMemory.putInt(msgInner.getFlag());
  104. // 6 QUEUEOFFSET
  105. this.msgStoreItemMemory.putLong(queueOffset);
  106. // 7 PHYSICALOFFSET
  107. this.msgStoreItemMemory.putLong(fileFromOffset + byteBuffer.position());
  108. // 8 SYSFLAG
  109. this.msgStoreItemMemory.putInt(msgInner.getSysFlag());
  110. // 9 BORNTIMESTAMP
  111. this.msgStoreItemMemory.putLong(msgInner.getBornTimestamp());
  112. // 10 BORNHOST
  113. this.resetByteBuffer(bornHostHolder, bornHostLength);
  114. this.msgStoreItemMemory.put(msgInner.getBornHostBytes(bornHostHolder));
  115. // 11 STORETIMESTAMP
  116. this.msgStoreItemMemory.putLong(msgInner.getStoreTimestamp());
  117. // 12 STOREHOSTADDRESS
  118. this.resetByteBuffer(storeHostHolder, storeHostLength);
  119. this.msgStoreItemMemory.put(msgInner.getStoreHostBytes(storeHostHolder));
  120. // 13 RECONSUMETIMES
  121. this.msgStoreItemMemory.putInt(msgInner.getReconsumeTimes());
  122. // 14 Prepared Transaction Offset
  123. this.msgStoreItemMemory.putLong(msgInner.getPreparedTransactionOffset());
  124. // 15 BODY
  125. this.msgStoreItemMemory.putInt(bodyLength);
  126. if (bodyLength > 0)
  127. this.msgStoreItemMemory.put(msgInner.getBody());
  128. // 16 TOPIC
  129. this.msgStoreItemMemory.put((byte) topicLength);
  130. this.msgStoreItemMemory.put(topicData);
  131. // 17 PROPERTIES
  132. this.msgStoreItemMemory.putShort((short) propertiesLength);
  133. if (propertiesLength > 0)
  134. this.msgStoreItemMemory.put(propertiesData);
  135. final long beginTimeMills = CommitLog.this.defaultMessageStore.now();
  136. // Write messages to the queue buffer
  137. //将消息写入buffer
  138. byteBuffer.put(this.msgStoreItemMemory.array(), 0, msgLen);
  139. AppendMessageResult result = new AppendMessageResult(AppendMessageStatus.PUT_OK, wroteOffset, msgLen, msgId,
  140. msgInner.getStoreTimestamp(), queueOffset, CommitLog.this.defaultMessageStore.now() - beginTimeMills);
  141. switch (tranType) {
  142. case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
  143. case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
  144. break;
  145. case MessageSysFlag.TRANSACTION_NOT_TYPE:
  146. case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
  147. // The next update ConsumeQueue information
  148. //更新消息队列逻辑偏移量。
  149. CommitLog.this.topicQueueTable.put(key, ++queueOffset);
  150. break;
  151. default:
  152. break;
  153. }
  154. return result;
  155. }

计算消息长度

  1. protected static int calMsgLength(int sysFlag, int bodyLength, int topicLength, int propertiesLength) {
  2. int bornhostLength = (sysFlag & MessageSysFlag.BORNHOST_V6_FLAG) == 0 ? 8 : 20;
  3. int storehostAddressLength = (sysFlag & MessageSysFlag.STOREHOSTADDRESS_V6_FLAG) == 0 ? 8 : 20;
  4. final int msgLen = 4 //TOTALSIZE 该消息条目总长度
  5. + 4 //MAGICCODE 魔数,固定值0xdaa320a7
  6. + 4 //BODYCRC 消息体CRC校验码,
  7. + 4 //QUEUEID 消息消费队列ID
  8. + 4 //FLAG 消息FLAG,RocketMQ 不做处理,供应用程序使用
  9. + 8 //QUEUEOFFSET 消息在消息消费队列的偏移量
  10. + 8 //PHYSICALOFFSET 消息在CommitLog 文件中的偏移量
  11. + 4 //SYSFLAG 消息系统Flag ,例如是否压缩、是否是事务消息等
  12. + 8 //BORNTIMESTAMP 消息生产者调用消息发送API 的时间戳
  13. + bornhostLength //BORNHOST 消息发送者IP 、端口号
  14. + 8 //STORETIMESTAMP 消息存储时间戳,
  15. + storehostAddressLength //STOREHOSTADDRESS Broker 服务器IP+ 端口号
  16. + 4 //RECONSUMETIMES 消息重试次数,
  17. + 8 //Prepared Transaction Offset 事务消息物理偏移量
  18. + 4 + (bodyLength > 0 ? bodyLength : 0) //BODY 消息体长度,
  19. + 1 + topicLength //TOPIC
  20. + 2 + (propertiesLength > 0 ? propertiesLength : 0) //propertiesLength 消息属性长度
  21. + 0;
  22. return msgLen;
  23. }

同步存储消息

  1. /**
  2. * 同步存储消息
  3. */
  4. @Override
  5. public PutMessageResult putMessage(MessageExtBrokerInner msg) {
  6. //校验是否可存
  7. PutMessageStatus checkStoreStatus = this.checkStoreStatus();
  8. if (checkStoreStatus != PutMessageStatus.PUT_OK) {
  9. return new PutMessageResult(checkStoreStatus, null);
  10. }
  11. //校验消息属性和主题长度
  12. PutMessageStatus msgCheckStatus = this.checkMessage(msg);
  13. if (msgCheckStatus == PutMessageStatus.MESSAGE_ILLEGAL) {
  14. return new PutMessageResult(msgCheckStatus, null);
  15. }
  16. long beginTime = this.getSystemClock().now();
  17. //存储消息
  18. PutMessageResult result = this.commitLog.putMessage(msg);
  19. long elapsedTime = this.getSystemClock().now() - beginTime;
  20. if (elapsedTime > 500) {
  21. log.warn("not in lock elapsed time(ms)={}, bodyLength={}", elapsedTime, msg.getBody().length);
  22. }
  23. this.storeStatsService.setPutMessageEntireTimeMax(elapsedTime);
  24. if (null == result || !result.isOk()) {
  25. this.storeStatsService.getPutMessageFailedTimes().incrementAndGet();
  26. }
  27. return result;
  28. }

存储消息

  1. public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
  2. // Set the storage time
  3. msg.setStoreTimestamp(System.currentTimeMillis());
  4. // Set the message body BODY CRC (consider the most appropriate setting
  5. // on the client)
  6. msg.setBodyCRC(UtilAll.crc32(msg.getBody()));
  7. // Back to Results
  8. AppendMessageResult result = null;
  9. StoreStatsService storeStatsService = this.defaultMessageStore.getStoreStatsService();
  10. String topic = msg.getTopic();
  11. int queueId = msg.getQueueId();
  12. final int tranType = MessageSysFlag.getTransactionValue(msg.getSysFlag());
  13. if (tranType == MessageSysFlag.TRANSACTION_NOT_TYPE
  14. || tranType == MessageSysFlag.TRANSACTION_COMMIT_TYPE) {
  15. // Delay Delivery
  16. //若消息的延迟级别大于0
  17. if (msg.getDelayTimeLevel() > 0) {
  18. if (msg.getDelayTimeLevel() > this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel()) {
  19. msg.setDelayTimeLevel(this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel());
  20. }
  21. topic = TopicValidator.RMQ_SYS_SCHEDULE_TOPIC;
  22. queueId = ScheduleMessageService.delayLevel2QueueId(msg.getDelayTimeLevel());
  23. // Backup real topic, queueId
  24. MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_TOPIC, msg.getTopic());
  25. MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_QUEUE_ID, String.valueOf(msg.getQueueId()));
  26. msg.setPropertiesString(MessageDecoder.messageProperties2String(msg.getProperties()));
  27. //用延迟消息主题SCHEDULE TOPIC 、消息队列ID 更新原先消息的主题与队列
  28. //用于消息重试机制与定时消息处理
  29. msg.setTopic(topic);
  30. msg.setQueueId(queueId);
  31. }
  32. }
  33. InetSocketAddress bornSocketAddress = (InetSocketAddress) msg.getBornHost();
  34. if (bornSocketAddress.getAddress() instanceof Inet6Address) {
  35. msg.setBornHostV6Flag();
  36. }
  37. InetSocketAddress storeSocketAddress = (InetSocketAddress) msg.getStoreHost();
  38. if (storeSocketAddress.getAddress() instanceof Inet6Address) {
  39. msg.setStoreHostAddressV6Flag();
  40. }
  41. long elapsedTimeInLock = 0;
  42. MappedFile unlockMappedFile = null;
  43. //获取当前可以写入的Commitlog 文件
  44. //文件名存偏移地址,表示该文件中的第一条消息的物理偏移量
  45. MappedFile mappedFile = this.
  46. //看作是${ ROCKET_HOME }/store/commitlog 文件夹
  47. mappedFileQueue.
  48. //对应该文件夹下一个个的文件。
  49. getLastMappedFile();
  50. //加锁,也就是说消息存commitLog是串行的,这样也是为了保证有序,和安全.
  51. //依赖storeConfig
  52. putMessageLock.lock(); //spin or ReentrantLock ,depending on store config
  53. try {
  54. long beginLockTimestamp = this.defaultMessageStore.getSystemClock().now();
  55. this.beginTimeInLock = beginLockTimestamp;
  56. // Here settings are stored timestamp, in order to ensure an orderly
  57. // global
  58. //设置消息存储时间,以保证全局有序
  59. msg.setStoreTimestamp(beginLockTimestamp);
  60. //若文件为空,或者写满.则新建一个commit文件,偏移地址为0
  61. if (null == mappedFile || mappedFile.isFull()) {
  62. mappedFile = this.mappedFileQueue.getLastMappedFile(0); // Mark: NewFile may be cause noise
  63. }
  64. //若新建失败,则异常.可能磁盘不足或空间不够
  65. if (null == mappedFile) {
  66. log.error("create mapped file1 error, topic: " + msg.getTopic() + " clientAddr: " + msg.getBornHostString());
  67. beginTimeInLock = 0;
  68. return new PutMessageResult(PutMessageStatus.CREATE_MAPEDFILE_FAILED, null);
  69. }
  70. result = mappedFile.appendMessage(msg, this.appendMessageCallback);
  71. switch (result.getStatus()) {
  72. case PUT_OK:
  73. break;
  74. case END_OF_FILE:
  75. unlockMappedFile = mappedFile;
  76. // Create a new file, re-write the message
  77. mappedFile = this.mappedFileQueue.getLastMappedFile(0);
  78. if (null == mappedFile) {
  79. // XXX: warn and notify me
  80. log.error("create mapped file2 error, topic: " + msg.getTopic() + " clientAddr: " + msg.getBornHostString());
  81. beginTimeInLock = 0;
  82. return new PutMessageResult(PutMessageStatus.CREATE_MAPEDFILE_FAILED, result);
  83. }
  84. result = mappedFile.appendMessage(msg, this.appendMessageCallback);
  85. break;
  86. case MESSAGE_SIZE_EXCEEDED:
  87. case PROPERTIES_SIZE_EXCEEDED:
  88. beginTimeInLock = 0;
  89. return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, result);
  90. case UNKNOWN_ERROR:
  91. beginTimeInLock = 0;
  92. return new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, result);
  93. default:
  94. beginTimeInLock = 0;
  95. return new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, result);
  96. }
  97. elapsedTimeInLock = this.defaultMessageStore.getSystemClock().now() - beginLockTimestamp;
  98. beginTimeInLock = 0;
  99. } finally {
  100. putMessageLock.unlock();
  101. }
  102. if (elapsedTimeInLock > 500) {
  103. log.warn("[NOTIFYME]putMessage in lock cost time(ms)={}, bodyLength={} AppendMessageResult={}", elapsedTimeInLock, msg.getBody().length, result);
  104. }
  105. if (null != unlockMappedFile && this.defaultMessageStore.getMessageStoreConfig().isWarmMapedFileEnable()) {
  106. this.defaultMessageStore.unlockMappedFile(unlockMappedFile);
  107. }
  108. PutMessageResult putMessageResult = new PutMessageResult(PutMessageStatus.PUT_OK, result);
  109. // Statistics
  110. storeStatsService.getSinglePutMessageTopicTimesTotal(msg.getTopic()).incrementAndGet();
  111. storeStatsService.getSinglePutMessageTopicSizeTotal(topic).addAndGet(result.getWroteBytes());
  112. //根据是同步刷盘还是异步刷盘方式,将内存中的数据持久化到磁盘
  113. handleDiskFlush(result, putMessageResult, msg);
  114. //执行HA 主从同步复制
  115. handleHA(result, putMessageResult, msg);
  116. return putMessageResult;
  117. }

CommitLog刷盘

可通过在broker配置文件中配置flushDiskType来设定刷盘方式,可选值为ASYNC_FLUSH(异步刷盘)、SYNC_FLUSH(同步刷盘),默认为异步刷盘。
索引文件的刷盘并不是采取定时刷盘机制,而是每更新一次索引文件就会将上一次的改动刷写到磁盘。
CommitLog

  1. /**
  2. * 刷盘
  3. */
  4. public void handleDiskFlush(AppendMessageResult result, PutMessageResult putMessageResult, MessageExt messageExt) {
  5. // Synchronization flush 同步刷盘
  6. //获取刷盘方式,是否为同步刷盘
  7. if (FlushDiskType.SYNC_FLUSH == this.defaultMessageStore.getMessageStoreConfig().getFlushDiskType()) {
  8. final GroupCommitService service = (GroupCommitService) this.flushCommitLogService;
  9. if (messageExt.isWaitStoreMsgOK()) {
  10. //构建GroupCommitRequest 同步任务
  11. GroupCommitRequest request = new GroupCommitRequest(result.getWroteOffset() + result.getWroteBytes());
  12. //提交到service 。
  13. service.putRequest(request);
  14. CompletableFuture<PutMessageStatus> flushOkFuture = request.future();
  15. PutMessageStatus flushStatus = null;
  16. try {
  17. //等待同步刷盘任务完成,如果超时则返回刷盘错误,
  18. flushStatus = flushOkFuture.get(this.defaultMessageStore.getMessageStoreConfig().getSyncFlushTimeout(),
  19. TimeUnit.MILLISECONDS);
  20. } catch (InterruptedException | ExecutionException | TimeoutException e) {
  21. //flushOK=false;
  22. }
  23. if (flushStatus != PutMessageStatus.PUT_OK) {
  24. log.error("do groupcommit, wait for flush failed, topic: " + messageExt.getTopic() + " tags: " + messageExt.getTags()
  25. + " client address: " + messageExt.getBornHostString());
  26. putMessageResult.setPutMessageStatus(PutMessageStatus.FLUSH_DISK_TIMEOUT);
  27. }
  28. } else {
  29. //该线程处于等待状态则将其唤醒。
  30. service.wakeup();
  31. }
  32. }
  33. // Asynchronous flush 异步刷盘
  34. else {
  35. /*
  36. 如果transientStorePoolEnable 为true , RocketMQ 会单独申请一个与目标物理文件( commitlog)
  37. 同样大小的堆外内存, 该堆外内存将使用内存锁定,确保不会被置换到虚拟内存中去,消
  38. 息首先追加到堆外内存,然后提交到与物理文件的内存映射内存中,再flush 到磁盘。如果
  39. transientStorePoolEnable 为false ,消息直接追加到与物理文件直接映射的内存中,然后刷
  40. 写到磁盘中。
  41. */
  42. if (!this.defaultMessageStore.getMessageStoreConfig().isTransientStorePoolEnable()) {
  43. flushCommitLogService.wakeup();
  44. } else {
  45. commitLogService.wakeup();
  46. }
  47. }
  48. }

同步刷盘

放入刷盘任务请求
CommitLog.GroupCommitService

  1. public synchronized void putRequest(final GroupCommitRequest request) {
  2. synchronized (this.requestsWrite) {
  3. //客户端提交同步刷盘任务到GroupCommitService 线程
  4. this.requestsWrite.add(request);
  5. }
  6. //该线程处于等待状态则将其唤醒。
  7. this.wakeup();
  8. }

同步刷盘请求默认最多等待5秒(由syncFlushTimeout配置),期间等待GroupCommitService线程消费该请求并完成刷盘。
GroupCommitService

  1. public void run() {
  2. CommitLog.log.info(this.getServiceName() + " service started");
  3. while (!this.isStopped()) {
  4. try {
  5. //休息10ms
  6. this.waitForRunning(10);
  7. //处理一批同步刷盘请求
  8. this.doCommit();
  9. } catch (Exception e) {
  10. CommitLog.log.warn(this.getServiceName() + " service has exception. ", e);
  11. }
  12. }
  13. // Under normal circumstances shutdown, wait for the arrival of the
  14. // request, and then flush
  15. try {
  16. Thread.sleep(10);
  17. } catch (InterruptedException e) {
  18. CommitLog.log.warn("GroupCommitService Exception, ", e);
  19. }
  20. synchronized (this) {
  21. //交换任务,避免锁竞争
  22. this.swapRequests();
  23. }
  24. //处理另外一批同步刷盘请求
  25. this.doCommit();
  26. CommitLog.log.info(this.getServiceName() + " service end");
  27. }

执行刷盘提交任务
GroupCommitService

  1. private void doCommit() {
  2. synchronized (this.requestsRead) {
  3. if (!this.requestsRead.isEmpty()) {
  4. //遍历请求
  5. for (GroupCommitRequest req : this.requestsRead) {
  6. // There may be a message in the next file, so a maximum of
  7. // two times the flush 刷新2次
  8. boolean flushOK = false;
  9. for (int i = 0; i < 2 && !flushOK; i++) {
  10. //当前刷盘指针是否大于下一个刷盘指针
  11. flushOK = CommitLog.this.mappedFileQueue.getFlushedWhere() >= req.getNextOffset();
  12. if (!flushOK) {
  13. //刷盘
  14. CommitLog.this.mappedFileQueue.flush(0);
  15. }
  16. }
  17. //唤醒消息发送线程并通知刷盘结果。
  18. req.wakeupCustomer(flushOK ? PutMessageStatus.PUT_OK : PutMessageStatus.FLUSH_DISK_TIMEOUT);
  19. }
  20. long storeTimestamp = CommitLog.this.mappedFileQueue.getStoreTimestamp();
  21. if (storeTimestamp > 0) {
  22. //更新刷盘检测点StoreCheckpoint 中的PhysicMsgTimestamp
  23. //但并没有执行检测点的刷盘操作,刷盘检测点的刷盘操作将在刷写消息队列文件时触发
  24. CommitLog.this.defaultMessageStore.getStoreCheckpoint().setPhysicMsgTimestamp(storeTimestamp);
  25. }
  26. //清除刷盘请求
  27. this.requestsRead.clear();
  28. } else {
  29. // Because of individual messages is set to not sync flush, it
  30. // will come to this process
  31. //个别消息可能设置为不等待同步刷盘结果(isWaitStoreMsgOK为false),此时没有提交请求,也会走到这里直接刷盘
  32. CommitLog.this.mappedFileQueue.flush(0);
  33. }
  34. }
  35. }

MappedFileQueue

  1. public boolean flush(final int flushLeastPages) {
  2. boolean result = true;
  3. //根据消息偏移量offset 查找MappedFile
  4. MappedFile mappedFile = this.findMappedFileByOffset(this.flushedWhere, this.flushedWhere == 0);
  5. if (mappedFile != null) {
  6. //获取文件最后一次内容写入时间
  7. long tmpTimeStamp = mappedFile.getStoreTimestamp();
  8. //将内存中的数据刷写到磁盘
  9. int offset = mappedFile.flush(flushLeastPages);
  10. //获取刷写磁盘指针
  11. long where = mappedFile.getFileFromOffset() + offset;
  12. result = where == this.flushedWhere;
  13. //记录刷写磁盘指针
  14. this.flushedWhere = where;
  15. if (0 == flushLeastPages) {
  16. //记录文件最后一次内容写入时间
  17. //注意这里记录的是刷入channel或channel映射的byteBuffer的时间,而不是刷盘时间
  18. this.storeTimestamp = tmpTimeStamp;
  19. }
  20. }
  21. return result;
  22. }

将内存中的数据刷写到磁盘
MappedFile

  1. public int flush(final int flushLeastPages) {
  2. if (this.isAbleToFlush(flushLeastPages)) {
  3. if (this.hold()) {
  4. //获取该文件内存映射写指针
  5. //如果开启了堆内存池,则是堆内存写指针
  6. int value = getReadPosition();
  7. try {
  8. //We only append data to fileChannel or mappedByteBuffer, never both.
  9. //说明使用了堆内存,执行到这里堆内存已经写入fileChannel中了,重新刷入磁盘就行了
  10. if (writeBuffer != null || this.fileChannel.position() != 0) {
  11. //文件的所有待定修改立即同步到磁盘,布尔型参数表示在方法返回值前文件的元数据(metadata)是否也要被同步更新到磁盘
  12. this.fileChannel.force(false);
  13. } else {
  14. //说明没用堆内存ByteBuffer,直接使用内存映射刷入即可.
  15. this.mappedByteBuffer.force();
  16. }
  17. } catch (Throwable e) {
  18. log.error("Error occurred when force data to disk.", e);
  19. }
  20. //记录刷入磁盘最新指针
  21. this.flushedPosition.set(value);
  22. this.release();
  23. } else {
  24. log.warn("in flush, hold failed, flush offset = " + this.flushedPosition.get());
  25. this.flushedPosition.set(getReadPosition());
  26. }
  27. }
  28. //返回刷入磁盘最新指针
  29. return this.getFlushedPosition();
  30. }

唤醒消息发送线程并通知刷盘结果。

  1. public void wakeupCustomer(final PutMessageStatus putMessageStatus) {
  2. this.flushOKFuture.complete(putMessageStatus);
  3. }

交换请求
避免同步刷盘消费任务与消息生产者提交任务之间的锁竞争

  1. private void swapRequests() {
  2. //这两个容器每执行完一次任务后交换,继续消费任务
  3. List<GroupCommitRequest> tmp = this.requestsWrite;
  4. this.requestsWrite = this.requestsRead;
  5. this.requestsRead = tmp;
  6. }
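
这里的"写队列/读队列互换"其实就是一个通用的双缓冲(读写分离)技巧,下面是一段与源码无关的最小示意(类名与方法均为假设):

  1. import java.util.ArrayList;
  2. import java.util.List;
  3. import java.util.function.Consumer;
  4. // 示意代码:生产者只向writeList追加,消费线程先swap再遍历readList,
  5. // 遍历过程不与生产者竞争锁,思路与GroupCommitService一致
  6. class DoubleBufferQueue<T> {
  7. private List<T> writeList = new ArrayList<>();
  8. private List<T> readList = new ArrayList<>();
  9. public synchronized void put(T task) {
  10. writeList.add(task); // 生产者只持锁很短时间
  11. }
  12. public void consume(Consumer<T> handler) {
  13. swap(); // 交换两个容器
  14. for (T task : readList) { // 单线程消费,遍历时无需加锁
  15. handler.accept(task);
  16. }
  17. readList.clear();
  18. }
  19. private synchronized void swap() {
  20. List<T> tmp = writeList;
  21. writeList = readList;
  22. readList = tmp;
  23. }
  24. }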

异步刷盘

如果 transientStorePoolEnable 为false ,消息直接追加到与物理文件直接映射的内存中,然后刷写到磁盘中。
flushCommitLogService机制

CommitLog.FlushRealTimeService

  1. public void run() {
  2. CommitLog.log.info(this.getServiceName() + " service started");
  3. while (!this.isStopped()) {
  4. //默认为false , 表示await 方法等待;如果为true ,表示使用Thread.sleep 方法等待。
  5. boolean flushCommitLogTimed = CommitLog.this.defaultMessageStore.getMessageStoreConfig().isFlushCommitLogTimed();
  6. //FlushRealTimeService 线程任务运行间隔。
  7. int interval = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushIntervalCommitLog();
  8. //一次刷写任务至少包含页数, 如果待刷写数据不足,小于该参数配置的值,将忽略本次刷写任务,默认4 页。
  9. int flushPhysicQueueLeastPages = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushCommitLogLeastPages();
  10. //两次真实刷写任务最大间隔, 默认10s 。
  11. int flushPhysicQueueThoroughInterval =
  12. CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushCommitLogThoroughInterval();
  13. boolean printFlushProgress = false;
  14. // Print flush progress
  15. long currentTimeMillis = System.currentTimeMillis();
  16. /*
  17. 如果距上次刷盘间隔超过flushPhysicQueueThoroughInterval,则本次刷盘任务
  18. 将忽略flushPhysicQueueLeastPages,也就是即使待刷写数据小于指定页数也执行刷写磁盘
  19. 操作。
  20. */
  21. if (currentTimeMillis >= (this.lastFlushTimestamp + flushPhysicQueueThoroughInterval)) {
  22. this.lastFlushTimestamp = currentTimeMillis;
  23. flushPhysicQueueLeastPages = 0;
  24. printFlushProgress = (printTimes++ % 10) == 0;
  25. }
  26. try {
  27. //执行一次刷盘任务前先等待指定时间间隔, 然后再执行刷盘任务。
  28. if (flushCommitLogTimed) {
  29. Thread.sleep(interval);
  30. } else {
  31. this.waitForRunning(interval);
  32. }
  33. if (printFlushProgress) {
  34. this.printFlushProgress();
  35. }
  36. long begin = System.currentTimeMillis();
  37. //将内存中数据刷写到磁盘
  38. CommitLog.this.mappedFileQueue.flush(flushPhysicQueueLeastPages);
  39. long storeTimestamp = CommitLog.this.mappedFileQueue.getStoreTimestamp();
  40. if (storeTimestamp > 0) {
  41. //更新存储检测点文件中commitlog 文件的更新时间戳
  42. CommitLog.this.defaultMessageStore.getStoreCheckpoint().setPhysicMsgTimestamp(storeTimestamp);
  43. }
  44. long past = System.currentTimeMillis() - begin;
  45. if (past > 500) {
  46. log.info("Flush data to disk costs {} ms", past);
  47. }
  48. } catch (Throwable e) {
  49. CommitLog.log.warn(this.getServiceName() + " service has exception. ", e);
  50. this.printFlushProgress();
  51. }
  52. }
  53. // Normal shutdown, to ensure that all the flush before exit
  54. boolean result = false;
  55. for (int i = 0; i < RETRY_TIMES_OVER && !result; i++) {
  56. result = CommitLog.this.mappedFileQueue.flush(0);
  57. CommitLog.log.info(this.getServiceName() + " service shutdown, retry " + (i + 1) + " times " + (result ? "OK" : "Not OK"));
  58. }
  59. this.printFlushProgress();
  60. CommitLog.log.info(this.getServiceName() + " service end");
  61. }

将内存中的数据刷写到磁盘
MappedFile

  1. public int flush(final int flushLeastPages) {
  2. if (this.isAbleToFlush(flushLeastPages)) {
  3. if (this.hold()) {
  4. //获取该文件内存映射写指针
  5. //如果开启了堆内存池,则是堆内存写指针
  6. int value = getReadPosition();
  7. try {
  8. //We only append data to fileChannel or mappedByteBuffer, never both.
  9. //说明使用了堆内存,执行到这里堆内存已经写入fileChannel中了,重新刷入磁盘就行了
  10. if (writeBuffer != null || this.fileChannel.position() != 0) {
  11. //文件的所有待定修改立即同步到磁盘,布尔型参数表示在方法返回值前文件的元数据(metadata)是否也要被同步更新到磁盘
  12. this.fileChannel.force(false);
  13. } else {
  14. //说明没用堆内存ByteBuffer,直接使用内存映射刷入即可.
  15. this.mappedByteBuffer.force();
  16. }
  17. } catch (Throwable e) {
  18. log.error("Error occurred when force data to disk.", e);
  19. }
  20. //记录刷入磁盘最新指针
  21. this.flushedPosition.set(value);
  22. this.release();
  23. } else {
  24. log.warn("in flush, hold failed, flush offset = " + this.flushedPosition.get());
  25. this.flushedPosition.set(getReadPosition());
  26. }
  27. }
  28. //返回刷入磁盘最新指针
  29. return this.getFlushedPosition();
  30. }

commitLogService提交
如果transientStorePoolEnable 为true , RocketMQ 会单独申请一个与目标物理文件( commitlog)同样大小的堆外内存, 该堆外内存将使用内存锁定,确保不会被置换到虚拟内存中去,消息首先追加到堆外内存,然后提交到与物理文件的内存映射内存中,再flush到磁盘。
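
这段"先写堆外内存、再commit到FileChannel、最后force落盘"的两阶段流程,可以用一段独立的JDK NIO代码示意(非RocketMQ源码,文件名与大小均为假设):

  1. import java.io.RandomAccessFile;
  2. import java.nio.ByteBuffer;
  3. import java.nio.channels.FileChannel;
  4. public class TwoStageWriteSketch {
  5. public static void main(String[] args) throws Exception {
  6. ByteBuffer writeBuffer = ByteBuffer.allocateDirect(1024); // 模拟堆外writeBuffer
  7. try (RandomAccessFile raf = new RandomAccessFile("demo_commitlog", "rw");
  8. FileChannel channel = raf.getChannel()) {
  9. writeBuffer.put("hello rocketmq".getBytes()); // 第一步:消息先追加到堆外内存
  10. ByteBuffer slice = writeBuffer.duplicate(); // 第二步:commit,把新增数据写入文件通道
  11. slice.flip();
  12. channel.position(channel.size());
  13. channel.write(slice);
  14. channel.force(false); // 第三步:flush,将脏数据刷入磁盘(不强制刷元数据)
  15. }
  16. }
  17. }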
CommitLog.CommitRealTimeService

  1. public void run() {
  2. CommitLog.log.info(this.getServiceName() + " service started");
  3. while (!this.isStopped()) {
  4. //CommitRealTimeService 线程间隔时间,默认200ms
  5. //将ByteBuffer 新追加的内容提交到MappedByteBuffer
  6. int interval = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getCommitIntervalCommitLog();
  7. //一次提交任务至少包含页数, 如果待提交数据不足,小于该参数配置的值,将忽略本次提交任务,默认4 页。
  8. int commitDataLeastPages = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getCommitCommitLogLeastPages();
  9. //两次真实提交最大间隔,默认200ms 。
  10. int commitDataThoroughInterval =
  11. CommitLog.this.defaultMessageStore.getMessageStoreConfig().getCommitCommitLogThoroughInterval();
  12. long begin = System.currentTimeMillis();
  13. //如果距上次提交间隔超过commitDataThoroughInterval,
  14. if (begin >= (this.lastCommitTimestamp + commitDataThoroughInterval)) {
  15. //本次提交忽略commitCommitLogLeastPages参数
  16. //也就是如果待提交数据小于指定页数, 也执行提交操作。
  17. this.lastCommitTimestamp = begin;
  18. commitDataLeastPages = 0;
  19. }
  20. try {
  21. //执行提交操作,将待提交数据提交到物理文件的内存映射内存区
  22. boolean result = CommitLog.this.mappedFileQueue.commit(commitDataLeastPages);
  23. long end = System.currentTimeMillis();
  24. //如果返回false ,并不是代表提交失败,而是只提交了一部分数据
  25. if (!result) {
  26. this.lastCommitTimestamp = end; // result = false means some data committed.
  27. //now wake up flush thread.
  28. //唤醒刷盘线程执行刷盘操作
  29. //该线程每完成一次提交动作,将等待200ms 再继续执行下一次提交任务。
  30. flushCommitLogService.wakeup();
  31. }
  32. if (end - begin > 500) {
  33. log.info("Commit data to file costs {} ms", end - begin);
  34. }
  35. //等待200ms
  36. this.waitForRunning(interval);
  37. } catch (Throwable e) {
  38. CommitLog.log.error(this.getServiceName() + " service has exception. ", e);
  39. }
  40. }
  41. boolean result = false;
  42. for (int i = 0; i < RETRY_TIMES_OVER && !result; i++) {
  43. result = CommitLog.this.mappedFileQueue.commit(0);
  44. CommitLog.log.info(this.getServiceName() + " service shutdown, retry " + (i + 1) + " times " + (result ? "OK" : "Not OK"));
  45. }
  46. CommitLog.log.info(this.getServiceName() + " service end");
  47. }
  48. }

MappedFileQueue

  1. public boolean commit(final int commitLeastPages) {
  2. boolean result = true;
  3. //根据消息偏移量offset 查找MappedFile
  4. MappedFile mappedFile = this.findMappedFileByOffset(this.committedWhere, this.committedWhere == 0);
  5. if (mappedFile != null) {
  6. //执行提交操作
  7. int offset = mappedFile.commit(commitLeastPages);
  8. long where = mappedFile.getFileFromOffset() + offset;
  9. result = where == this.committedWhere;
  10. //记录提交指针
  11. this.committedWhere = where;
  12. }
  13. return result;
  14. }

根据消息偏移量offset 查找MappedFile
MappedFileQueue

  1. public MappedFile findMappedFileByOffset(final long offset, final boolean returnFirstOnNotFound) {
  2. try {
  3. MappedFile firstMappedFile = this.getFirstMappedFile();
  4. MappedFile lastMappedFile = this.getLastMappedFile();
  5. if (firstMappedFile != null && lastMappedFile != null) {
  6. //偏移不在起始和结束文件中,说明越界.
  7. if (offset < firstMappedFile.getFileFromOffset() || offset >= lastMappedFile.getFileFromOffset() + this.mappedFileSize) {
  8. LOG_ERROR.warn("Offset not matched. Request offset: {}, firstOffset: {}, lastOffset: {}, mappedFileSize: {}, mappedFiles count: {}",
  9. offset,
  10. firstMappedFile.getFileFromOffset(),
  11. lastMappedFile.getFileFromOffset() + this.mappedFileSize,
  12. this.mappedFileSize,
  13. this.mappedFiles.size());
  14. } else {
  15. //因为RocketMQ定时删除存储文件
  16. //所以第一个文件偏移开始并不一定是000000.
  17. //同理可得offset / this.mappedFileSize并不能定位到具体文件
  18. //所以还需要减去第一个文件的偏移/文件大小,算出磁盘中起始第几个文件
  19. int index = (int) ((offset / this.mappedFileSize) - (firstMappedFile.getFileFromOffset() / this.mappedFileSize));
  20. MappedFile targetFile = null;
  21. try {
  22. //获取映射文件
  23. targetFile = this.mappedFiles.get(index);
  24. } catch (Exception ignored) {
  25. }
  26. //再次检测是否在文件范围内
  27. if (targetFile != null && offset >= targetFile.getFileFromOffset()
  28. && offset < targetFile.getFileFromOffset() + this.mappedFileSize) {
  29. return targetFile;
  30. }
  31. //遍历所有文件查找
  32. for (MappedFile tmpMappedFile : this.mappedFiles) {
  33. if (offset >= tmpMappedFile.getFileFromOffset()
  34. && offset < tmpMappedFile.getFileFromOffset() + this.mappedFileSize) {
  35. return tmpMappedFile;
  36. }
  37. }
  38. }
  39. //如果配置了没找到返回第一个,就返回第一个文件
  40. if (returnFirstOnNotFound) {
  41. return firstMappedFile;
  42. }
  43. }
  44. } catch (Exception e) {
  45. log.error("findMappedFileByOffset Exception", e);
  46. }
  47. return null;
  48. }
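
这里的下标计算可以用一个假设的数字例子说明(仅作示意,commitlog单个文件默认1G=1073741824字节):

  1. public class FindFileIndexDemo {
  2. public static void main(String[] args) {
  3. long mappedFileSize = 1073741824L; // 1G
  4. long firstFileFromOffset = 1073741824L; // 旧文件已被删除,第一个文件并不从0开始
  5. long offset = 2500000000L; // 要查找的物理偏移
  6. int index = (int) ((offset / mappedFileSize) - (firstFileFromOffset / mappedFileSize));
  7. System.out.println(index); // 2 - 1 = 1,即mappedFiles集合中的第2个文件
  8. }
  9. }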

执行提交操作
MappedFile

  1. public int commit(final int commitLeastPages) {
  2. /*
  3. writeBuffer如果为空,直接返回wrotePosition 指针,无须执行commit 操作,
  4. 表明commit 操作的实际是writeBuffer堆外内存
  5. */
  6. if (writeBuffer == null) {
  7. //no need to commit data to file channel, so just regard wrotePosition as committedPosition.
  8. return this.wrotePosition.get();
  9. }
  10. //判断是否执行commit 操作,主要判断页是否满足
  11. if (this.isAbleToCommit(commitLeastPages)) {
  12. //添加引用
  13. if (this.hold()) {
  14. //具体的提交实现
  15. commit0(commitLeastPages);
  16. //释放引用
  17. this.release();
  18. } else {
  19. log.warn("in commit, hold failed, commit offset = " + this.committedPosition.get());
  20. }
  21. }
  22. // All dirty data has been committed to FileChannel.
  23. ///所有脏数据已经写入channel,且该文件已经提交满了.
  24. if (writeBuffer != null && this.transientStorePool != null && this.fileSize == this.committedPosition.get()) {
  25. //归还堆外内存给堆内存池
  26. this.transientStorePool.returnBuffer(writeBuffer);
  27. //释放GC
  28. this.writeBuffer = null;
  29. }
  30. //返回最新提交位置
  31. return this.committedPosition.get();
  32. }

实时更新消息消费队列与索引文件

消息消费队列文件、消息属性索引文件都是基于CommitLog 文件构建的, 当消息生产者提交的消息存储在Commitlog 文件中,ConsumeQueue 、IndexFile 需要及时更新,否则消息无法及时被消费,根据消息属性查找消息也会出现较大延迟。

Broker 服务器在启动时会启动ReputMessageService 线程,准实时转发CommitLog 文件更新事件, 相应的任务处理器根据
转发的消息及时更新ConsumeQueue 、IndexFile 文件。
DefaultMessageStore

  1. public void start() throws Exception {
  2. //获取${ROCKETMQ_HOME}\store\lock的锁
  3. lock = lockFile.getChannel().tryLock(0, 1, false);
  4. //如果获取锁失败、拿到的是共享锁或锁已失效,说明已有Broker实例持有该锁,即MQ已经启动
  5. if (lock == null || lock.isShared() || !lock.isValid()) {
  6. throw new RuntimeException("Lock failed,MQ already started");
  7. }
  8. //文件写入Lock,并刷入磁盘
  9. lockFile.getChannel().write(ByteBuffer.wrap("lock".getBytes()));
  10. lockFile.getChannel().force(true);
  11. {
  12. /**
  13. * 1. Make sure the fast-forward messages to be truncated during the recovering according to the max physical offset of the commitlog;
  14. * 2. DLedger committedPos may be missing, so the maxPhysicalPosInLogicQueue maybe bigger that maxOffset returned by DLedgerCommitLog, just let it go;
  15. * 3. Calculate the reput offset according to the consume queue;
  16. * 4. Make sure the fall-behind messages to be dispatched before starting the commitlog, especially when the broker role are automatically changed.
  17. */
  18. //获取commitLog最小偏移量
  19. long maxPhysicalPosInLogicQueue = commitLog.getMinOffset();
  20. //获取ConsumeQueue记录的最大偏移
  21. for (ConcurrentMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
  22. //因为ConsumeQueue是在commitLog之后刷入磁盘,所以ConsumeQueue里的数据可能会比commitLog小
  23. for (ConsumeQueue logic : maps.values()) {
  24. if (logic.getMaxPhysicOffset() > maxPhysicalPosInLogicQueue) {
  25. maxPhysicalPosInLogicQueue = logic.getMaxPhysicOffset();
  26. }
  27. }
  28. }
  29. //若还未获取第一个文件则从0开始
  30. if (maxPhysicalPosInLogicQueue < 0) {
  31. maxPhysicalPosInLogicQueue = 0;
  32. }
  33. //在这期间可能磁盘出现问题,将maxPhysicalPosInLogicQueue重置为最小偏移.保证数据安全.
  34. if (maxPhysicalPosInLogicQueue < this.commitLog.getMinOffset()) {
  35. maxPhysicalPosInLogicQueue = this.commitLog.getMinOffset();
  36. /**
  37. * This happens in following conditions:
  38. * 1. If someone removes all the consumequeue files or the disk get damaged. 如果有人删除了consumequeue文件或磁盘损坏。
  39. * 2. Launch a new broker, and copy the commitlog from other brokers. 启动新的broker,并从其他broker复制commitlog。
  40. *
  41. * All the conditions has the same in common that the maxPhysicalPosInLogicQueue should be 0.
  42. * If the maxPhysicalPosInLogicQueue is gt 0, there maybe something wrong.
  43. */
  44. log.warn("[TooSmallCqOffset] maxPhysicalPosInLogicQueue={} clMinOffset={}", maxPhysicalPosInLogicQueue, this.commitLog.getMinOffset());
  45. }
  46. log.info("[SetReputOffset] maxPhysicalPosInLogicQueue={} clMinOffset={} clMaxOffset={} clConfirmedOffset={}",
  47. maxPhysicalPosInLogicQueue, this.commitLog.getMinOffset(), this.commitLog.getMaxOffset(), this.commitLog.getConfirmOffset());
  48. //maxPhysicalPosInLogicQueue:ReputMessageService 从哪个物理偏移量开始转发消息给ConsumeQueue和IndexFile 。
  49. this.reputMessageService.setReputFromOffset(maxPhysicalPosInLogicQueue);
  50. //开启线程转发消息给ConsumeQueue和IndexFile
  51. this.reputMessageService.start();
  52. /**
  53. * 1. Finish dispatching the messages fall behind, then to start other services.
  54. * 2. DLedger committedPos may be missing, so here just require dispatchBehindBytes <= 0
  55. */
  56. while (true) {
  57. if (dispatchBehindBytes() <= 0) {
  58. break;
  59. }
  60. Thread.sleep(1000);
  61. log.info("Try to finish doing reput the messages fall behind during the starting, reputOffset={} maxOffset={} behind={}", this.reputMessageService.getReputFromOffset(), this.getMaxPhyOffset(), this.dispatchBehindBytes());
  62. }
  63. this.recoverTopicQueueTable();
  64. }
  65. if (!messageStoreConfig.isEnableDLegerCommitLog()) {
  66. this.haService.start();
  67. this.handleScheduleMessageService(messageStoreConfig.getBrokerRole());
  68. }
  69. this.flushConsumeQueueService.start();
  70. this.commitLog.start();
  71. this.storeStatsService.start();
  72. this.createTempFile();
  73. this.addScheduleTask();
  74. this.shutdown = false;
  75. }

lock 也就是 ${ROCKETMQ_HOME}/store 目录下的 lock 文件。

DefaultMessageStore.ReputMessageService

  1. @Override
  2. public void run() {
  3. DefaultMessageStore.log.info(this.getServiceName() + " service started");
  4. //如果线程没关闭
  5. while (!this.isStopped()) {
  6. try {
  7. //每执行一次任务推送休息1毫秒
  8. Thread.sleep(1);
  9. // 继续尝试推送消息到消息消费队列和索引文件,
  10. this.doReput();
  11. } catch (Exception e) {
  12. DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
  13. }
  14. }
  15. DefaultMessageStore.log.info(this.getServiceName() + " service end");
  16. }
  17. private void doReput() {
  18. //说明转发进度落后太多,对应的commitLog文件已经过期被删除,重置为当前最小偏移量
  19. if (this.reputFromOffset < DefaultMessageStore.this.commitLog.getMinOffset()) {
  20. log.warn("The reputFromOffset={} is smaller than minPyOffset={}, this usually indicate that the dispatch behind too much and the commitlog has expired.",
  21. this.reputFromOffset, DefaultMessageStore.this.commitLog.getMinOffset());
  22. this.reputFromOffset = DefaultMessageStore.this.commitLog.getMinOffset();
  23. }
  24. for (boolean doNext = true; this.isCommitLogAvailable() && doNext; ) {
  25. //是否允许转发
  26. if (DefaultMessageStore.this.getMessageStoreConfig().isDuplicationEnable()
  27. //reputFromOffset>CommitLog的提交指针,则结束
  28. && this.reputFromOffset >= DefaultMessageStore.this.getConfirmOffset()) {
  29. break;
  30. }
  31. //返回reputFromOffset 偏移量开始的全部有效数据(commitlog 文件)
  32. SelectMappedBufferResult result = DefaultMessageStore.this.commitLog.getData(reputFromOffset);
  33. if (result != null) {
  34. try {
  35. //获取在文件中的偏移位置.算上文件起始地址
  36. this.reputFromOffset = result.getStartOffset();
  37. for (int readSize = 0; readSize < result.getSize() && doNext; ) {
  38. //从result 返回的ByteBuffer 中循环读取消息,一次读取一条,
  39. //反序列化并创建DispatchRequest对象,主要记录一条消息数据
  40. DispatchRequest dispatchRequest =
  41. DefaultMessageStore.this.commitLog.checkMessageAndReturnSize(result.getByteBuffer(), false, false);
  42. //在这里默认采用的是getMsgSize
  43. int size = dispatchRequest.getBufferSize() == -1 ? dispatchRequest.getMsgSize() : dispatchRequest.getBufferSize();
  44. if (dispatchRequest.isSuccess()) {
  45. if (size > 0) {
  46. //最终将分别调用CommitLogDispatcherBuildConsumeQueue (构建消息消费队列)、
  47. //CommitLogDispatcherBuildlndex (构建索引文件) 。
  48. DefaultMessageStore.this.doDispatch(dispatchRequest);
  49. if (BrokerRole.SLAVE != DefaultMessageStore.this.getMessageStoreConfig().getBrokerRole()
  50. && DefaultMessageStore.this.brokerConfig.isLongPollingEnable()) {
  51. DefaultMessageStore.this.messageArrivingListener.arriving(dispatchRequest.getTopic(),
  52. dispatchRequest.getQueueId(), dispatchRequest.getConsumeQueueOffset() + 1,
  53. dispatchRequest.getTagsCode(), dispatchRequest.getStoreTimestamp(),
  54. dispatchRequest.getBitMap(), dispatchRequest.getPropertiesMap());
  55. }
  56. //更改为下一条消息偏移量
  57. this.reputFromOffset += size;
  58. readSize += size;
  59. if (DefaultMessageStore.this.getMessageStoreConfig().getBrokerRole() == BrokerRole.SLAVE) {
  60. DefaultMessageStore.this.storeStatsService
  61. .getSinglePutMessageTopicTimesTotal(dispatchRequest.getTopic()).incrementAndGet();
  62. DefaultMessageStore.this.storeStatsService
  63. .getSinglePutMessageTopicSizeTotal(dispatchRequest.getTopic())
  64. .addAndGet(dispatchRequest.getMsgSize());
  65. }
  66. } else if (size == 0) {
  67. //返回下一个文件的起始偏移
  68. this.reputFromOffset = DefaultMessageStore.this.commitLog.rollNextFile(this.reputFromOffset);
  69. //跳过循环
  70. readSize = result.getSize();
  71. }
  72. } else if (!dispatchRequest.isSuccess()) {
  73. //没解析到完整的消息
  74. if (size > 0) {
  75. log.error("[BUG]read total count not equals msg total size. reputFromOffset={}", reputFromOffset);
  76. //跳过不完整的消息,并声明这是一个bug
  77. this.reputFromOffset += size;
  78. } else {
  79. //没数据,标记不需要执行下一条.
  80. doNext = false;
  81. // If user open the dledger pattern or the broker is master node,
  82. // it will not ignore the exception and fix the reputFromOffset variable
  83. if (DefaultMessageStore.this.getMessageStoreConfig().isEnableDLegerCommitLog() ||
  84. DefaultMessageStore.this.brokerConfig.getBrokerId() == MixAll.MASTER_ID) {
  85. log.error("[BUG]dispatch message to consume queue error, COMMITLOG OFFSET: {}",
  86. this.reputFromOffset);
  87. this.reputFromOffset += result.getSize() - readSize;
  88. }
  89. }
  90. }
  91. }
  92. } finally {
  93. result.release();
  94. }
  95. } else {
  96. doNext = false;
  97. }
  98. }
  99. }

获取当前Commitlog 目录最小偏移量
CommitLog

  1. public long getMinOffset() {
  2. //获取第一个文件
  3. MappedFile mappedFile = this.mappedFileQueue.getFirstMappedFile();
  4. if (mappedFile != null) {
  5. //第一个文件可用
  6. if (mappedFile.isAvailable()) {
  7. //返回该文件起始偏移量
  8. return mappedFile.getFileFromOffset();
  9. } else {
  10. //返回下个文件的起始偏移量
  11. return this.rollNextFile(mappedFile.getFileFromOffset());
  12. }
  13. }
  14. return -1;
  15. }

返回reputFromOffset 偏移量开始的全部有效数据(commitlog 文件)

  1. public SelectMappedBufferResult getData(final long offset) {
  2. return this.getData(offset, offset == 0);
  3. }
  4. public SelectMappedBufferResult getData(final long offset, final boolean returnFirstOnNotFound) {
  5. //获取单个 CommitLog 文件大小,默认1G,
  6. int mappedFileSize = this.defaultMessageStore.getMessageStoreConfig().getMappedFileSizeCommitLog();
  7. //根据消息偏移量offset 查找MappedFile
  8. MappedFile mappedFile = this.mappedFileQueue.findMappedFileByOffset(offset, returnFirstOnNotFound);
  9. if (mappedFile != null) {
  10. int pos = (int) (offset % mappedFileSize);
  11. //查找pos 到当前最大可读之间的数据
  12. SelectMappedBufferResult result = mappedFile.selectMappedBuffer(pos);
  13. return result;
  14. }
  15. return null;
  16. }

根据消息偏移量offset 查找MappedFile
MappedFileQueue

  1. public MappedFile findMappedFileByOffset(final long offset, final boolean returnFirstOnNotFound) {
  2. try {
  3. MappedFile firstMappedFile = this.getFirstMappedFile();
  4. MappedFile lastMappedFile = this.getLastMappedFile();
  5. if (firstMappedFile != null && lastMappedFile != null) {
  6. //偏移不在起始和结束文件中,说明越界.
  7. if (offset < firstMappedFile.getFileFromOffset() || offset >= lastMappedFile.getFileFromOffset() + this.mappedFileSize) {
  8. LOG_ERROR.warn("Offset not matched. Request offset: {}, firstOffset: {}, lastOffset: {}, mappedFileSize: {}, mappedFiles count: {}",
  9. offset,
  10. firstMappedFile.getFileFromOffset(),
  11. lastMappedFile.getFileFromOffset() + this.mappedFileSize,
  12. this.mappedFileSize,
  13. this.mappedFiles.size());
  14. } else {
  15. //因为RocketMQ定时删除存储文件
  16. //所以第一个文件偏移开始并不一定是000000.
  17. //同理可得offset / this.mappedFileSize并不能定位到具体文件
  18. //所以还需要减去第一个文件的偏移/文件大小,算出磁盘中起始第几个文件
  19. int index = (int) ((offset / this.mappedFileSize) - (firstMappedFile.getFileFromOffset() / this.mappedFileSize));
  20. MappedFile targetFile = null;
  21. try {
  22. //获取映射文件
  23. targetFile = this.mappedFiles.get(index);
  24. } catch (Exception ignored) {
  25. }
  26. //再次检测是否在文件范围内
  27. if (targetFile != null && offset >= targetFile.getFileFromOffset()
  28. && offset < targetFile.getFileFromOffset() + this.mappedFileSize) {
  29. return targetFile;
  30. }
  31. //遍历所有文件查找
  32. for (MappedFile tmpMappedFile : this.mappedFiles) {
  33. if (offset >= tmpMappedFile.getFileFromOffset()
  34. && offset < tmpMappedFile.getFileFromOffset() + this.mappedFileSize) {
  35. return tmpMappedFile;
  36. }
  37. }
  38. }
  39. //如果配置了没找到返回第一个,就返回第一个文件
  40. if (returnFirstOnNotFound) {
  41. return firstMappedFile;
  42. }
  43. }
  44. } catch (Exception e) {
  45. log.error("findMappedFileByOffset Exception", e);
  46. }
  47. return null;
  48. }

查找pos 到当前最大可读之间的数据
MappedFile

  1. public SelectMappedBufferResult selectMappedBuffer(int pos) {
  2. //获取最大可读数据位置
  3. int readPosition = getReadPosition();
  4. //若有数据可读
  5. if (pos < readPosition && pos >= 0) {
  6. if (this.hold()) {
  7. /*
  8. 操作ByteBuffer 时如果使用了slice () 方法,对其ByteBuffer 进行读取时一般手动指定
  9. position 与limit 指针,而不是调用flip 方法来切换读写状态。
  10. */
  11. //由于在整个写入期间都未曾改变MappedByteBuffer的指针
  12. //所以mappedByteBuffer.slice()方法返回的共享缓存区空间为整个MappedFile
  13. ByteBuffer byteBuffer = this.mappedByteBuffer.slice();
  14. byteBuffer.position(pos);
  15. int size = readPosition - pos;
  16. ByteBuffer byteBufferNew = byteBuffer.slice();
  17. byteBufferNew.limit(size);
  18. return new SelectMappedBufferResult(this.fileFromOffset + pos, byteBufferNew, size, this);
  19. }
  20. }
  21. return null;
  22. }
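
slice()配合手动设置position/limit的读取方式,可以用一段独立的小例子说明(非源码,仅示意共享缓冲区的用法):

  1. import java.nio.ByteBuffer;
  2. public class SliceDemo {
  3. public static void main(String[] args) {
  4. ByteBuffer origin = ByteBuffer.allocate(64);
  5. ByteBuffer writer = origin.slice(); // 模拟MappedFile:写入走独立视图,origin的position始终为0
  6. writer.put("0123456789".getBytes()); // 相当于wrotePosition=10
  7. int pos = 3; // 相当于selectMappedBuffer(pos)
  8. int readPosition = writer.position(); // 当前最大可读位置=10
  9. ByteBuffer view = origin.slice(); // 共享整块内存的新视图
  10. view.position(pos); // 手动指定position,而不是调用flip()
  11. ByteBuffer sub = view.slice();
  12. sub.limit(readPosition - pos); // 手动指定limit
  13. byte[] out = new byte[sub.remaining()];
  14. sub.get(out);
  15. System.out.println(new String(out)); // 输出 3456789
  16. }
  17. }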

调度

  1. public void doDispatch(DispatchRequest req) {
  2. //调用文件转发请求,分别同步index和queue文件
  3. for (CommitLogDispatcher dispatcher : this.dispatcherList) {
  4. dispatcher.dispatch(req);
  5. }
  6. }
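
dispatcherList中默认挂着CommitLogDispatcherBuildConsumeQueue与CommitLogDispatcherBuildIndex两个实现。CommitLogDispatcher本身只是一个回调接口,下面是一个假设性的自定义实现示意(仅演示该扩展点的用法,并非源码):

  1. import java.util.concurrent.atomic.AtomicLong;
  2. import org.apache.rocketmq.store.CommitLogDispatcher;
  3. import org.apache.rocketmq.store.DispatchRequest;
  4. // 示意代码:统计被转发的消息条数,可按同样方式加入dispatcherList
  5. public class MessageCountDispatcher implements CommitLogDispatcher {
  6. private final AtomicLong total = new AtomicLong();
  7. @Override
  8. public void dispatch(DispatchRequest request) {
  9. total.incrementAndGet(); // 每转发一条消息计数一次
  10. }
  11. }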

根据消息更新ConsumeQueue

DefaultMessageStore

  1. public void dispatch(DispatchRequest request) {
  2. final int tranType = MessageSysFlag.getTransactionValue(request.getSysFlag());
  3. switch (tranType) {
  4. case MessageSysFlag.TRANSACTION_NOT_TYPE:
  5. case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
  6. //根据消息更新ConsumeQueue
  7. DefaultMessageStore.this.putMessagePositionInfo(request);
  8. break;
  9. case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
  10. case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
  11. break;
  12. }
  13. }
  14. }

DefaultMessageStore

  1. //根据消息更新ConsumeQueue
  2. public void putMessagePositionInfo(DispatchRequest dispatchRequest) {
  3. //根据消息主题与队列ID,先获取对应的ConsumeQueue 文件
  4. ConsumeQueue cq = this.findConsumeQueue(dispatchRequest.getTopic(), dispatchRequest.getQueueId());
  5. //将内容更新到内存映射文件中,不刷盘,因为ConsumeQueue刷盘固定为异步刷盘
  6. cq.putMessagePositionInfoWrapper(dispatchRequest);
  7. }

根据topic和queueId查询ConsumeQueue,没有则新建

  1. public ConsumeQueue findConsumeQueue(String topic, int queueId) {
  2. //根据主题获取消息消费队列目录
  3. ConcurrentMap<Integer, ConsumeQueue> map = consumeQueueTable.get(topic);
  4. if (null == map) {
  5. ConcurrentMap<Integer, ConsumeQueue> newMap = new ConcurrentHashMap<Integer, ConsumeQueue>(128);
  6. //没有则新建并保存
  7. ConcurrentMap<Integer, ConsumeQueue> oldMap = consumeQueueTable.putIfAbsent(topic, newMap);
  8. if (oldMap != null) {
  9. map = oldMap;
  10. } else {
  11. map = newMap;
  12. }
  13. }
  14. //根据队列Id获取队列文件
  15. ConsumeQueue logic = map.get(queueId);
  16. if (null == logic) {
  17. ConsumeQueue newLogic = new ConsumeQueue(
  18. topic,
  19. queueId,
  20. StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()),
  21. this.getMessageStoreConfig().getMappedFileSizeConsumeQueue(),
  22. this);
  23. //没有则新建并保存
  24. ConsumeQueue oldLogic = map.putIfAbsent(queueId, newLogic);
  25. if (oldLogic != null) {
  26. logic = oldLogic;
  27. } else {
  28. logic = newLogic;
  29. }
  30. }
  31. return logic;
  32. }

将内容更新到内存映射文件中,不刷盘,因为ConsumeQueue刷盘固定为异步刷盘

  1. public void putMessagePositionInfoWrapper(DispatchRequest request) {
  2. final int maxRetries = 30;
  3. boolean canWrite = this.defaultMessageStore.getRunningFlags().isCQWriteable();
  4. for (int i = 0; i < maxRetries && canWrite; i++) {
  5. long tagsCode = request.getTagsCode();
  6. if (isExtWriteEnable()) {
  7. ConsumeQueueExt.CqExtUnit cqExtUnit = new ConsumeQueueExt.CqExtUnit();
  8. cqExtUnit.setFilterBitMap(request.getBitMap());
  9. cqExtUnit.setMsgStoreTime(request.getStoreTimestamp());
  10. cqExtUnit.setTagsCode(request.getTagsCode());
  11. long extAddr = this.consumeQueueExt.put(cqExtUnit);
  12. if (isExtAddr(extAddr)) {
  13. tagsCode = extAddr;
  14. } else {
  15. log.warn("Save consume queue extend fail, So just save tagsCode! {}, topic:{}, queueId:{}, offset:{}", cqExtUnit,
  16. topic, queueId, request.getCommitLogOffset());
  17. }
  18. }
  19. //将内容更新到内存映射文件中,不刷盘,因为ConsumeQueue刷盘固定为异步刷盘
  20. boolean result = this.putMessagePositionInfo(request.getCommitLogOffset(),
  21. request.getMsgSize(), tagsCode, request.getConsumeQueueOffset());
  22. if (result) {
  23. if (this.defaultMessageStore.getMessageStoreConfig().getBrokerRole() == BrokerRole.SLAVE ||
  24. this.defaultMessageStore.getMessageStoreConfig().isEnableDLegerCommitLog()) {
  25. this.defaultMessageStore.getStoreCheckpoint().setPhysicMsgTimestamp(request.getStoreTimestamp());
  26. }
  27. this.defaultMessageStore.getStoreCheckpoint().setLogicsMsgTimestamp(request.getStoreTimestamp());
  28. return;
  29. } else {
  30. // XXX: warn and notify me
  31. log.warn("[BUG]put commit log position info to " + topic + ":" + queueId + " " + request.getCommitLogOffset()
  32. + " failed, retry " + i + " times");
  33. try {
  34. Thread.sleep(1000);
  35. } catch (InterruptedException e) {
  36. log.warn("", e);
  37. }
  38. }
  39. }
  40. // XXX: warn and notify me
  41. log.error("[BUG]consume queue can not write, {} {}", this.topic, this.queueId);
  42. this.defaultMessageStore.getRunningFlags().makeLogicsQueueError();
  43. }
  44. private boolean putMessagePositionInfo(final long offset, final int size, final long tagsCode,
  45. final long cqOffset) {
  46. //可能重复构建消息
  47. // maxPhysicOffset:消息体总长度加上消息在comlog偏移量
  48. if (offset + size <= this.maxPhysicOffset) {
  49. log.warn("Maybe try to build consume queue repeatedly maxPhysicOffset={} phyOffset={}", maxPhysicOffset, offset);
  50. return true;
  51. }
  52. this.byteBufferIndex.flip();
  53. this.byteBufferIndex.limit(CQ_STORE_UNIT_SIZE);
  54. //依次将消息偏移量、消息长度、taghashcode 写入到ByteBuffer堆缓冲区 中
  55. this.byteBufferIndex.putLong(offset);
  56. this.byteBufferIndex.putInt(size);
  57. this.byteBufferIndex.putLong(tagsCode);
  58. /* 根据consumeQueueOffset 计算ConsumeQueue 中的物理地址, 将内容追加到ConsumeQueue 的内
  59. 存映射文件中(本操作只追加并不刷盘), ConsumeQueue 的刷盘方式固定为异步刷盘模式。*/
  60. final long expectLogicOffset = cqOffset * CQ_STORE_UNIT_SIZE;
  61. //获取最后一个文件,找不到则创建
  62. MappedFile mappedFile = this.mappedFileQueue.getLastMappedFile(expectLogicOffset);
  63. if (mappedFile != null) {
  64. //若是队列第一个文件,该文件写指针为0
  65. if (mappedFile.isFirstCreateInQueue() && cqOffset != 0 && mappedFile.getWrotePosition() == 0) {
  66. //设置最小逻辑偏移
  67. this.minLogicOffset = expectLogicOffset;
  68. //设置刷盘指针
  69. this.mappedFileQueue.setFlushedWhere(expectLogicOffset);
  70. //当前数据提交指针,内存中ByteBuffer 当前的写指针,该值大于等于flushedWhere 。
  71. this.mappedFileQueue.setCommittedWhere(expectLogicOffset);
  72. this.fillPreBlank(mappedFile, expectLogicOffset);
  73. log.info("fill pre blank space " + mappedFile.getFileName() + " " + expectLogicOffset + " "
  74. + mappedFile.getWrotePosition());
  75. }
  76. if (cqOffset != 0) {
  77. long currentLogicOffset = mappedFile.getWrotePosition() + mappedFile.getFileFromOffset();
  78. if (expectLogicOffset < currentLogicOffset) {
  79. log.warn("Build consume queue repeatedly, expectLogicOffset: {} currentLogicOffset: {} Topic: {} QID: {} Diff: {}",
  80. expectLogicOffset, currentLogicOffset, this.topic, this.queueId, expectLogicOffset - currentLogicOffset);
  81. return true;
  82. }
  83. if (expectLogicOffset != currentLogicOffset) {
  84. LOG_ERROR.warn(
  85. "[BUG]logic queue order maybe wrong, expectLogicOffset: {} currentLogicOffset: {} Topic: {} QID: {} Diff: {}",
  86. expectLogicOffset,
  87. currentLogicOffset,
  88. this.topic,
  89. this.queueId,
  90. expectLogicOffset - currentLogicOffset
  91. );
  92. }
  93. }
  94. this.maxPhysicOffset = offset + size; //记录最大物理偏移,下一行将20字节条目追加写入内存映射文件
  95. return mappedFile.appendMessage(this.byteBufferIndex.array());
  96. }
  97. return false;
  98. }
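
ConsumeQueue每个条目固定20字节(8字节commitlog偏移 + 4字节消息长度 + 8字节tag的hashcode),所以逻辑偏移与队列下标可以直接换算,下面是一个数字示意(单个ConsumeQueue文件默认30万个条目,具体以MessageStoreConfig为准):

  1. public class CqOffsetDemo {
  2. public static void main(String[] args) {
  3. final int CQ_STORE_UNIT_SIZE = 20; // 8字节offset + 4字节size + 8字节tagsCode
  4. long cqOffset = 100000L; // 队列逻辑下标:第100000个条目
  5. long expectLogicOffset = cqOffset * CQ_STORE_UNIT_SIZE; // 2000000,即条目写入的逻辑位置
  6. long fileSize = 300000L * CQ_STORE_UNIT_SIZE; // 单个ConsumeQueue文件默认6000000字节
  7. System.out.println(expectLogicOffset + " -> 第" + (expectLogicOffset / fileSize + 1) + "个文件");
  8. }
  9. }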

MappedFileQueue
获取最后一个文件,找不到则创建

  1. /**
  2. * 获取最后一个文件,找不到则创建
  3. */
  4. public MappedFile getLastMappedFile(final long startOffset) {
  5. return getLastMappedFile(startOffset, true);
  6. }
  7. public MappedFile getLastMappedFile(final long startOffset, boolean needCreate) {
  8. long createOffset = -1;
  9. MappedFile mappedFileLast = getLastMappedFile();
  10. //创建的偏移必须是mappedFileSize设置的倍数
  11. if (mappedFileLast == null) {
  12. createOffset = startOffset - (startOffset % this.mappedFileSize);
  13. }
  14. //写满则新建
  15. if (mappedFileLast != null && mappedFileLast.isFull()) {
  16. //计算新建文件的起始偏移
  17. createOffset = mappedFileLast.getFileFromOffset() + this.mappedFileSize;
  18. }
  19. if (createOffset != -1 && needCreate) {
  20. //利用偏移为文件名
  21. String nextFilePath = this.storePath + File.separator + UtilAll.offset2FileName(createOffset);
  22. String nextNextFilePath = this.storePath + File.separator
  23. + UtilAll.offset2FileName(createOffset + this.mappedFileSize);
  24. MappedFile mappedFile = null;
  25. if (this.allocateMappedFileService != null) {
  26. //分别异步创建2个文件
  27. mappedFile = this.allocateMappedFileService.putRequestAndReturnMappedFile(nextFilePath,
  28. nextNextFilePath, this.mappedFileSize);
  29. } else {
  30. try {
  31. //同步创建文件
  32. mappedFile = new MappedFile(nextFilePath, this.mappedFileSize);
  33. } catch (IOException e) {
  34. log.error("create mappedFile exception", e);
  35. }
  36. }
  37. if (mappedFile != null) {
  38. //若mappedFiles队列为空
  39. if (this.mappedFiles.isEmpty()) {
  40. //设置是MappedFileQueue 队列中第一个文件
  41. mappedFile.setFirstCreateInQueue(true);
  42. }
  43. //添加mapppedFiles集合
  44. this.mappedFiles.add(mappedFile);
  45. }
  46. return mappedFile;
  47. }
  48. return mappedFileLast;
  49. }

异步创建文件

  1. public MappedFile putRequestAndReturnMappedFile(String nextFilePath, String nextNextFilePath, int fileSize) {
  2. int canSubmitRequests = 2;
  3. if (this.messageStore.getMessageStoreConfig().isTransientStorePoolEnable()) {
  4. if (this.messageStore.getMessageStoreConfig().isFastFailIfNoBufferInStorePool()
  5. && BrokerRole.SLAVE != this.messageStore.getMessageStoreConfig().getBrokerRole()) {
  6. //if broker is slave, don't fast fail even no buffer in pool
  7. canSubmitRequests = this.messageStore.getTransientStorePool().availableBufferNums() - this.requestQueue.size();
  8. }
  9. }
  10. AllocateRequest nextReq = new AllocateRequest(nextFilePath, fileSize);
  11. boolean nextPutOK = this.requestTable.putIfAbsent(nextFilePath, nextReq) == null;
  12. if (nextPutOK) {
  13. if (canSubmitRequests <= 0) {
  14. log.warn("[NOTIFYME]TransientStorePool is not enough, so create mapped file error, " +
  15. "RequestQueueSize : {}, StorePoolSize: {}", this.requestQueue.size(), this.messageStore.getTransientStorePool().availableBufferNums());
  16. this.requestTable.remove(nextFilePath);
  17. return null;
  18. }
  19. //添加生产者队列,异步线程创建文件
  20. boolean offerOK = this.requestQueue.offer(nextReq);
  21. if (!offerOK) {
  22. log.warn("never expected here, add a request to preallocate queue failed");
  23. }
  24. canSubmitRequests--;
  25. }
  26. AllocateRequest nextNextReq = new AllocateRequest(nextNextFilePath, fileSize);
  27. boolean nextNextPutOK = this.requestTable.putIfAbsent(nextNextFilePath, nextNextReq) == null;
  28. if (nextNextPutOK) {
  29. if (canSubmitRequests <= 0) {
  30. log.warn("[NOTIFYME]TransientStorePool is not enough, so skip preallocate mapped file, " +
  31. "RequestQueueSize : {}, StorePoolSize: {}", this.requestQueue.size(), this.messageStore.getTransientStorePool().availableBufferNums());
  32. this.requestTable.remove(nextNextFilePath);
  33. } else {
  34. boolean offerOK = this.requestQueue.offer(nextNextReq);
  35. if (!offerOK) {
  36. log.warn("never expected here, add a request to preallocate queue failed");
  37. }
  38. }
  39. }
  40. //是否创建成功
  41. if (hasException) {
  42. log.warn(this.getServiceName() + " service has exception. so return null");
  43. return null;
  44. }
  45. AllocateRequest result = this.requestTable.get(nextFilePath);
  46. try {
  47. if (result != null) {
  48. boolean waitOK = result.getCountDownLatch().await(waitTimeOut, TimeUnit.MILLISECONDS);
  49. if (!waitOK) {
  50. log.warn("create mmap timeout " + result.getFilePath() + " " + result.getFileSize());
  51. return null;
  52. } else {
  53. this.requestTable.remove(nextFilePath);
  54. return result.getMappedFile();
  55. }
  56. } else {
  57. log.error("find preallocate mmap failed, this never happen");
  58. }
  59. } catch (InterruptedException e) {
  60. log.warn(this.getServiceName() + " service has exception. ", e);
  61. }
  62. return null;
  63. }

同步创建文件 (默认)

文件初始化

  1. public MappedFile(final String fileName, final int fileSize) throws IOException {
  2. init(fileName, fileSize);//文件初始化
  3. }
  4. private void init(final String fileName, final int fileSize) throws IOException {
  5. this.fileName = fileName;
  6. this.fileSize = fileSize;
  7. this.file = new File(fileName);
  8. //fileFromOffset为文件名
  9. this.fileFromOffset = Long.parseLong(this.file.getName());
  10. boolean ok = false;
  11. ensureDirOK(this.file.getParent());
  12. try {
  13. //创建文件读写通道
  14. this.fileChannel = new RandomAccessFile(this.file, "rw").getChannel();
  15. //将文件映射内存中
  16. this.mappedByteBuffer = this.fileChannel.map(MapMode.READ_WRITE, 0, fileSize);
  17. //重新计算MappedFile虚拟内存。
  18. TOTAL_MAPPED_VIRTUAL_MEMORY.addAndGet(fileSize);
  19. //计算文件个数
  20. TOTAL_MAPPED_FILES.incrementAndGet();
  21. ok = true;
  22. } catch (FileNotFoundException e) {
  23. log.error("Failed to create file " + this.fileName, e);
  24. throw e;
  25. } catch (IOException e) {
  26. log.error("Failed to map file " + this.fileName, e);
  27. throw e;
  28. } finally {
  29. //执行失败,但通道创建成功,则关闭通道
  30. if (!ok && this.fileChannel != null) {
  31. this.fileChannel.close();
  32. }
  33. }
  34. }
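
内存映射本身只依赖JDK NIO,下面用一段独立的小例子示意MappedFile初始化所做的事情(非源码,文件名与大小均为假设):

  1. import java.io.RandomAccessFile;
  2. import java.nio.MappedByteBuffer;
  3. import java.nio.channels.FileChannel;
  4. public class MmapDemo {
  5. public static void main(String[] args) throws Exception {
  6. int fileSize = 1024 * 1024; // 示意用1MB,commitlog默认是1G
  7. try (RandomAccessFile raf = new RandomAccessFile("00000000000000000000", "rw");
  8. FileChannel channel = raf.getChannel()) {
  9. // 将整个文件映射到内存,之后对buffer的写入由操作系统异步落盘,或显式force
  10. MappedByteBuffer buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileSize);
  11. buffer.put("hello".getBytes());
  12. buffer.force(); // 类似MappedFile#flush中的mappedByteBuffer.force()
  13. }
  14. }
  15. }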

根据消息更新Index 索引文件

也是从doDispatch转发过来的,由CommitLogDispatcherBuildIndex调用IndexService的buildIndex方法。
构建索引

  1. public void buildIndex(DispatchRequest req) {
  2. //获取或创建IndexFile 文件
  3. IndexFile indexFile = retryGetAndCreateIndexFile();
  4. if (indexFile != null) {
  5. //获取所有文件最大的物理偏移量
  6. long endPhyOffset = indexFile.getEndPhyOffset();
  7. DispatchRequest msg = req;
  8. String topic = msg.getTopic();
  9. String keys = msg.getKeys();
  10. //如果该消息的物理偏移量小于索引文件中的物理偏移
  11. if (msg.getCommitLogOffset() < endPhyOffset) {
  12. //说明是重复数据,忽略本次索引构建
  13. return;
  14. }
  15. final int tranType = MessageSysFlag.getTransactionValue(msg.getSysFlag());
  16. switch (tranType) {
  17. case MessageSysFlag.TRANSACTION_NOT_TYPE:
  18. case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
  19. case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
  20. break;
  21. case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
  22. return;
  23. }
  24. //如果消息的唯一键不为空
  25. if (req.getUniqKey() != null) {
  26. //则添加到Hash索引中,以便加速根据唯一键检索消息。
  27. indexFile = putKey(indexFile, msg, buildKey(topic, req.getUniqKey()));
  28. if (indexFile == null) {
  29. log.error("putKey error commitlog {} uniqkey {}", req.getCommitLogOffset(), req.getUniqKey());
  30. return;
  31. }
  32. }
  33. //构建索引键, RocketMQ 支持为同一个消息建立多个索引,多个索引键空格分开。
  34. if (keys != null && keys.length() > 0) {
  35. String[] keyset = keys.split(MessageConst.KEY_SEPARATOR);
  36. for (int i = 0; i < keyset.length; i++) {
  37. String key = keyset[i];
  38. if (key.length() > 0) {
  39. indexFile = putKey(indexFile, msg, buildKey(topic, key));
  40. if (indexFile == null) {
  41. log.error("putKey error commitlog {} uniqkey {}", req.getCommitLogOffset(), req.getUniqKey());
  42. return;
  43. }
  44. }
  45. }
  46. }
  47. } else {
  48. log.error("build index error, stop building index");
  49. }
  50. }

获取或创建IndexFile 文件

IndexService

  1. public IndexFile retryGetAndCreateIndexFile() {
  2. IndexFile indexFile = null;
  3. //默认尝试创建MAX_TRY_IDX_CREATE次
  4. for (int times = 0; null == indexFile && times < MAX_TRY_IDX_CREATE; times++) {
  5. //获取最后文件,如果没有则创建
  6. indexFile = this.getAndCreateLastIndexFile();
  7. if (null != indexFile)
  8. break;
  9. try {
  10. log.info("Tried to create index file " + times + " times");
  11. //创建失败,休眠1秒,尝试再次创建
  12. Thread.sleep(1000);
  13. } catch (InterruptedException e) {
  14. log.error("Interrupted", e);
  15. }
  16. }
  17. if (null == indexFile) {
  18. //标记索引文件不能创建
  19. this.defaultMessageStore.getAccessRights().makeIndexFileError();
  20. log.error("Mark index file cannot build flag");
  21. }
  22. return indexFile;
  23. }
  24. //获取最后一个文件,若没有则创建
  25. public IndexFile getAndCreateLastIndexFile() {
  26. IndexFile indexFile = null;
  27. IndexFile prevIndexFile = null;
  28. long lastUpdateEndPhyOffset = 0;
  29. long lastUpdateIndexTimestamp = 0;
  30. {
  31. this.readWriteLock.readLock().lock();
  32. //如果文件集合不为空
  33. if (!this.indexFileList.isEmpty()) {
  34. //获取最后一个索引文件
  35. IndexFile tmp = this.indexFileList.get(this.indexFileList.size() - 1);
  36. //若文件没写满
  37. if (!tmp.isWriteFull()) {
  38. indexFile = tmp;
  39. } else {
  40. lastUpdateEndPhyOffset = tmp.getEndPhyOffset();
  41. lastUpdateIndexTimestamp = tmp.getEndTimestamp();
  42. prevIndexFile = tmp;
  43. }
  44. }
  45. this.readWriteLock.readLock().unlock();
  46. }
  47. //文件集合没文件,或者文件写满了,则新建文件
  48. if (indexFile == null) {
  49. try {
  50. String fileName =
  51. this.storePath + File.separator
  52. + UtilAll.timeMillisToHumanString(System.currentTimeMillis());
  53. //这里创建了文件
  54. indexFile =
  55. new IndexFile(fileName, this.hashSlotNum, this.indexNum, lastUpdateEndPhyOffset,
  56. lastUpdateIndexTimestamp);
  57. this.readWriteLock.writeLock().lock();
  58. //加入文件集合
  59. this.indexFileList.add(indexFile);
  60. } catch (Exception e) {
  61. log.error("getLastIndexFile exception ", e);
  62. } finally {
  63. this.readWriteLock.writeLock().unlock();
  64. }
  65. //将写满文件刷入磁盘
  66. if (indexFile != null) {
  67. final IndexFile flushThisFile = prevIndexFile;
  68. Thread flushThread = new Thread(new Runnable() {
  69. @Override
  70. public void run() {
  71. IndexService.this.flush(flushThisFile);
  72. }
  73. }, "FlushIndexFileThread");
  74. flushThread.setDaemon(true);
  75. flushThread.start();
  76. }
  77. }
  78. return indexFile;
  79. }

添加到Hash索引文件中

IndexService

  1. private IndexFile putKey(IndexFile indexFile, DispatchRequest msg, String idxKey) {
  2. for (boolean ok = indexFile.putKey(idxKey, msg.getCommitLogOffset(), msg.getStoreTimestamp()); !ok; ) {
  3. log.warn("Index file [" + indexFile.getFileName() + "] is full, trying to create another one");
  4. //说明文件写满,则再次获取一个文件
  5. indexFile = retryGetAndCreateIndexFile();
  6. if (null == indexFile) {
  7. return null;
  8. }
  9. //尝试写入
  10. ok = indexFile.putKey(idxKey, msg.getCommitLogOffset(), msg.getStoreTimestamp());
  11. }
  12. return indexFile;
  13. }

将消息索引键与消息偏移量映射关系写入到IndexFile

  1. public boolean putKey(final String key, final long phyOffset, final long storeTimestamp) {
  2. //如果当前已使用条目大于等于允许最大条目数时,则返回false,表示当前索引文件已写满
  3. if (this.indexHeader.getIndexCount() < this.indexNum) {
  4. //根据key算出key 的hashcode
  5. int keyHash = indexKeyHashMethod(key);
  6. //定位hashcode对应的hash槽下标
  7. int slotPos = keyHash % this.hashSlotNum;
  8. //hashcode对应的hash槽的物理地址=头部40字节+对应hash槽下标*槽大小
  9. int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
  10. FileLock fileLock = null;
  11. try {
  12. // fileLock = this.fileChannel.lock(absSlotPos, hashSlotSize,
  13. // false);
  14. //读取hash 槽中存储的数据,每个槽占用4字节,也就是getInt就可以了
  15. int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
  16. //如果hash 槽存储的数据小于等于0 或大于当前索引文件中的索引条目个数,则将slotValue 设置为0
  17. //说明槽尚未占用
  18. if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()) {
  19. slotValue = invalidIndex;
  20. }
  21. //计算待存储消息的时间戳与第一条消息时间戳的差值,并转换成秒。
  22. long timeDiff = storeTimestamp - this.indexHeader.getBeginTimestamp();
  23. timeDiff = timeDiff / 1000;
  24. if (this.indexHeader.getBeginTimestamp() <= 0) {
  25. timeDiff = 0;
  26. } else if (timeDiff > Integer.MAX_VALUE) {
  27. timeDiff = Integer.MAX_VALUE;
  28. } else if (timeDiff < 0) {
  29. timeDiff = 0;
  30. }
  31. //计算新添加条目起始偏移量
  32. int absIndexPos =
  33. //头部字节长度
  34. IndexHeader.INDEX_HEADER_SIZE +
  35. //hash槽数量*单个槽大小
  36. this.hashSlotNum * hashSlotSize
  37. //当前index条目个数*单个条目大小
  38. + this.indexHeader.getIndexCount() * indexSize;
  39. //之所以只存储HashCode 而不存储具体的key , 是为
  40. //了将Index 条目设计为定长结构,才能方便地检索与定位条目。
  41. this.mappedByteBuffer.putInt(absIndexPos, keyHash);
  42. //消息对应的物理偏移量。
  43. this.mappedByteBuffer.putLong(absIndexPos + 4, phyOffset);
  44. //该消息存储时间与第一条消息的时间戳的差值,小于0 该消息无效
  45. this.mappedByteBuffer.putInt(absIndexPos + 4 + 8, (int) timeDiff);
  46. //当产生hash槽冲突时,Hash 槽中存储的是该Hash Code 所对应的最新的Index 条目的下标,
  47. // 新的Index 条目的最后4 个字节存储该Hash Code 上一个条目的Index 下标。
  48. //如果Hash槽中存储的值为0、大于当前IndexFile最大条目数或小于-1,表示该Hash槽当前并没有与之对应的Index条目。
  49. this.mappedByteBuffer.putInt(absIndexPos + 4 + 8 + 4, slotValue);
  50. //将当前条目的下标(即IndexCount)写入Hash槽对应的MappedByteBuffer位置,覆盖原先Hash槽的值。
  51. this.mappedByteBuffer.putInt(absSlotPos, this.indexHeader.getIndexCount());
  52. //如果这是该文件写入的第一个条目(IndexCount初始值为1)
  53. if (this.indexHeader.getIndexCount() <= 1) {
  54. //更新BeginPhyOffset和BeginTimestamp
  55. this.indexHeader.setBeginPhyOffset(phyOffset);
  56. this.indexHeader.setBeginTimestamp(storeTimestamp);
  57. }
  58. //slotValue为0,说明该hash槽是首次被使用,已使用槽数量加1
  59. if (invalidIndex == slotValue) {
  60. this.indexHeader.incHashSlotCount();
  61. }
  62. //记录新的索引个数
  63. this.indexHeader.incIndexCount();
  64. this.indexHeader.setEndPhyOffset(phyOffset);
  65. this.indexHeader.setEndTimestamp(storeTimestamp);
  66. return true;
  67. } catch (Exception e) {
  68. log.error("putKey exception, Key: " + key + " KeyHashCode: " + key.hashCode(), e);
  69. } finally {
  70. if (fileLock != null) {
  71. try {
  72. fileLock.release();
  73. } catch (IOException e) {
  74. log.error("Failed to release the lock", e);
  75. }
  76. }
  77. }
  78. } else {
  79. log.warn("Over index file capacity: index count = " + this.indexHeader.getIndexCount()
  80. + "; index max num = " + this.indexNum);
  81. }
  82. return false;
  83. }
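
结合 putKey 中的偏移量计算可以看出 IndexFile 的物理布局:40字节的文件头 + hashSlotNum×4字节的hash槽区 + indexNum×20字节的条目区,每个条目 = keyHash(4) + phyOffset(8) + timeDiff(4) + 前一个条目下标(4)。下面的示意代码按这个布局计算槽和条目的物理位置,其中 hashSlotNum=500万、indexNum=2000万 为假设的默认值,具体以 MessageStoreConfig 为准:

  1. // 示意代码:按照 putKey 中的偏移量计算方式,演示 IndexFile 的物理布局
  2. // hashSlotNum、indexNum 为假设的默认值,具体以 MessageStoreConfig 为准
  3. public class IndexFileLayoutDemo {
  4.     static final int INDEX_HEADER_SIZE = 40; // 文件头 40 字节
  5.     static final int HASH_SLOT_SIZE = 4;     // 每个 hash 槽 4 字节,存放最新条目的下标
  6.     static final int INDEX_SIZE = 20;        // 每个条目 20 字节 = keyHash(4) + phyOffset(8) + timeDiff(4) + 前一条目下标(4)
  7.     public static void main(String[] args) {
  8.         int hashSlotNum = 5_000_000;  // 假设的默认槽数量
  9.         int indexCount = 1;           // IndexHeader 中已写入的条目数,初始为 1
  10.         int keyHash = Math.abs("TopicTest#orderId001".hashCode());
  11.         int slotPos = keyHash % hashSlotNum;
  12.         // hash 槽的物理位置 = 文件头 + 槽下标 * 槽大小
  13.         int absSlotPos = INDEX_HEADER_SIZE + slotPos * HASH_SLOT_SIZE;
  14.         // 新条目的物理位置 = 文件头 + 整个槽区 + 已有条目数 * 条目大小
  15.         int absIndexPos = INDEX_HEADER_SIZE + hashSlotNum * HASH_SLOT_SIZE + indexCount * INDEX_SIZE;
  16.         System.out.printf("absSlotPos=%d, absIndexPos=%d%n", absSlotPos, absIndexPos);
  17.     }
  18. }

按上述假设的默认值估算,单个IndexFile的大小约为 40 + 500万×4 + 2000万×20 ≈ 400MB。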

过期文件删除机制

默认情况下,文件超过72小时未更新即视为过期文件,会被删除。可以通过在Broker配置文件中设置fileReservedTime来改变过期时间,单位为小时。

org.apache.rocketmq.store.DefaultMessageStore#addScheduleTask
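addScheduleTask 中会注册一个定时任务,周期性地调用 cleanFilesPeriodically。下面是一个自包含的示意代码,模拟这种调度方式;首次延迟与执行间隔均为假设值,实际以 MessageStoreConfig 中的 cleanResourceInterval 等配置为准:

  1. // 示意代码:用 ScheduledExecutorService 周期性触发清理任务,模拟 addScheduleTask 的做法
  2. // 首次延迟与间隔为假设值,实际以 MessageStoreConfig 的 cleanResourceInterval 等配置为准
  3. import java.util.concurrent.Executors;
  4. import java.util.concurrent.ScheduledExecutorService;
  5. import java.util.concurrent.TimeUnit;
  6. public class CleanTaskDemo {
  7.     public static void main(String[] args) {
  8.         ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
  9.         long initialDelayMs = 1000 * 60;          // 假设首次延迟 60 秒
  10.         long cleanResourceIntervalMs = 10 * 1000; // 假设每 10 秒检查一次
  11.         scheduler.scheduleAtFixedRate(
  12.             () -> System.out.println("cleanFilesPeriodically()"), // 真实代码中这里调用 DefaultMessageStore#cleanFilesPeriodically
  13.             initialDelayMs, cleanResourceIntervalMs, TimeUnit.MILLISECONDS);
  14.     }
  15. }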
DefaultMessageStore

  1. private void cleanFilesPeriodically() {
  2. //清除消息存储文件( Commitlog 文件)和消息消费队列文件( ConsumeQueue文件)
  3. this.cleanCommitLogService.run();
  4. this.cleanConsumeQueueService.run();
  5. }

删除过期文件
DefaultMessageStore.CleanCommitLogService

  1. private void deleteExpiredFiles() {
  2. int deleteCount = 0;
  3. //文件保留时间, 也就是从最后一次更新时间到现在, 如果超过了该时间, 则认为是过期文件, 可以被删除。
  4. long fileReservedTime = DefaultMessageStore.this.getMessageStoreConfig().getFileReservedTime();
  5. //删除物理文件的间隔,因为在一次清除过程中,可能需要被删除的文件不止一个,该值指定两次删除文件之间的间隔时间。
  6. int deletePhysicFilesInterval = DefaultMessageStore.this.getMessageStoreConfig().getDeleteCommitLogFilesInterval();
  7. /*
  8. 在清除过期文件时, 如果该文件被其他线程所占用(引用次数大于0 , 比如读取消息),
  9. 此时会阻止此次删除任务, 同时在第一次试图删除该
  10. 文件时记录当前时间戳,destroyMapedFileIntervalForcibly 表示第一次拒绝删除之后能保留
  11. 的最大时间,在此时间内, 同样可以被拒绝删除, 同时会将引用减少1000 个,超过该时间
  12. 间隔后,文件将被强制删除。
  13. */
  14. int destroyMapedFileIntervalForcibly = DefaultMessageStore.this.getMessageStoreConfig().getDestroyMapedFileIntervalForcibly();
  15. boolean timeup = this.isTimeToDelete();
  16. boolean spacefull = this.isSpaceToDelete();
  17. boolean manualDelete = this.manualDeleteFileSeveralTimes > 0;
  18. /*
  19. RocketMQ 在如下三种情况任意之一满足的情况下将继续执行删除文件操作。
  20. 指定删除文件的时间点:RocketMQ 通过deleteWhen 设置一天中执行删除过期文件操作的固定时间点,默认为凌晨4点。
  21. 磁盘空间是否充足,如果磁盘空间不充足,则返回true ,表示应该触发过期文件删除操作。
  22. 预留,手工触发,可以通过调用excuteDeleteFilesManualy 方法手工触发过期文件删除
  23. */
  24. if (timeup || spacefull || manualDelete) {
  25. if (manualDelete)
  26. this.manualDeleteFileSeveralTimes--;
  27. boolean cleanAtOnce = DefaultMessageStore.this.getMessageStoreConfig().isCleanFileForciblyEnable() && this.cleanImmediately;
  28. log.info("begin to delete before {} hours file. timeup: {} spacefull: {} manualDeleteFileSeveralTimes: {} cleanAtOnce: {}",
  29. fileReservedTime,
  30. timeup,
  31. spacefull,
  32. manualDeleteFileSeveralTimes,
  33. cleanAtOnce);
  34. fileReservedTime *= 60 * 60 * 1000;
  35. //删除文件
  36. deleteCount = DefaultMessageStore.this.commitLog.deleteExpiredFile(fileReservedTime, deletePhysicFilesInterval,
  37. destroyMapedFileIntervalForcibly, cleanAtOnce);
  38. if (deleteCount > 0) {
  39. } else if (spacefull) {
  40. //没有删除文件,但是磁盘空间很快不足.
  41. log.warn("disk space will be full soon, but delete file failed.");
  42. }
  43. }
  44. }
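
其中 isTimeToDelete 用来判断当前是否到达 deleteWhen 配置的删除时间点。下面是一个示意性的判断逻辑,假设 deleteWhen 允许配置多个小时并以分号分隔,真实实现见 UtilAll#isItTimeToDo:

  1. // 示意代码:判断当前小时是否命中 deleteWhen 配置的删除时间点
  2. // 假设 deleteWhen 允许配置多个小时并以分号分隔,真实实现见 UtilAll#isItTimeToDo
  3. import java.util.Calendar;
  4. public class DeleteWhenDemo {
  5.     static boolean isTimeToDelete(String deleteWhen) {
  6.         int hourNow = Calendar.getInstance().get(Calendar.HOUR_OF_DAY);
  7.         for (String hour : deleteWhen.split(";")) {
  8.             if (Integer.parseInt(hour) == hourNow) {
  9.                 return true;
  10.             }
  11.         }
  12.         return false;
  13.     }
  14.     public static void main(String[] args) {
  15.         System.out.println(isTimeToDelete("04")); // 默认凌晨 4 点才返回 true
  16.     }
  17. }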

磁盘空间是否充足
CleanCommitLogService

  1. private boolean isSpaceToDelete() {
  2. //表示commitlog 、consumequeue 文件所在磁盘分区的最大使用量,如果超过该值, 则需要立即清除过期文件。
  3. double ratio = DefaultMessageStore.this.getMessageStoreConfig().getDiskMaxUsedSpaceRatio() / 100.0;
  4. //表示是否需要立即执行清除过期文件操作。
  5. cleanImmediately = false;
  6. {
  7. String storePathPhysic = DefaultMessageStore.this.getMessageStoreConfig().getStorePathCommitLog();
  8. /*
  9. physicRatio: 当前commitlog目录所在磁盘分区的使用率,通过File#getTotalSpace()
  10. 获取文件所在磁盘分区的总容量,通过File#getFreeSpace()获取文件所在磁盘分区的
  11. 剩余容量。
  12. */
  13. double physicRatio = UtilAll.getDiskPartitionSpaceUsedPercent(storePathPhysic);
  14. /*diskSpaceWarningLevelRatio 通过系统参数-Drocketmq.broker.diskSpaceWarningLevelRatio
  15. 设置,默认0.90。如果磁盘分区使用率超过该阈值,将设置磁盘不可写,此时会拒绝新消息
  16. 的写入。*/
  17. if (physicRatio > diskSpaceWarningLevelRatio) {
  18. boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskFull();
  19. if (diskok) {
  20. DefaultMessageStore.log.error("physic disk maybe full soon " + physicRatio + ", so mark disk full");
  21. }
  22. cleanImmediately = true;
  23. } else if (physicRatio > diskSpaceCleanForciblyRatio) {
  24. /*diskSpaceCleanForciblyRatio:通过系统参数-Drocketmq.broker.diskSpaceCleanForciblyRatio
  25. 设置,默认0.85。如果磁盘分区使用率超过该阈值,建议立即执行过期文件清除,但不会拒绝
  26. 新消息的写入。*/
  27. cleanImmediately = true;
  28. } else {
  29. boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskOK();
  30. if (!diskok) {
  31. DefaultMessageStore.log.info("physic disk space OK " + physicRatio + ", so mark disk ok");
  32. }
  33. }
  34. //磁盘很快就要满了,需要执行清除文件
  35. if (physicRatio < 0 || physicRatio > ratio) {
  36. DefaultMessageStore.log.info("physic disk maybe full soon, so reclaim space, " + physicRatio);
  37. return true;
  38. }
  39. }
  40. {
  41. String storePathLogics = StorePathConfigHelper
  42. .getStorePathConsumeQueue(DefaultMessageStore.this.getMessageStoreConfig().getStorePathRootDir());
  43. double logicsRatio = UtilAll.getDiskPartitionSpaceUsedPercent(storePathLogics);
  44. if (logicsRatio > diskSpaceWarningLevelRatio) {
  45. boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskFull();
  46. if (diskok) {
  47. DefaultMessageStore.log.error("logics disk maybe full soon " + logicsRatio + ", so mark disk full");
  48. }
  49. cleanImmediately = true;
  50. } else if (logicsRatio > diskSpaceCleanForciblyRatio) {
  51. cleanImmediately = true;
  52. } else {
  53. boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskOK();
  54. if (!diskok) {
  55. DefaultMessageStore.log.info("logics disk space OK " + logicsRatio + ", so mark disk ok");
  56. }
  57. }
  58. if (logicsRatio < 0 || logicsRatio > ratio) {
  59. DefaultMessageStore.log.info("logics disk maybe full soon, so reclaim space, " + logicsRatio);
  60. return true;
  61. }
  62. }
  63. //磁盘使用率正常
  64. return false;
  65. }
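
上面注释中提到的磁盘使用率,可以按如下方式用 File#getTotalSpace / File#getFreeSpace 计算。以下仅为示意,真实实现见 UtilAll#getDiskPartitionSpaceUsedPercent;与之比较的 diskMaxUsedSpaceRatio 默认值一般为75(%),具体以 MessageStoreConfig 为准:

  1. // 示意代码:按照注释描述的方式,用 File#getTotalSpace / File#getFreeSpace 计算磁盘分区使用率
  2. // 仅为说明用途,真实实现见 UtilAll#getDiskPartitionSpaceUsedPercent
  3. import java.io.File;
  4. public class DiskRatioDemo {
  5.     static double getDiskPartitionSpaceUsedPercent(String path) {
  6.         File file = new File(path);
  7.         if (!file.exists()) {
  8.             return -1; // 目录不存在返回 -1,对应上面 physicRatio < 0 的分支
  9.         }
  10.         long total = file.getTotalSpace();
  11.         long free = file.getFreeSpace();
  12.         return total > 0 ? (double) (total - free) / total : -1;
  13.     }
  14.     public static void main(String[] args) {
  15.         System.out.println(getDiskPartitionSpaceUsedPercent(System.getProperty("user.home")));
  16.     }
  17. }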

执行文件销毁与删除
CommitLog

  1. public int deleteExpiredFile(
  2. final long expiredTime,
  3. final int deleteFilesInterval,
  4. final long intervalForcibly,
  5. final boolean cleanImmediately
  6. ) {
  7. //执行文件销毁与删除
  8. return this.mappedFileQueue.deleteExpiredFileByTime(expiredTime, deleteFilesInterval, intervalForcibly, cleanImmediately);
  9. }
  10. public int deleteExpiredFileByTime(final long expiredTime,
  11. final int deleteFilesInterval,
  12. final long intervalForcibly,
  13. final boolean cleanImmediately) {
  14. //获取所有文件映射数组
  15. Object[] mfs = this.copyMappedFiles(0);
  16. if (null == mfs)
  17. return 0;
  18. int mfsLength = mfs.length - 1;
  19. int deleteCount = 0;
  20. List<MappedFile> files = new ArrayList<MappedFile>();
  21. if (null != mfs) {
  22. //遍历除最后一个文件(当前正在写入的文件)以外的所有文件
  23. for (int i = 0; i < mfsLength; i++) {
  24. MappedFile mappedFile = (MappedFile) mfs[i];
  25. //计算文件的最大存活时间(= 文件的最后一次更新时间 + 文件存活时间,默认72小时)
  26. long liveMaxTimestamp = mappedFile.getLastModifiedTimestamp() + expiredTime;
  27. if (//当前时间大于文件的最大存活
  28. System.currentTimeMillis() >= liveMaxTimestamp ||
  29. //需要强制删除文件(当磁盘使用率超过设定的阈值)
  30. cleanImmediately) {
  31. //清除MappedFile 占有的相关资源
  32. if (mappedFile.destroy(intervalForcibly)) {
  33. //若执行成功,将该文件加入到待删除文件列表中
  34. files.add(mappedFile);
  35. deleteCount++;
  36. if (files.size() >= DELETE_FILES_BATCH_MAX) {
  37. break;
  38. }
  39. if (deleteFilesInterval > 0 && (i + 1) < mfsLength) {
  40. try {
  41. Thread.sleep(deleteFilesInterval);
  42. } catch (InterruptedException e) {
  43. }
  44. }
  45. } else {
  46. break;
  47. }
  48. } else {
  49. //avoid deleting files in the middle
  50. break;
  51. }
  52. }
  53. }
  54. //将文件从mappedFiles文件集合删除
  55. deleteExpiredFile(files);
  56. return deleteCount;
  57. }

MappedFile文件销毁
MappedFile

  1. public boolean destroy(final long intervalForcibly) {
  2. //关闭MappedFile
  3. this.shutdown(intervalForcibly);
  4. //判断是否清理完成
  5. if (this.isCleanupOver()) {
  6. try {
  7. //关闭通道
  8. this.fileChannel.close();
  9. log.info("close file channel " + this.fileName + " OK");
  10. long beginTime = System.currentTimeMillis();
  11. //删除整个物理文件
  12. boolean result = this.file.delete();
  13. log.info("delete file[REF:" + this.getRefCount() + "] " + this.fileName
  14. + (result ? " OK, " : " Failed, ") + "W:" + this.getWrotePosition() + " M:"
  15. + this.getFlushedPosition() + ", "
  16. + UtilAll.computeElapsedTimeMilliseconds(beginTime));
  17. } catch (Exception e) {
  18. log.warn("close file channel " + this.fileName + " Failed. ", e);
  19. }
  20. return true;
  21. } else {
  22. log.warn("destroy mapped file[REF:" + this.getRefCount() + "] " + this.fileName
  23. + " Failed. cleanupOver: " + this.cleanupOver);
  24. }
  25. return false;
  26. }
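
destroy 中的 shutdown(intervalForcibly) 与 isCleanupOver 依赖的是 MappedFile 父类 ReferenceResource 的引用计数机制,也就是前文注释中“第一次拒绝删除、超时后强制把引用打成负数”的逻辑。下面是一个简化的示意实现,细节与真实源码可能有出入:

  1. // 示意代码:简化版的引用计数,演示 shutdown(intervalForcibly) 与 isCleanupOver 的语义
  2. // MappedFile 的真实实现继承自 org.apache.rocketmq.store.ReferenceResource,细节可能与此不同
  3. import java.util.concurrent.atomic.AtomicLong;
  4. public class RefResourceDemo {
  5.     private final AtomicLong refCount = new AtomicLong(1);
  6.     private volatile boolean available = true;
  7.     private volatile long firstShutdownTimestamp = 0;
  8.     public void shutdown(long intervalForcibly) {
  9.         if (this.available) {
  10.             // 第一次 shutdown:标记不可用,并释放自身持有的那一次引用
  11.             this.available = false;
  12.             this.firstShutdownTimestamp = System.currentTimeMillis();
  13.             this.refCount.decrementAndGet();
  14.         } else if (this.refCount.get() > 0
  15.                 && System.currentTimeMillis() - this.firstShutdownTimestamp >= intervalForcibly) {
  16.             // 超过 intervalForcibly 后仍被引用:把引用数强制打成负数,使文件可以被清理
  17.             this.refCount.set(-1000 - this.refCount.get());
  18.         }
  19.     }
  20.     public boolean isCleanupOver() {
  21.         return this.refCount.get() <= 0; // 简化处理:真实实现还要求堆外内存等资源已清理完毕
  22.     }
  23.     public static void main(String[] args) {
  24.         RefResourceDemo res = new RefResourceDemo();
  25.         res.shutdown(120 * 1000);
  26.         System.out.println("cleanupOver=" + res.isCleanupOver()); // 没有其他引用时,第一次 shutdown 后即可清理
  27.     }
  28. }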

总结

为了保证消息不丢失,MQ对消息做了持久化处理;
为了保证消息顺序(追加)写入,MQ将所有消息存储在单一的Commitlog文件中;
为了方便定位消息文件,消息存储文件以起始偏移量作为文件名;
为了解决单一文件存储带来的读取慢问题,MQ增加了消费队列文件(ConsumeQueue)和Hash索引文件(IndexFile);
为了提高存储消息的效率,MQ提供同步和异步两种刷盘模式,并引入堆外内存池(TransientStorePool),提高消息写入channel的效率;
为了保证消息的容错性,MQ通过abort文件记录broker是正常还是异常关闭,并通过checkpoint文件记录刷盘点,用于异常关闭后的文件恢复;
为了节约磁盘空间,MQ提供了过期文件删除机制。
