国产探花免费观看_亚洲丰满少妇自慰呻吟_97日韩有码在线_资源在线日韩欧美_一区二区精品毛片,辰东完美世界有声小说,欢乐颂第一季,yy玄幻小说排行榜完本

首頁 > 編程 > Python > 正文

python生成lmdb格式的文件實例

2020-02-15 23:34:46
字體:
來源:轉載
供稿:網友

在crnn訓練的時候需要用到lmdb格式的數據集,下面是python生成lmdb格式數據集的代碼。注意一定要以二進制方式讀取圖像,否則在Windows下讀入圖像時會出問題;可能遇到的問題都在代碼裡面註釋了,看代碼即可。

# -*- coding:utf-8 -*-
"""Build an LMDB dataset for CRNN training.

Each sample is stored under paired keys 'image-%09d' (raw image bytes)
and 'label-%09d' (ground-truth text); 'num-samples' holds the total count.
Ported to Python 3: images are read in binary mode ('rb'), so the script
is no longer Linux-only, and all LMDB keys/values are encoded to bytes.
"""
import glob
import os

import cv2
import lmdb  # pip install lmdb
import numpy as np


def checkImageIsValid(imageBin):
    """Return True iff *imageBin* decodes to a non-empty grayscale image."""
    if imageBin is None:
        return False
    # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
    imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
    if img is None:
        return False
    imgH, imgW = img.shape[0], img.shape[1]
    return imgH * imgW != 0


def writeCache(env, cache):
    """Write every (key, value) in *cache* to the LMDB env in one transaction.

    LMDB requires bytes keys/values in Python 3, so str entries are
    encoded as UTF-8; raw image bytes pass through untouched.
    """
    with env.begin(write=True) as txn:
        for k, v in cache.items():
            if isinstance(k, str):
                k = k.encode('utf-8')
            if isinstance(v, str):
                v = v.encode('utf-8')
            txn.put(k, v)


def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
    """Create an LMDB dataset for CRNN training.

    ARGS:
        outputPath    : LMDB output path
        imagePathList : list of image paths
        labelList     : list of corresponding ground-truth texts
        lexiconList   : (optional) list of lexicon lists
        checkValid    : if True, check the validity of every image
    """
    assert len(imagePathList) == len(labelList)
    nSamples = len(imagePathList)
    print('...................')
    # map_size is the maximum disk footprint in bytes; 8 GB here instead of
    # the upstream 1 TB (1099511627776) to avoid "disk full" on small drives.
    env = lmdb.open(outputPath, map_size=8589934592)
    cache = {}
    cnt = 1
    for i in range(nSamples):
        imagePath = imagePathList[i]
        label = labelList[i]
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        # Binary mode: the exact on-disk bytes are read on every OS
        # (text mode mangled the data on Windows in the original).
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid and not checkImageIsValid(imageBin):
            print('%s is not a valid image' % imagePath)
            continue
        imageKey = 'image-%09d' % cnt
        labelKey = 'label-%09d' % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label
        if lexiconList:
            lexiconKey = 'lexicon-%09d' % cnt
            cache[lexiconKey] = ' '.join(lexiconList[i])
        # Flush in batches of 1000 to bound memory use.
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt - 1
    cache['num-samples'] = str(nSamples)
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)


def read_text(path):
    """Read the label file at *path* and return its stripped contents."""
    with open(path, encoding='utf-8') as f:
        return f.read().strip()


if __name__ == '__main__':
    # LMDB output directory; run once for the training set and once for
    # the validation set (two separate output paths).
    outputPath = 'D:/ruanjianxiazai/tuxiangyangben/fengehou/train'
    # Put the .txt label files next to the .jpg images in the same folder.
    path = "D:/ruanjianxiazai/tuxiangyangben/fengehou/chenguang/*.jpg"
    imagePathList = glob.glob(path)
    print('------------', len(imagePathList), '------------')
    imgLabelLists = []
    for p in imagePathList:
        try:
            imgLabelLists.append((p, read_text(p.replace('.jpg', '.txt'))))
        except OSError:
            # Skip images with no matching label file; the original bare
            # except: hid every other error as well.
            continue
    # Sort by label length so similarly-sized targets batch together.
    imgLabelList = sorted(imgLabelLists, key=lambda x: len(x[1]))
    imgPaths = [p[0] for p in imgLabelList]
    txtLists = [p[1] for p in imgLabelList]
    createDataset(outputPath, imgPaths, txtLists, lexiconList=None, checkValid=True)
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 清苑县| 西华县| 九江市| 富锦市| 宁河县| 新野县| 清新县| 志丹县| 嘉兴市| 延长县| 罗甸县| 托克逊县| 丰原市| 大港区| 巴林右旗| 浮梁县| 上栗县| 改则县| 凤凰县| 太仓市| 霍州市| 阜新| 黑河市| 彭山县| 敖汉旗| 潜山县| 互助| 巴林左旗| 多伦县| 视频| 上饶市| 浮梁县| 黔东| 双峰县| 抚松县| 澄江县| 同心县| 余姚市| 金阳县| 正宁县| 阿城市|