解决大批量数据导出Excel产生内存溢出的方案

合集下载
  1. 1、下载文档前请自行甄别文档内容的完整性,平台不提供额外的编辑、内容补充、找答案等附加服务。
  2. 2、"仅部分预览"的文档,不可在线预览部分如存在完整性等问题,可反馈申请退款(可完整预览的文档不适用该条件!)。
  3. 3、如文档侵犯您的权益,请联系客服反馈,我们会尽快为您处理(人工客服工作时间:9:00-18:30)。
if (recOffset == 1) {
break;
}
sheets.add(sh);
}
return sheets;
}
static int getRows(List<Record> records) {
int row = 0;
for (Iterator itr = records.iterator(); itr.hasNext();) {
}
//以第一篇文档的最后一个sheet为根,以后的数据都追加在这个sheet后面
Sheet rootSheet = sheets.get(sheets.size() - 1);
int rootRows = getRowsOfSheet(rootSheet); //记录第一篇文档的行数,以后的行数在此基础上增加
List<Sheet> sheets = getSheets(workbook, rootRecords);
if(sheets == null || sheets.size() == 0) {
throw new IllegalArgumentException("第一篇文档的格式错误,必须有至少一个sheet");
int index = workbook.addSSTString(sstRecord.getString(j));
//记录原来的索引和现在的索引的对应关系
map.put(Integer.valueOf(j), Integer.valueOf(index));
}
} else if (record.getSid() == LabelSSTRecord.sid) {
/**
*将多个Xls文件合并为一个,适用于只有一个sheet,并且格式相同的文档
* @param inputs 输入的Xls文件
* @param out 输出文件
*/
public static void merge(InputStream[] inputs, OutputStream out) {
if (inputs == null || inputs.length <= 1) {
}
}
rootRows += rowsOfCurXls;
}
byte[] data = getBytes(workbook, sheets.toArray(new Sheet[0]));
write(out, data);
}
static void write(OutputStream out, byte[] data) {
for (int i = 0; i < nSheets; i++) {
sheets[i].preSerialize();
}
int totalsize = workbook.getSize();
// pre-calculate all the sheet sizes and set BOF indexes
POIFSFileSystem fs = new POIFSFileSystem();
// Write out the Workbook stream
try {
fs.createDocument(new ByteArrayInputStream(data), "Workbook");
fs.writeFilesystem(out);
throw new IllegalArgumentException("没有传入输入流数组,或只有一个输入流.");
}
List<Record> rootRecords = getRecords(inputs[0]);
Workbook workbook = Workbook.createWorkbook(rootRecords);
int recOffset = workbook.getNumRecords();
int sheetNum = 0;
// convert all LabelRecord records to LabelSSTRecord
convertLabelRecords(records, recOffset, workbook);
RowRecord rowRecord = (RowRecord) record;
//调整行号
rowRecord.setRowNumber(rootRows + rowRecord.getRowNumber());
rootSheet.addRow(rowRecord); //追加Row
rowsOfCurXls++; //记录当前文档的行数
InputStream stream = poifs.getRoot().createDocumentInputStream("Workbook");
return org.apache.poi.hssf.record.RecordFactory.createRecords(stream);
} catch (IOException e) {
logger.error("IO异常:{}", e.getMessage());
e.printStackTrace();
}
return Collections.EMPTY_LIST;
}
static void convertLabelRecords(List records, int offset, Workbook workbook) {
Record record = (Record) itr.next();
if (record.getSid() == RowRecord.sid) {
row++;
}
}
return row;
}
static int getRowsOfSheet(Sheet sheet) {
int rows = 0;
sheet.setLoc(0);
List<Sheet> sheets = new ArrayList();
while (recOffset < records.size()) {
Sheet sh = Sheet.createSheet(records, sheetNum++, recOffset);
recOffset = sh.getEofLoc() + 1;
LabelSSTRecord label = (LabelSSTRecord) record;
//调整SST索引的对应关系
label.setSSTIndex(map.get(Integer.valueOf(label.getSSTIndex())));
}
//追加ValueCell
if (record instanceof CellValueRecordInterface) {
// HSSFSheet[] sheets = getSheets();
int nSheets = sheets.length;
// before getting the workbook size we must tell the sheets that
// serialization is about to occur.
while(sheet.getNextRow() != null) {
rows++;
}
return rows;
}
@SuppressWarnings("deprecation")
static List<Record> getRecords(InputStream input) {
try {
POIFSFileSystem poifs = new POIFSFileSystem(input);
CellValueRecordInterface cell = (CellValueRecordInterface) record;
int cellRow = cell.getRow() + rootRows;
cell.setRow(cellRow);
rootSheet.addValueRecord(cellRow, cell);
int[] estimatedSheetSizes = new int[nSheets];
for (int k = 0; k < nSheets; k++) {
workbook.setSheetBof(k, totalsize);
out.flush();
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
out.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
static List<Sheet> getSheets(Workbook workbook, List records) {
POI或者JXL在导出大量数据的时候,由于它们将每一个单元格都生成一个Cell对象,所以很容易导致内存溢出。解决这个问题,唯一的办法是弄清楚Excel的二进制格式(汗),并且用流的方式读写Excel。POI和JXL其实提供了二进制方式读写Excel的API,只是因为缺少文档和实例,所以使用的人不多。我编写了这个简单的合并Excel的类,它只适合合并结构相同的多个Excel文件。好在这个功能已经可以解决数据导出产生OOM的问题:将数据分批导出然后合并。
rootSheet.setLoc(rootSheet.getDimsLoc());
Map<Integer, Integer> map = new HashMap(10000);
for (int i = 1; i < inputs.length; i++) { //从第二篇开始遍历
List<Record> records = getRecords(inputs[i]);
for (int k = offset; k < records.size(); k++) {
Record rec = (Record) records.get(k);
if (rec.getSid() == LabelRecord.sid) {
LabelRecord oldrec = (LabelRecord) rec;
records.remove(k);
LabelSSTRecord newrec = new LabelSSTRecord();
int stringid = workbook.addSSTString(new UnicodeString(oldrec.getValue()));
newrec.setRow(oldrec.getRow());
newrec.setColumn(oldrec.getColumn());
newrec.setXFIndex(oldrec.getXFIndex());
newrec.setSSTIndex(stringid);
records.add(k, newrec);
}
}
}
public static byte[] getBytes(Workbook workbook, Sheet[] sheets) {
int rowsOfCurXls = 0;
//遍历当前文档的每一个record
for (Iterator itr = records.iterator(); itr.hasNext();) {
Record record = (Record) itr.next();
if (record.getSid() == RowRecord.sid) { //如果是RowRecord
下面的代码使用POI3.1,合并11个3000多行的文档用时约6秒,我实在找不到更多的测试用的文档了。
Java代码
// NOTE(review): this dump is a scrambled extraction — the class header below has been
// separated from its method bodies (merge, getSheets, getRecords, getRows, getRowsOfSheet,
// write, getBytes), which appear out of order elsewhere in this file. Restore the original
// source ordering before attempting to compile.
@SuppressWarnings("unchecked")
public class XlsMergeUtil {
// SLF4J logger for this utility; referenced by getRecords' IOException handler.
private static Logger logger = LoggerFactory.getLogger(XlsMergeUtil.class);
}
//SST记录,SST保存xls文件中唯一的String,各个String都是对应着SST记录的索引
else if (record.getSid() == SSTRecord.sid) {
SSTRecord sstRecord = (SSTRecord) record;
for (int j = 0; j < sstRecord.getNumUniqueStrings(); j++) {
相关文档
最新文档