* Allow alternate matching group order for archive short/long desc

* max short/long desc file input byte size (ignore files larger than N)
* Add Arj support via 'arj'
This commit is contained in:
Bryan Ashby 2017-01-29 22:30:48 -07:00
parent 9525afddd3
commit 6f1015305b
6 changed files with 65 additions and 23 deletions

View File

@@ -256,18 +256,16 @@ module.exports = class ArchiveUtil {
if(exitCode) {
return cb(new Error(`List failed with exit code: ${exitCode}`));
}
//if(err) {
// return cb(err);
// }
const entryGroupOrder = archiver.list.entryGroupOrder || { byteSize : 1, fileName : 2 };
const entries = [];
const entryMatchRe = new RegExp(archiver.list.entryMatch, 'gm');
let m;
while((m = entryMatchRe.exec(output))) {
// :TODO: allow alternate ordering!!!
entries.push({
byteSize : parseInt(m[1]),
fileName : m[2],
byteSize : parseInt(m[entryGroupOrder.byteSize]),
fileName : m[entryGroupOrder.fileName],
});
}

View File

@@ -246,7 +246,7 @@ function getDefaultConfig() {
},
decompress : {
cmd : '7za',
args : [ 'e', '-o{extractPath}', '{archivePath}' ]
args : [ 'e', '-o{extractPath}', '{archivePath}' ] // :TODO: should be 'x'?
},
list : {
cmd : '7za',
@@ -279,6 +279,30 @@ function getDefaultConfig() {
cmd : 'lha',
args : [ '-ew={extractPath}', '{archivePath}', '{fileList}' ]
}
},
Arj : {
//
// 'arj' command can be obtained from:
// * apt-get: arj
//
decompress : {
cmd : 'arj',
args : [ 'x', '{archivePath}', '{extractPath}' ],
},
list : {
cmd : 'arj',
args : [ 'l', '{archivePath}' ],
entryMatch : '^([^\\s]+)\\s+([0-9]+)\\s+[0-9]+\\s[0-9\\.]+\\s+[0-9]{2}\\-[0-9]{2}\\-[0-9]{2}\\s[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\s+(?:[^\\r\\n]+)$',
entryGroupOrder : { // defaults to { byteSize : 1, fileName : 2 }
fileName : 1,
byteSize : 2,
}
},
extract : {
cmd : 'arj',
args : [ 'e', '{archivePath}', '{extractPath}', '{fileList}' ],
}
}
},
@@ -305,7 +329,7 @@ function getDefaultConfig() {
sig : '60ea',
offset : 0,
exts : [ 'arj' ],
handler : '7Zip',
handler : 'Arj',
desc : 'ARJ Archive',
},
rar : {
@@ -428,15 +452,18 @@ function getDefaultConfig() {
// areas with an explicit |storageDir| will be stored relative to |areaStoragePrefix|:
areaStoragePrefix : paths.join(__dirname, './../file_base/'),
maxDescFileByteSize : 471859, // ~0.45 MB
maxDescLongFileByteSize : 524288, // 1/2 MB
fileNamePatterns: {
// These are NOT case sensitive
// FILE_ID.DIZ - https://en.wikipedia.org/wiki/FILE_ID.DIZ
shortDesc : [
desc : [
'^FILE_ID\.DIZ$', '^DESC\.SDI$', '^DESCRIPT\.ION$', '^FILE\.DES$', '^FILE\.SDI$', '^DISK\.ID$'
],
// common README filename - https://en.wikipedia.org/wiki/README
longDesc : [
descLong : [
'^.*\.NFO$', '^README\.1ST$', '^README\.TXT$', '^READ\.ME$', '^README$', '^README\.md$'
],
},

View File

@@ -280,7 +280,7 @@ function populateFileEntryWithArchive(fileEntry, filePath, stepInfo, iterator, c
const extractList = [];
const shortDescFile = entries.find( e => {
return Config.fileBase.fileNamePatterns.shortDesc.find( pat => new RegExp(pat, 'i').test(e.fileName) );
return Config.fileBase.fileNamePatterns.desc.find( pat => new RegExp(pat, 'i').test(e.fileName) );
});
if(shortDescFile) {
@@ -288,7 +288,7 @@ function populateFileEntryWithArchive(fileEntry, filePath, stepInfo, iterator, c
}
const longDescFile = entries.find( e => {
return Config.fileBase.fileNamePatterns.longDesc.find( pat => new RegExp(pat, 'i').test(e.fileName) );
return Config.fileBase.fileNamePatterns.descLong.find( pat => new RegExp(pat, 'i').test(e.fileName) );
});
if(longDescFile) {
@@ -318,25 +318,38 @@ function populateFileEntryWithArchive(fileEntry, filePath, stepInfo, iterator, c
});
});
},
function readDescFiles(descFiles, callback) {
// :TODO: we should probably omit files that are too large
function readDescFiles(descFiles, callback) {
async.each(Object.keys(descFiles), (descType, next) => {
const path = descFiles[descType];
if(!path) {
return next(null);
}
fs.readFile(path, (err, data) => {
if(err || !data) {
fs.stat(path, (err, stats) => {
if(err) {
return next(null);
}
//
// Assume FILE_ID.DIZ, NFO files, etc. are CP437.
//
// :TODO: This isn't really always the case - how to handle this? We could do a quick detection...
fileEntry[descType] = iconv.decode(sliceAtSauceMarker(data, 0x1a), 'cp437');
return next(null);
// skip entries that are too large
const maxFileSizeKey = `max${_.upperFirst(descType)}FileByteSize`;
if(Config.fileBase[maxFileSizeKey] && stats.size > Config.fileBase[maxFileSizeKey]) {
Log.debug( { byteSize : stats.size, maxByteSize : Config.fileBase[maxFileSizeKey] }, `Skipping "${descType}"; Too large` );
return next(null);
}
fs.readFile(path, (err, data) => {
if(err || !data) {
return next(null);
}
//
// Assume FILE_ID.DIZ, NFO files, etc. are CP437.
//
// :TODO: This isn't really always the case - how to handle this? We could do a quick detection...
fileEntry[descType] = iconv.decode(sliceAtSauceMarker(data, 0x1a), 'cp437');
return next(null);
});
});
}, () => {
// cleanup but don't wait

View File

@@ -371,6 +371,10 @@ function cleanControlCodes(input, options) {
function createCleanAnsi(input, options, cb) {
if(!input) {
return cb('');
}
options.width = options.width || 80;
options.height = options.height || 25;

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env node
/* jslint node: true */
'use strict';
/*

View File

@@ -41,7 +41,6 @@
"temptmp" : "^1.0.0"
},
"devDependencies": {
"lodash-migrate": "^0.3.16"
},
"engines": {
"node": ">=6.9.2"