From a48b69706aa94bfd38d2f19f8bed0740d1c71a2b Mon Sep 17 00:00:00 2001 From: Leo Horwitz Date: Thu, 31 Aug 2023 17:11:09 -0700 Subject: [PATCH] @W-14009441@: fill in fake header for csv output @W-14009441@: clean comments/documentation @W-14009441@: fix ancillary tests @W-14009441@: create file on no violations and add some tests --- src/lib/formatter/RuleResultRecombinator.ts | 32 +---- src/lib/util/RunOutputProcessor.ts | 16 +-- test/commands/scanner/run.filters.test.ts | 11 +- test/commands/scanner/run.severity.test.ts | 4 +- test/commands/scanner/run.test.ts | 149 ++++++++++++++++---- test/lib/RuleManager.test.ts | 10 +- test/lib/util/RunOutputProcessor.test.ts | 36 +++-- 7 files changed, 170 insertions(+), 88 deletions(-) diff --git a/src/lib/formatter/RuleResultRecombinator.ts b/src/lib/formatter/RuleResultRecombinator.ts index cc02eaa20..4975a71e2 100644 --- a/src/lib/formatter/RuleResultRecombinator.ts +++ b/src/lib/formatter/RuleResultRecombinator.ts @@ -110,12 +110,7 @@ export class RuleResultRecombinator { private static constructXml(results: RuleResult[]): string { let resultXml = ``; - // If the results were just an empty string, we can return it. - if (results.length === 0) { - return resultXml; - } - - const normalizeSeverity: boolean = results[0].violations.length > 0 && !(results[0].violations[0].normalizedSeverity === undefined) + const normalizeSeverity: boolean = results[0]?.violations.length > 0 && !(results[0]?.violations[0].normalizedSeverity === undefined) let problemCount = 0; @@ -188,10 +183,6 @@ export class RuleResultRecombinator { } private static constructJunit(results: RuleResult[]): string { - // If there are no results, we can just return an empty string. - if (!results || results.length === 0) { - return ''; - } // Otherwise, we'll need to start constructing our JUnit XML. To do that, we'll need a map from file names to // lists of the tags generated from violations found in the corresponding file. 
@@ -279,10 +270,6 @@ URL: ${url}`; } private static constructTable(results: RuleResult[]): RecombinedData { - // If the results were just an empty string, we can return it. - if (results.length === 0) { - return ''; - } const columns = this.violationsAreDfa(results) ? ['Source Location', 'Sink Location', 'Description', 'Category', 'URL'] : ['Location', 'Description', 'Category', 'URL']; @@ -341,10 +328,6 @@ URL: ${url}`; } private static constructJson(results: RuleResult[], verboseViolations = false): string { - if (results.length === 0) { - return ''; - } - if (verboseViolations) { const resultsVerbose = JSON.parse(JSON.stringify(results)) as RuleResult[]; for (const result of resultsVerbose) { @@ -363,12 +346,7 @@ URL: ${url}`; } private static async constructHtml(results: RuleResult[], verboseViolations = false): Promise { - // If the results were just an empty string, we can return it. - if (results.length === 0) { - return ''; - } - - const normalizeSeverity: boolean = results[0].violations.length > 0 && !(results[0].violations[0].normalizedSeverity === undefined); + const normalizeSeverity: boolean = results[0]?.violations.length > 0 && !(results[0]?.violations[0].normalizedSeverity === undefined); const isDfa = this.violationsAreDfa(results); @@ -434,12 +412,8 @@ URL: ${url}`; } private static async constructCsv(results: RuleResult[]): Promise { - // If the results were just an empty list, we can return an empty string - if (results.length === 0) { - return ''; - } const isDfa: boolean = this.violationsAreDfa(results); - const normalizeSeverity: boolean = results[0].violations.length > 0 && !(results[0].violations[0].normalizedSeverity === undefined) + const normalizeSeverity: boolean = results[0]?.violations.length > 0 && !(results[0]?.violations[0].normalizedSeverity === undefined) const csvRows = []; // There will always be columns for the problem counter and the severity. 
diff --git a/src/lib/util/RunOutputProcessor.ts b/src/lib/util/RunOutputProcessor.ts index 13f76214c..43c25b351 100644 --- a/src/lib/util/RunOutputProcessor.ts +++ b/src/lib/util/RunOutputProcessor.ts @@ -28,21 +28,11 @@ export class RunOutputProcessor { public processRunOutput(rrr: RecombinedRuleResults): AnyJson { - const {minSev, summaryMap, results} = rrr; - // If the results are an empty string, it means no violations were found. - if (results === '') { - // Build an appropriate message... - const msg = messages.getMessage('output.noViolationsDetected', [[...summaryMap.keys()].join(', ')]); - // ...log it to the console... - this.ux.log(msg); - // ...and return it for use with the --json flag. - return msg; - } + const {minSev, results} = rrr; - // If we actually have violations, there's some stuff we need to do with them. We'll build an array of message parts, - // and then log them all at the end. + // With the list of violations, we'll build an array of message parts, and then log them all at the end. let msgComponents: string[] = []; - // We need a summary of the information we were provided. + // We need a summary of the information we were provided (blank/empty if no violations). msgComponents = [...msgComponents, ...this.buildRunSummaryMsgParts(rrr)]; // We need to surface the results directly to the user, then add a message describing what we did. msgComponents.push(this.opts.outfile ? 
this.writeToOutfile(results) : this.writeToConsole(results)); diff --git a/test/commands/scanner/run.filters.test.ts b/test/commands/scanner/run.filters.test.ts index 8dc6945c1..04db8b790 100644 --- a/test/commands/scanner/run.filters.test.ts +++ b/test/commands/scanner/run.filters.test.ts @@ -19,7 +19,16 @@ describe('scanner:run tests that result in the use of RuleFilters', function () '--engine', 'eslint-lwc' ]) .it('LWC Engine Successfully parses LWC code', ctx => { - expect(ctx.stdout).to.contain('No rule violations found.'); + // If there's a summary, then it'll be separated from the CSV by an empty line. Throw it away. + const [csv, _] = ctx.stdout.trim().split(/\n\r?\n/); + + // Since it's a CSV, the rows themselves are separated by newline characters, and there's a header row we + // need to discard. + const rows = csv.trim().split('\n'); + rows.shift(); + + // There should be no rows (besides the header) because there are no violations. + expect(rows.length).to.equal(0, 'Should be no violations detected'); }); setupCommandTest diff --git a/test/commands/scanner/run.severity.test.ts b/test/commands/scanner/run.severity.test.ts index d5d7de7be..4e9682f45 100644 --- a/test/commands/scanner/run.severity.test.ts +++ b/test/commands/scanner/run.severity.test.ts @@ -20,7 +20,9 @@ describe('scanner:run', function () { '--severity-threshold', '3' ]) .it('When no violations are found, no error is thrown', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + const output = JSON.parse(ctx.stdout); + // There should be no violations (nor any exceptions). 
+ expect(output.length).to.equal(0, 'Should be no violations from one engine'); expect(ctx.stderr).to.not.contain(processorMessages.getMessage('output.sevThresholdSummary', ['3']), 'Error should not be present'); }); diff --git a/test/commands/scanner/run.test.ts b/test/commands/scanner/run.test.ts index f27a51e2d..bcdf2527d 100644 --- a/test/commands/scanner/run.test.ts +++ b/test/commands/scanner/run.test.ts @@ -30,6 +30,15 @@ describe('scanner:run', function () { expect(violations[1]).to.match(/line="19".+rule="ApexUnitTestClassShouldHaveAsserts"/); } + function validateNoViolationsXmlOutput(xml: string): void { + // We'll split the output by the tag, so we can get individual violations. + const violations = xml.split(' { setupCommandTest .command(['scanner:run', @@ -57,8 +66,8 @@ describe('scanner:run', function () { '--ruleset', 'ApexUnit', '--format', 'xml' ]) - .it('When the file contains no violations, a message is logged to the console', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + .it('When the file contains no violations, XML with no violations is logged to the console', ctx => { + validateNoViolationsXmlOutput(ctx.stdout); }); }); @@ -180,6 +189,24 @@ describe('scanner:run', function () { const fileContents = fs.readFileSync('testout.xml').toString(); validateXmlOutput(fileContents); }); + setupCommandTest + .command(['scanner:run', + '--target', path.join('test', 'code-fixtures', 'apex', 'YetAnotherTestClass.cls'), + '--ruleset', 'ApexUnit', + '--outfile', 'testout.xml' + ]) + .finally(ctx => { + // Regardless of what happens in the test itself, we need to delete the file we created. + if (fs.existsSync('testout.xml')) { + fs.unlinkSync('testout.xml'); + } + }) + .it('An XML file with no violations is created', ctx => { + // Verify that an empty XML file was actually created. 
+ expect(fs.existsSync('testout.xml')).to.equal(true, 'The command should have created an empty output file'); + const fileContents = fs.readFileSync('testout.xml').toString(); + validateNoViolationsXmlOutput(fileContents); + }); }); }); @@ -192,7 +219,7 @@ describe('scanner:run', function () { expect(summary).to.not.equal(null, 'Expected summary to be not null'); expect(summary).to.contain(processorMessages.getMessage('output.engineSummaryTemplate', ['pmd', 2, 1]), 'Summary should be correct'); } - // Since it's a CSV, the rows themselves are separated by newline chaacters, and there's a header row we + // Since it's a CSV, the rows themselves are separated by newline characters, and there's a header row we // need to discard. const rows = csv.trim().split('\n'); rows.shift(); @@ -209,6 +236,23 @@ describe('scanner:run', function () { expect(data[1][5]).to.equal('"ApexUnitTestClassShouldHaveAsserts"', 'Violation #2 should be of the expected type'); } + function validateNoViolationsCsvOutput(contents: string, expectSummary=true): void { + // If there's a summary, then it'll be separated from the CSV by an empty line. + const [csv, summary] = contents.trim().split(/\n\r?\n/); + if (expectSummary) { + expect(summary).to.not.equal(undefined, 'Expected summary to be not undefined'); + expect(summary).to.not.equal(null, 'Expected summary to be not null'); + expect(summary).to.contain(processorMessages.getMessage('output.engineSummaryTemplate', ['pmd', 2, 1]), 'Summary should be correct'); + } + // Since it's a CSV, the rows themselves are separated by newline characters, and there's a header row we + // need to discard. + const rows = csv.trim().split('\n'); + rows.shift(); + + // There should be no rows (besides the header) because there are no violations. 
+ expect(rows.length).to.equal(0, 'Should be no violations detected'); + } + setupCommandTest .command(['scanner:run', '--target', path.join('test', 'code-fixtures', 'apex', 'SomeTestClass.cls'), @@ -250,8 +294,8 @@ describe('scanner:run', function () { '--ruleset', 'ApexUnit', '--format', 'csv' ]) - .it('When no violations are detected, a message is logged to the console', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + .it('When no violations are detected, empty CSV is printed to the console', ctx => { + validateNoViolationsCsvOutput(ctx.stdout, false); }); setupCommandTest @@ -266,10 +310,12 @@ describe('scanner:run', function () { fs.unlinkSync('testout.csv'); } }) - .it('When --oufile is provided and no violations are detected, output file should not be created', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); - expect(ctx.stdout).to.not.contain(processorMessages.getMessage('output.writtenToOutFile', ['testout.csv'])); - expect(fs.existsSync('testout.csv')).to.be.false; + .it('When --outfile is provided and no violations are detected, CSV file with no violations is created', ctx => { + expect(ctx.stdout).to.contain(processorMessages.getMessage('output.writtenToOutFile', ['testout.csv'])); + + const fileContents = fs.readFileSync('testout.csv').toString(); + expect(fs.existsSync('testout.csv')).to.be.true; + validateNoViolationsCsvOutput(fileContents, false); }); }); @@ -290,6 +336,15 @@ describe('scanner:run', function () { expect(rows[1]['ruleName']).to.equal('ApexUnitTestClassShouldHaveAsserts', 'Violation #2 should be of the expected type'); } + function validateNoViolationsHtmlOutput(html: string): void { + // there should be no instance of a filled violations object + const result = html.match(/const violations = (\[.+\]);/); + expect(result).to.be.null; + // there should be an empty violations object + 
const emptyResult = html.match(/const violations = \[\];/); + expect(emptyResult).to.be.not.null; + } + setupCommandTest .command(['scanner:run', '--target', path.join('test', 'code-fixtures', 'apex', 'SomeTestClass.cls'), @@ -297,7 +352,7 @@ describe('scanner:run', function () { '--format', 'html' ]) .it('Properly writes HTML to console', ctx => { - // Parse out the JSON results + // Parse out the HTML results validateHtmlOutput(ctx.stdout); }); @@ -330,8 +385,8 @@ describe('scanner:run', function () { '--ruleset', 'ApexUnit', '--format', 'html' ]) - .it('When no violations are detected, a message is logged to the console', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + .it('When no violations are detected, HTML with no violations is logged to the console', ctx => { + validateNoViolationsHtmlOutput(ctx.stdout); }); setupCommandTest @@ -346,10 +401,11 @@ describe('scanner:run', function () { fs.unlinkSync(outputFile); } }) - .it('When --oufile is provided and no violations are detected, output file should not be created', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); - expect(ctx.stdout).to.not.contain(processorMessages.getMessage('output.writtenToOutFile', [outputFile])); - expect(fs.existsSync(outputFile)).to.be.false; + .it('When --outfile is provided and no violations are detected, HTML file with no violations should be created', ctx => { + expect(ctx.stdout).to.contain(processorMessages.getMessage('output.writtenToOutFile', [outputFile])); + expect(fs.existsSync(outputFile)).to.be.true; + const fileContents = fs.readFileSync('testout.html').toString(); + validateNoViolationsHtmlOutput(fileContents); }); }); @@ -365,6 +421,13 @@ describe('scanner:run', function () { expect(output[0].violations[1].line).to.equal(19, 'Violation #2 should occur on the expected line'); } + function 
validateNoViolationsJsonOutput(json: string): void { + const output = JSON.parse(json); + // There should be no violations. + expect(output.length).to.equal(0, 'Should be no violations from one engine'); + } + + setupCommandTest .command(['scanner:run', '--target', path.join('test', 'code-fixtures', 'apex', 'SomeTestClass.cls'), @@ -406,8 +469,8 @@ describe('scanner:run', function () { '--ruleset', 'ApexUnit', '--format', 'json' ]) - .it('When no violations are detected, a message is logged to the console', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + .it('When no violations are detected, JSON with no violations is logged to the console', ctx => { + validateNoViolationsJsonOutput(ctx.stdout); }); setupCommandTest @@ -422,10 +485,11 @@ describe('scanner:run', function () { fs.unlinkSync('testout.json'); } }) - .it('When --oufile is provided and no violations are detected, output file should not be created', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); - expect(ctx.stdout).to.not.contain(processorMessages.getMessage('output.writtenToOutFile', ['testout.json'])); - expect(fs.existsSync('testout.json')).to.be.false; + .it('When --outfile is provided and no violations are detected, a JSON file with no violations is created', ctx => { + expect(ctx.stdout).to.contain(processorMessages.getMessage('output.writtenToOutFile', ['testout.json'])); + expect(fs.existsSync('testout.json')).to.be.true; + const fileContents = fs.readFileSync('testout.json').toString(); + validateNoViolationsJsonOutput(fileContents); }); }); @@ -454,8 +518,13 @@ describe('scanner:run', function () { '--ruleset', 'ApexUnit', '--format', 'table' ]) - .it('When no violations are detected, a message is logged to the console', ctx => { - expect(ctx.stdout).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + 
.it('When no violations are detected, an empty table is logged to the console', ctx => { + // Split the output by newline characters and throw away the first two rows, which are the column names and a separator. + // That will leave us with just the rows. + const rows = ctx.stdout.trim().split('\n'); + + // Expect to find no violations listing this class. + expect(rows.find(r => r.indexOf("SomeTestClass.cls") > 0)).to.equal(undefined, "more rows??"); }); }); @@ -543,7 +612,8 @@ describe('scanner:run', function () { .it('--json flag wraps message about no violations occuring', ctx => { const output = JSON.parse(ctx.stdout); expect(output.status).to.equal(0, 'Should have finished properly'); - expect(output.result).to.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + expect(output.result).to.not.contain(processorMessages.getMessage('output.noViolationsDetected', ['pmd, retire-js'])); + expect(output.result.length).to.equal(0, 'When no violations are present, JSON result should be empty array.') }); }); @@ -629,6 +699,12 @@ describe('scanner:run', function () { setupCommandTest .command(['scanner:run', '--target', 'path/that/does/not/matter', '--format', 'csv', '--outfile', 'notcsv.xml']) + .finally(ctx => { + // Regardless of what happens in the test itself, we need to delete the file we created. + if (fs.existsSync('notcsv.xml')) { + fs.unlinkSync('notcsv.xml'); + } + }) .it('Warning logged when output file format does not match format', ctx => { expect(ctx.stdout).to.contain(commonMessages.getMessage('validations.outfileFormatMismatch', ['csv', 'xml'])); }); @@ -663,8 +739,16 @@ describe('scanner:run', function () { '--format', 'csv' ]) .it('The baseConfig enables the usage of default Javascript Types', ctx => { - // There should be no violations. 
- expect(ctx.stdout).to.contains('No rule violations found.', 'Should be no violations found in the file.'); + // If there's a summary, then it'll be separated from the CSV by an empty line. Throw it away. + const [csv, _] = ctx.stdout.trim().split(/\n\r?\n/); + + // Since it's a CSV, the rows themselves are separated by newline characters, and there's a header row we + // need to discard. + const rows = csv.trim().split('\n'); + rows.shift(); + + // There should be no rows (besides the header) because there are no violations. + expect(rows.length).to.equal(0, 'Should be no violations detected'); }); // TODO: THIS TEST WAS IMPLEMENTED FOR W-7791882. THE FIX FOR THAT BUG WAS SUB-OPTIMAL, AND WE NEED TO CHANGE IT IN 3.0. @@ -691,7 +775,16 @@ describe('scanner:run', function () { '--env', '{"qunit": true}' ]) .it('Providing qunit in the --env override should resolve errors about that framework', ctx => { - expect(ctx.stdout).to.contain('No rule violations found.', 'Should be no violations found in the file.'); + // If there's a summary, then it'll be separated from the CSV by an empty line. Throw it away. + const [csv, _] = ctx.stdout.trim().split(/\n\r?\n/); + + // Since it's a CSV, the rows themselves are separated by newline characters, and there's a header row we + // need to discard. + const rows = csv.trim().split('\n'); + rows.shift(); + + // There should be no rows (besides the header) because there are no violations. 
+ expect(rows.length).to.equal(0, 'Should be no violations detected'); }); }); diff --git a/test/lib/RuleManager.test.ts b/test/lib/RuleManager.test.ts index f426f9881..45819719d 100644 --- a/test/lib/RuleManager.test.ts +++ b/test/lib/RuleManager.test.ts @@ -273,7 +273,7 @@ describe('RuleManager', () => { const {results} = await ruleManager.runRulesMatchingCriteria(filters, invalidTarget, runOptions, EMPTY_ENGINE_OPTIONS); - expect(results).to.equal(''); + expect(results).to.equal('[]'); Sinon.assert.calledWith(uxSpy, EVENTS.WARNING_ALWAYS, messages.getMessage('warning.targetSkipped', [invalidTarget.join(', ')])); Sinon.assert.callCount(telemetrySpy, 1); }); @@ -395,7 +395,7 @@ describe('RuleManager', () => { const {results} = await ruleManager.runRulesMatchingCriteria(filters, ['app'], runOptions, EMPTY_ENGINE_OPTIONS); expect(typeof results).to.equal('string', `Output ${results} should have been a string`); - expect(results).to.equal('', `Output ${results} should have been an empty string`); + expect(results).to.equal('[]', `Output ${results} should have been an empty JSON array`); Sinon.assert.callCount(telemetrySpy, 1); }); @@ -407,7 +407,7 @@ describe('RuleManager', () => { const {results} = await ruleManager.runRulesMatchingCriteria(filters, invalidTarget, runOptions, EMPTY_ENGINE_OPTIONS); - expect(results).to.equal(''); + expect(results).to.equal('[]'); Sinon.assert.callCount(uxSpy, 1); Sinon.assert.calledWith(uxSpy, EVENTS.WARNING_ALWAYS, messages.getMessage("warning.targetSkipped", [invalidTarget.join(', ')])); Sinon.assert.callCount(telemetrySpy, 1); @@ -422,7 +422,7 @@ describe('RuleManager', () => { const {results} = await ruleManager.runRulesMatchingCriteria(filters, invalidTarget, runOptions, EMPTY_ENGINE_OPTIONS); - expect(results).to.equal(''); + expect(results).to.equal('[]'); Sinon.assert.callCount(uxSpy, 1); Sinon.assert.calledWith(uxSpy, EVENTS.WARNING_ALWAYS, messages.getMessage("warning.targetSkipped", [invalidTarget.join(', ')])); 
Sinon.assert.callCount(telemetrySpy, 1); @@ -436,7 +436,7 @@ describe('RuleManager', () => { const {results} = await ruleManager.runRulesMatchingCriteria(filters, invalidTargets, runOptions, EMPTY_ENGINE_OPTIONS); - expect(results).to.equal(''); + expect(results).to.equal('[]'); Sinon.assert.callCount(uxSpy, 1); Sinon.assert.calledWith(uxSpy, EVENTS.WARNING_ALWAYS, messages.getMessage("warning.targetsSkipped", [invalidTargets.join(', ')])); Sinon.assert.callCount(telemetrySpy, 1); diff --git a/test/lib/util/RunOutputProcessor.test.ts b/test/lib/util/RunOutputProcessor.test.ts index bfff52f8d..fa19663d9 100644 --- a/test/lib/util/RunOutputProcessor.test.ts +++ b/test/lib/util/RunOutputProcessor.test.ts @@ -62,6 +62,16 @@ const FAKE_TABLE_OUTPUT = { ] }; +const EMPTY_TABLE_OUTPUT = { + "columns": [ + "Location", + "Description", + "Category", + "URL" + ], + "rows": [] +}; + const FAKE_CSV_OUTPUT = `"Problem","File","Severity","Line","Column","Rule","Description","URL","Category","Engine" "1","/Users/jfeingold/ts-sample-project/src/file-with-problems.ts","2","3","7","no-unused-vars","'UNUSED' is assigned a value but never used.","https://eslint.org/docs/rules/no-unused-vars","Variables","eslint-typescript" "2","/Users/jfeingold/ts-sample-project/src/file-with-problems.ts","2","3","7","@typescript-eslint/no-unused-vars","'UNUSED' is assigned a value but never used.","https://github.com/typescript-eslint/typescript-eslint/blob/v2.33.0/packages/eslint-plugin/docs/rules/no-unused-vars.md","Variables","eslint-typescript" @@ -138,17 +148,18 @@ describe('RunOutputProcessor', () => { const summaryMap: Map = new Map(); summaryMap.set('pmd', {fileCount: 0, violationCount: 0}); summaryMap.set('eslint', {fileCount: 0, violationCount: 0}); - const fakeRes: RecombinedRuleResults = {minSev: 0, summaryMap, results: ''}; + const fakeTableResults: RecombinedRuleResults = {minSev: 1, results: EMPTY_TABLE_OUTPUT, summaryMap: new Map()} // THIS IS THE PART BEING TESTED. 
- const output: AnyJson = rop.processRunOutput(fakeRes); + const output: AnyJson = rop.processRunOutput(fakeTableResults); - // We expect that the message logged to the console and the message returned should both be the default - const expectedMsg = processorMessages.getMessage('output.noViolationsDetected', ['pmd, eslint']); + const expectedTableSummary = `${processorMessages.getMessage('output.writtenToConsole')}`; + // We expect that an empty table should be logged to the console and that the output should be an empty array Sinon.assert.callCount(logSpy, 1); - Sinon.assert.callCount(tableSpy, 0); - Sinon.assert.calledWith(logSpy, expectedMsg); - expect(output).to.equal(expectedMsg, 'Should have returned expected message'); + Sinon.assert.callCount(tableSpy, 1); + Sinon.assert.calledWith(logSpy, expectedTableSummary); + // TODO is there a better way to check for empty array? output is an empty array, but typed as AnyJson. + expect(output.toString()).to.equal('', 'Should have returned empty results'); }); describe('Test Case: Table', () => { @@ -297,17 +308,20 @@ ${processorMessages.getMessage('output.writtenToConsole')}`; const summaryMap: Map = new Map(); summaryMap.set('pmd', {fileCount: 0, violationCount: 0}); summaryMap.set('eslint', {fileCount: 0, violationCount: 0}); - const fakeRes: RecombinedRuleResults = {minSev: 0, summaryMap, results: ''}; + const fakeRes: RecombinedRuleResults = {minSev: 0, summaryMap, results: '"Problem","Severity","File","Line","Column","Rule","Description","URL","Category","Engine"'}; // THIS IS THE PART BEING TESTED. 
const output: AnyJson = rop.processRunOutput(fakeRes); - // We expect that the message logged to the console and the message returned should both be the default - const expectedMsg = processorMessages.getMessage('output.noViolationsDetected', ['pmd, eslint']); + // We expect the empty CSV output followed by the default engine summary and written-to-file messages are logged to the console + const expectedMsg = `${processorMessages.getMessage('output.engineSummaryTemplate', ['pmd', 0, 0])} +${processorMessages.getMessage('output.engineSummaryTemplate', ['eslint', 0, 0])} +${processorMessages.getMessage('output.writtenToOutFile', [fakeFilePath])}`; + Sinon.assert.callCount(logSpy, 1); Sinon.assert.callCount(tableSpy, 0); Sinon.assert.calledWith(logSpy, expectedMsg); - expect(fakeFiles.length).to.equal(0, 'No files should be created'); + expect(fakeFiles.length).to.equal(1, 'A CSV file with only a header should be created'); expect(output).to.equal(expectedMsg, 'Should have returned expected message'); });