add support for behave BDD test framework #278
base: master
Changes from 12 commits
@@ -0,0 +1,116 @@
import * as path from 'path';

import { TestInfo, TestSuiteInfo } from 'vscode-test-adapter-api';
import { TestEvent } from 'vscode-test-adapter-api';

// Typescript interfaces for behave json output
type IStatus = 'passed' | 'failed' | 'skipped';

interface IScenario {
    type: string;
    keyword: string;
    name: string;
    tags: any[];
    location: string;
    steps: IStep[];
    status: IStatus;
}

interface IFeature {
    keyword: string;
    name: string;
    tags: any[];
    location: string;
    status: IStatus;
    elements?: IScenario[];
}
interface IStep {
    keyword: string;
    step_type: string;
    name: string;
    location: string;
    match: any;
    result: IResult;
    text?: string[];
}
interface IResult {
    status: IStatus;
    duration: number;
    error_message?: string[];
}

function safeJsonParse(text: string): IFeature[] {
    try {
        return JSON.parse(text);
    } catch (err) {
        // this.logger.log('warn', 'parse json failed: ${text}');
        return [];
    }
}

export function parseTestSuites(content: string, cwd: string): (TestSuiteInfo | TestInfo)[] {
    const discoveryResult = safeJsonParse(content);

    let stepid = 0;
    const suites = discoveryResult.map(feature => <TestSuiteInfo | TestInfo>({
        type: 'suite' as 'suite',
        id: feature.location,
        label: feature.name,
        file: extractFile(feature.location, cwd),
        line: extractLine(feature.location),
        tooltip: feature.location,
        children: (feature.elements || []).map(scenario => ({
            type: 'suite' as 'suite',
            id: scenario.location,
            label: scenario.name,
            file: extractFile(scenario.location, cwd),
            line: extractLine(scenario.location),
            tooltip: scenario.location,
            children: scenario.steps.map(step => ({
                type: 'test' as 'test',
                id: 'step' + (stepid += 1),
                label: step.name,
                file: extractFile(step.location, cwd),
                line: extractLine(step.location),
                tooltip: step.location,
            })),
        })),
    }));

    return suites;
}

function extractLine(text: string): number {
    const separatorIndex = text.indexOf(':');
    return Number(text.substring(separatorIndex + 1));
}

function extractFile(text: string, cwd: string) {
    const separatorIndex = text.indexOf(':');
    return path.resolve(cwd, text.substring(0, separatorIndex));
}

export function parseTestStates(content: string): TestEvent[] {
    const runtestResult = safeJsonParse(content);

    let states: TestEvent[] = [];

    let stepid = 0;

    runtestResult.forEach(feature => {
        (feature.elements || []).forEach(scenario => {
            const steps = scenario.steps.map((step): TestEvent => ({
                type: 'test' as 'test',
                state: step.result.status,
                test: 'step' + (stepid += 1),
                message: (step.result.error_message ? step.result.error_message.join('\n') : ''),
                decorations: [],
                description: undefined,
            }));
            states = states.concat(steps);
        });
    });

    return states;
}
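For context (not part of the diff): both functions consume the JSON that behave prints when run with -f json. Below is a minimal hand-written sample of that output and how it would be fed to the two parsers; the feature, scenario, and step names and file paths are invented for illustration only, though the shape matches the IFeature/IScenario/IStep interfaces above.

import { parseTestSuites, parseTestStates } from './behaveTestJsonParser';

// Hand-written approximation of behave's `-f json` output.
const sampleOutput = JSON.stringify([{
    keyword: 'Feature',
    name: 'Login',
    tags: [],
    location: 'features/login.feature:1',
    status: 'passed',
    elements: [{
        type: 'scenario',
        keyword: 'Scenario',
        name: 'Valid credentials',
        tags: [],
        location: 'features/login.feature:3',
        status: 'passed',
        steps: [{
            keyword: 'Given',
            step_type: 'given',
            name: 'a registered user',
            location: 'features/login.feature:4',
            match: {},
            result: { status: 'passed', duration: 0.001 },
        }],
    }],
}]);

// Discovery builds a feature > scenario > step suite tree from the locations...
const suites = parseTestSuites(sampleOutput, '/path/to/workspace');
// ...and a test run maps each step result to a TestEvent with the step's status.
const states = parseTestStates(sampleOutput);
console.log(suites.length, states.length);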
@@ -0,0 +1,209 @@
import * as path from 'path';

import {
    TestEvent, TestSuiteInfo
} from 'vscode-test-adapter-api';

import { ArgumentParser } from 'argparse';
import { IWorkspaceConfiguration } from '../configuration/workspaceConfiguration';
import { IEnvironmentVariables, EnvironmentVariablesLoader } from '../environmentVariablesLoader';
import { ILogger } from '../logging/logger';
import { IProcessExecution, runProcess } from '../processRunner';
import { IDebugConfiguration, ITestRunner } from '../testRunner';
import { empty } from '../utilities/collections';
import { setDescriptionForEqualLabels } from '../utilities/tests';
import { parseTestStates } from './behaveTestJsonParser';
import { parseTestSuites } from './behaveTestJsonParser';
import { runModule } from '../pythonRunner';

// --- Behave Exit Codes ---
// 0: All tests were collected and passed successfully
// 1: Some tests have failed
const BEHAVE_NON_ERROR_EXIT_CODES = [0, 1];

const DISCOVERY_OUTPUT_PLUGIN_INFO = {
    PACKAGE_PATH: path.resolve(__dirname, '../../resources/python'),
    MODULE_NAME: 'vscode_python_test_adapter.behave.discovery_output_plugin',
};

Review thread on the DISCOVERY_OUTPUT_PLUGIN_INFO constant:
Reviewer: I think this is not actually used, right? Can you please try removing this?
Author: Not sure what you mean. Behave exit codes are not documented (to my current knowledge). I have seen 0 and 1 as valid exit codes and, unfortunately, 0 when an error occurs (which then spits out an error string instead of JSON data).
Reviewer: I meant the DISCOVERY_OUTPUT_PLUGIN_INFO constant 🙂
Author: It is used in loadEnvironmentVariables in the same file (lines 154 and 159).
Reviewer: Yes, it's referenced, but it makes sense only for pytest - pytest is run with a custom plugin (in the resources/python folder). Since behave does not need any custom plugins, this constant and its usages are not actually useful. If you have any doubts, I can suggest writing tests first and then trying to remove this; I'm pretty sure tests won't break.

interface IBehaveArguments {
    argumentsToPass: string[];
    locations: string[];
}

export class BehaveTestRunner implements ITestRunner {

    private readonly testExecutions: Map<string, IProcessExecution> = new Map<string, IProcessExecution>();

    constructor(
        public readonly adapterId: string,
        private readonly logger: ILogger
    ) { }

    public cancel(): void {
        this.testExecutions.forEach((execution, test) => {
            this.logger.log('info', `Cancelling execution of ${test}`);
            try {
                execution.cancel();
            } catch (error) {
                this.logger.log('crit', `Cancelling execution of ${test} failed: ${error}`);
            }
        });
    }

    public async debugConfiguration(config: IWorkspaceConfiguration, test: string): Promise<IDebugConfiguration> {
        const additionalEnvironment = await this.loadEnvironmentVariables(config);
        const runArguments = this.getRunArguments(test, config.getBehaveConfiguration().behaveArguments);
        const params = [...runArguments.argumentsToPass, ...runArguments.locations];
        return {
            module: 'behave',
            cwd: config.getCwd(),
            args: params,
            env: additionalEnvironment,
        };
    }

    public async load(config: IWorkspaceConfiguration): Promise<TestSuiteInfo | undefined> {
        if (!config.getBehaveConfiguration().isBehaveEnabled) {
            this.logger.log('info', 'Behave test discovery is disabled');
            return undefined;
        }
        const additionalEnvironment = await this.loadEnvironmentVariables(config);
        this.logger.log('info', `Discovering tests using python path '${config.pythonPath()}' in ${config.getCwd()}`);

        const discoveryArguments = this.getDiscoveryArguments(config.getBehaveConfiguration().behaveArguments);
        this.logger.log('info', `Running behave with arguments: ${discoveryArguments.argumentsToPass.join(', ')}`);
        this.logger.log('info', `Running behave with locations: ${discoveryArguments.locations.join(', ')}`);

        const params = [...discoveryArguments.argumentsToPass, ...discoveryArguments.locations];

        const result = await this.runBehave(config, additionalEnvironment, params).complete();
        const tests = parseTestSuites(result.output, config.getCwd());
        if (empty(tests)) {
            this.logger.log('warn', 'No tests discovered');
            return undefined;
        }

        setDescriptionForEqualLabels(tests, path.sep);
        return {
            type: 'suite',
            id: this.adapterId,
            label: 'Behave tests',
            children: tests,
        };
    }

    public async run(config: IWorkspaceConfiguration, test: string): Promise<TestEvent[]> {
        if (!config.getBehaveConfiguration().isBehaveEnabled) {
            this.logger.log('info', 'Behave test execution is disabled');
            return [];
        }
        const additionalEnvironment = await this.loadEnvironmentVariables(config);
        this.logger.log('info', `Running tests using python path '${config.pythonPath()}' in ${config.getCwd()}`);

        const testRunArguments = this.getRunArguments(test, config.getBehaveConfiguration().behaveArguments);
        this.logger.log('info', `Running behave with arguments: ${testRunArguments.argumentsToPass.join(', ')}`);
        this.logger.log('info', `Running behave with locations: ${testRunArguments.locations.join(', ')}`);

        const params = [...testRunArguments.argumentsToPass, ...testRunArguments.locations];

        const result = await this.runBehave(config, additionalEnvironment, params).complete();
        const states = parseTestStates(result.output);
        if (empty(states)) {
            // maybe an error occurred
            this.logger.log('warn', 'No tests run');
            this.logger.log('warn', `Output: ${result.output}`);
        }

        return states;
    }

    private runBehave(config: IWorkspaceConfiguration, env: IEnvironmentVariables, args: string[]): IProcessExecution {
        const behavePath = config.getBehaveConfiguration().behavePath();
        if (behavePath === path.basename(behavePath)) {
            this.logger.log('info', `Running ${behavePath} as a Python module`);
            return runModule({
                pythonPath: config.pythonPath(),
                module: config.getBehaveConfiguration().behavePath(),
                environment: env,
                args,
                cwd: config.getCwd(),
                acceptedExitCodes: BEHAVE_NON_ERROR_EXIT_CODES,
            });
        }

        this.logger.log('info', `Running ${behavePath} as an executable`);
        return runProcess(
            behavePath,
            args,
            {
                cwd: config.getCwd(),
                environment: env,
                acceptedExitCodes: BEHAVE_NON_ERROR_EXIT_CODES,
            });
    }

    private async loadEnvironmentVariables(config: IWorkspaceConfiguration): Promise<IEnvironmentVariables> {
        const envFileEnvironment = await EnvironmentVariablesLoader.load(config.envFile(), process.env, this.logger);

        const updatedPythonPath = [
            config.getCwd(),
            envFileEnvironment.PYTHONPATH,
            process.env.PYTHONPATH,
            DISCOVERY_OUTPUT_PLUGIN_INFO.PACKAGE_PATH
        ].filter(item => item).join(path.delimiter);

        const updatedBehavePlugins = [
            envFileEnvironment.BEHAVE_PLUGINS,
            DISCOVERY_OUTPUT_PLUGIN_INFO.MODULE_NAME
        ].filter(item => item).join(',');

        return {
            ...envFileEnvironment,
            PYTHONPATH: updatedPythonPath,
            BEHAVE_PLUGINS: updatedBehavePlugins,
        };
    }

    private getDiscoveryArguments(rawBehaveArguments: string[]): IBehaveArguments {
        const argumentParser = this.configureCommonArgumentParser();
        const [knownArguments, argumentsToPass] = argumentParser.parse_known_args(rawBehaveArguments);
        return {
            locations: (knownArguments as { locations?: string[] }).locations || [],
            argumentsToPass: ['-d', '-f', 'json', '--no-summary', '--no-snippets'].concat(argumentsToPass),
        };
    }

Review thread on the @ts-expect-error suppression below:
Reviewer: Can this be removed?
Author: Yes, should be doable. Currently the test parameter is not used, but it should be.

    // @ts-expect-error
    private getRunArguments(test: string, rawBehaveArguments: string[]): IBehaveArguments {
        const argumentParser = this.configureCommonArgumentParser();
        const [knownArguments, argumentsToPass] = argumentParser.parse_known_args(rawBehaveArguments);
        return {
            locations: (knownArguments as { locations?: string[] }).locations || [],
            argumentsToPass: ['-f', 'json', '--no-summary', '--no-snippets'].concat(argumentsToPass),
        };
    }

    private configureCommonArgumentParser() {
        const argumentParser = new ArgumentParser({
            exit_on_error: false,
        });
        argumentParser.add_argument(
            '-D', '--define',
            { action: 'store', dest: 'define' });
        argumentParser.add_argument(
            '-e', '--exclude',
            { action: 'store', dest: 'exclude' });
        argumentParser.add_argument(
            '-i', '--include',
            { action: 'store', dest: 'include' });

        // Handle positional arguments (list of testsuite directories to run behave in).
        argumentParser.add_argument(
            'locations',
            { nargs: '*' });

        return argumentParser;
    }
}
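As an aside (not part of the change): a small standalone sketch of how parse_known_args splits user-supplied behave arguments into positional locations and pass-through options, which is what getDiscoveryArguments and getRunArguments rely on. Only the parser setup relevant to the split is reproduced; the example arguments are invented.

import { ArgumentParser } from 'argparse';

const parser = new ArgumentParser({ exit_on_error: false });
parser.add_argument('-D', '--define', { action: 'store', dest: 'define' });
parser.add_argument('locations', { nargs: '*' });

// '--tags=@smoke' is unknown to the parser and is therefore passed through to
// behave unchanged; 'features/login' is captured by the positional argument.
const [known, passThrough] = parser.parse_known_args(['features/login', '--tags=@smoke']);

console.log(known.locations);   // ['features/login']
console.log(['-d', '-f', 'json', '--no-summary', '--no-snippets'].concat(passThrough));
// -> the options a discovery run would hand to behave, followed by the locations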
Review thread on the commented-out logger call in safeJsonParse:
Reviewer: Should this be uncommented?
Author: I will turn this into a comment as there is no logger accessible here (at the moment).
I will also look into adding some tests and I may need some help with that.
Which test scenarios would you like to see covered?
Are there any available tests that may serve as a blueprint?
Reviewer: Some examples are: ...
The minimal coverage, I would say, is to replicate the core UI functionality and user flow - for example testing.behaveEnabled (and maybe args) - and verify that all expected tests are discovered (see the 'should discover tests' tests).
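A rough sketch of what such a 'should discover tests' test could look like. The configuration and logger stubs, import paths, and sample workspace location below are illustrative assumptions, not the repository's actual test fixtures; the real IWorkspaceConfiguration and ILogger have more members, hence the casts.

import { expect } from 'chai';
import 'mocha';
import * as path from 'path';

import { ILogger } from '../src/logging/logger';
import { IWorkspaceConfiguration } from '../src/configuration/workspaceConfiguration';
import { BehaveTestRunner } from '../src/behave/behaveTestRunner';

// Stand-in logger that discards all messages.
const logger = { log: (_level: string, _message: string) => { /* ignore */ } } as unknown as ILogger;

// Stand-in configuration pointing at an assumed sample behave project.
const config = {
    pythonPath: () => 'python',
    getCwd: () => path.resolve(__dirname, 'samples/behave_project'),
    envFile: () => path.resolve(__dirname, 'samples/behave_project/.env'),
    getBehaveConfiguration: () => ({
        isBehaveEnabled: true,
        behavePath: () => 'behave',
        behaveArguments: [] as string[],
    }),
} as unknown as IWorkspaceConfiguration;

describe('BehaveTestRunner', () => {
    it('should discover tests', async () => {
        const runner = new BehaveTestRunner('behave-adapter', logger);
        const mainSuite = await runner.load(config);

        expect(mainSuite).to.not.be.undefined;
        expect(mainSuite!.label).to.be.eq('Behave tests');
        expect(mainSuite!.children).to.not.be.empty;
    });
});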