
Add a basic test framework for CraftOS

This runs tests on CraftOS using a tiny test runner that I originally
knocked up for LuaDash. It can be run both from JUnit (so IDEA and
Gradle) and in-game in the shell, so is pretty accessible to work with.

I also add a very basic POC test for the io library. I'd like to flesh
this out soon enough to contain most of the things from the original io
test.
This commit is contained in:
SquidDev 2019-03-02 02:09:14 +00:00
parent eaf24a3ceb
commit 52e1906d42
6 changed files with 928 additions and 24 deletions
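As a quick illustration (not part of the commit; the file name and contents below are hypothetical), a spec file for this runner looks like the following, with describe, it, expect and pending injected into the file's environment by mcfly.lua:

    -- spec/example_spec.lua (hypothetical)
    describe("something to test", function()
        it("behaves as expected", function()
            expect(1 + 1):equals(2)
        end)

        -- Declared but not yet written; reported as "pending".
        pending("handles the harder cases")
    end)

In-game, the specs can be run from the shell by invoking mcfly.lua with the spec directory as its argument (the exact paths depend on where the test ROM is mounted).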

@@ -0,0 +1,406 @@
/*
* This file is part of ComputerCraft - http://www.computercraft.info
* Copyright Daniel Ratcliffe, 2011-2019. Do not distribute without permission.
* Send enquiries to dratcliffe@gmail.com
*/
package dan200.computercraft.core;
import dan200.computercraft.ComputerCraft;
import dan200.computercraft.api.filesystem.IWritableMount;
import dan200.computercraft.api.lua.ILuaAPI;
import dan200.computercraft.api.lua.ILuaContext;
import dan200.computercraft.api.lua.LuaException;
import dan200.computercraft.core.computer.BasicEnvironment;
import dan200.computercraft.core.computer.Computer;
import dan200.computercraft.core.computer.MainThread;
import dan200.computercraft.core.filesystem.FileSystemException;
import dan200.computercraft.core.filesystem.MemoryMount;
import dan200.computercraft.core.terminal.Terminal;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.*;
import org.junit.jupiter.api.function.Executable;
import org.opentest4j.AssertionFailedError;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Stream;
import static dan200.computercraft.core.apis.ArgumentHelper.getTable;
import static dan200.computercraft.core.apis.ArgumentHelper.getType;
/**
* Loads tests from {@code test-rom/spec} and executes them.
*
* This spins up a new computer and runs the {@code mcfly.lua} script. This will then load all files in the {@code spec}
* directory and register them with {@code cct_test.start}.
*
* From the test names, we generate a tree of {@link DynamicNode}s which queue an event and wait for
* {@code cct_test.submit} to be called. McFly pulls these events, executes the tests and then calls the submit method.
*
* Once all tests are done, we invoke {@code cct_test.finish} in order to mark everything as complete.
*/
public class ComputerTestDelegate
{
private static final Logger LOG = LogManager.getLogger( ComputerTestDelegate.class );
private static final long TICK_TIME = TimeUnit.MILLISECONDS.toNanos( 50 );
private static final long TIMEOUT = TimeUnit.SECONDS.toNanos( 10 );
private final ReentrantLock lock = new ReentrantLock();
private Computer computer;
private final Condition hasTests = lock.newCondition();
private DynamicNodeBuilder tests;
private final Condition hasRun = lock.newCondition();
private String currentTest;
private boolean runFinished;
private Throwable runResult;
private final Condition hasFinished = lock.newCondition();
private boolean finished = false;
@BeforeEach
public void before()
{
ComputerCraft.logPeripheralErrors = true;
ComputerCraft.log = LogManager.getLogger( ComputerCraft.MOD_ID );
Terminal term = new Terminal( 78, 20 );
IWritableMount mount = new MemoryMount()
.addFile( "startup.lua", "loadfile('test/mcfly.lua', _ENV)('test/spec') cct_test.finish()" );
computer = new Computer( new BasicEnvironment( mount ), term, 0 );
computer.addApi( new ILuaAPI()
{
@Override
public String[] getNames()
{
return new String[] { "cct_test" };
}
@Nonnull
@Override
public String[] getMethodNames()
{
return new String[] { "start", "submit", "finish" };
}
@Override
public void startup()
{
try
{
computer.getAPIEnvironment().getFileSystem().mount(
"test-rom", "test",
BasicEnvironment.createMount( ComputerTestDelegate.class, "test-rom", "test" )
);
}
catch( FileSystemException e )
{
throw new IllegalStateException( e );
}
}
@Nullable
@Override
public Object[] callMethod( @Nonnull ILuaContext context, int method, @Nonnull Object[] arguments ) throws LuaException, InterruptedException
{
switch( method )
{
case 0: // start: Submit several tests and signal for #get to run
{
LOG.info( "Received tests from computer" );
DynamicNodeBuilder root = new DynamicNodeBuilder( "" );
for( Object key : getTable( arguments, 0 ).keySet() )
{
if( !(key instanceof String) ) throw new LuaException( "Non-string key " + getType( key ) );
String name = (String) key;
String[] parts = name.split( "\0" );
DynamicNodeBuilder builder = root;
for( int i = 0; i < parts.length - 1; i++ ) builder = builder.get( parts[i] );
builder.runs( parts[parts.length - 1], () -> {
// Run it
lock.lockInterruptibly();
try
{
// Set the current test
runResult = null;
runFinished = false;
currentTest = name;
// Tell the computer to run it
LOG.info( "Starting '{}'", formatName( name ) );
computer.queueEvent( "cct_test_run", new Object[] { name } );
long remaining = TIMEOUT;
while( remaining > 0 && computer.isOn() && !runFinished )
{
tick();
long waiting = hasRun.awaitNanos( TICK_TIME );
if( waiting > 0 ) break;
remaining -= TICK_TIME;
}
LOG.info( "Finished '{}'", formatName( name ) );
if( remaining <= 0 )
{
throw new IllegalStateException( "Timed out waiting for test" );
}
else if( !computer.isOn() )
{
throw new IllegalStateException( "Computer turned off mid-execution" );
}
if( runResult != null ) throw runResult;
}
finally
{
lock.unlock();
currentTest = null;
}
} );
}
lock.lockInterruptibly();
try
{
tests = root;
hasTests.signal();
}
finally
{
lock.unlock();
}
return null;
}
case 1: // submit: Submit the result of a test, allowing the test executor to continue
{
Map<?, ?> tbl = getTable( arguments, 0 );
String name = (String) tbl.get( "name" );
String status = (String) tbl.get( "status" );
String message = (String) tbl.get( "message" );
String trace = (String) tbl.get( "trace" );
StringBuilder wholeMessage = new StringBuilder();
if( message != null ) wholeMessage.append( message );
if( trace != null )
{
if( wholeMessage.length() != 0 ) wholeMessage.append( '\n' );
wholeMessage.append( trace );
}
lock.lockInterruptibly();
try
{
LOG.info( "'{}' finished with {}", formatName( name ), status );
// Skip if a test mismatch
if( !name.equals( currentTest ) )
{
LOG.warn( "Skipping test '{}', as we're currently executing '{}'", formatName( name ), formatName( currentTest ) );
return null;
}
switch( status )
{
case "ok":
case "pending":
break;
case "fail":
runResult = new AssertionFailedError( wholeMessage.toString() );
break;
case "error":
runResult = new IllegalStateException( wholeMessage.toString() );
break;
}
runFinished = true;
hasRun.signal();
}
finally
{
lock.unlock();
}
return null;
}
case 2: // finish: Signal to after that execution has finished
LOG.info( "Finished" );
lock.lockInterruptibly();
try
{
finished = true;
hasFinished.signal();
}
finally
{
lock.unlock();
}
return null;
default:
return null;
}
}
} );
computer.turnOn();
}
@AfterEach
public void after() throws InterruptedException
{
try
{
LOG.info( "Finished execution" );
computer.queueEvent( "cct_test_run", null );
// Wait for test execution to fully finish
lock.lockInterruptibly();
try
{
long remaining = TIMEOUT;
while( remaining > 0 && !finished )
{
tick();
if( hasFinished.awaitNanos( TICK_TIME ) > 0 ) break;
remaining -= TICK_TIME;
}
if( remaining <= 0 ) throw new IllegalStateException( "Timed out waiting for finish." + dump() );
if( !finished ) throw new IllegalStateException( "Computer did not finish." + dump() );
}
finally
{
lock.unlock();
}
}
finally
{
// Show a dump of computer output
System.out.println( dump() );
// And shutdown
computer.shutdown();
}
}
@TestFactory
public Stream<DynamicNode> get() throws InterruptedException
{
lock.lockInterruptibly();
try
{
long remaining = TIMEOUT;
while( remaining > 0 && tests == null )
{
tick();
if( hasTests.awaitNanos( TICK_TIME ) > 0 ) break;
remaining -= TICK_TIME;
}
if( remaining <= 0 ) throw new IllegalStateException( "Timed out waiting for tests. " + dump() );
if( tests == null ) throw new IllegalStateException( "Computer did not provide any tests. " + dump() );
}
finally
{
lock.unlock();
}
return tests.buildChildren();
}
private static class DynamicNodeBuilder
{
private final String name;
private final Map<String, DynamicNodeBuilder> children;
private final Executable executor;
DynamicNodeBuilder( String name )
{
this.name = name;
this.children = new HashMap<>();
this.executor = null;
}
DynamicNodeBuilder( String name, Executable executor )
{
this.name = name;
this.children = Collections.emptyMap();
this.executor = executor;
}
DynamicNodeBuilder get( String name )
{
DynamicNodeBuilder child = children.get( name );
if( child == null ) children.put( name, child = new DynamicNodeBuilder( name ) );
return child;
}
void runs( String name, Executable executor )
{
// Find a unique name for this test within the current node before registering it.
String childName = name;
int id = 0;
while( children.containsKey( childName ) )
{
id++;
childName = name + "_" + id;
}
children.put( childName, new DynamicNodeBuilder( childName, executor ) );
}
DynamicNode build()
{
return executor == null
? DynamicContainer.dynamicContainer( name, buildChildren() )
: DynamicTest.dynamicTest( name, executor );
}
Stream<DynamicNode> buildChildren()
{
return children.values().stream().map( DynamicNodeBuilder::build );
}
}
private String dump()
{
if( !computer.isOn() ) return "Computer is currently off.";
Terminal term = computer.getAPIEnvironment().getTerminal();
StringBuilder builder = new StringBuilder().append( "Computer is currently on.\n" );
for( int line = 0; line < term.getHeight(); line++ )
{
builder.append( String.format( "%2d | %" + term.getWidth() + "s |\n", line + 1, term.getLine( line ) ) );
}
computer.shutdown();
return builder.toString();
}
private void tick()
{
computer.tick();
MainThread.executePendingTasks();
}
private static String formatName( String name )
{
return name.replace( "\0", " \u2192 " );
}
}
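To make the handshake described in the class comment concrete, here is a condensed, hypothetical Lua-side sketch of a runner driving the cct_test API registered above (mcfly.lua, later in this commit, is the real implementation; field names follow the submit handler above):

    -- Hypothetical sketch of the cct_test handshake; run_single_test is a stand-in.
    local function run_single_test(name)
        -- Execute the test body registered under this name.
    end

    -- Keys are test names; nesting is encoded with "\0", which the Java side
    -- splits to build its tree of DynamicNodes.
    local tests = { ["io\0io.type\0returns file on handles"] = true }

    cct_test.start(tests) -- register the tests and unblock the @TestFactory

    while true do
        -- The delegate queues one of these per JUnit test it wants executed,
        -- and a nil name once everything is done.
        local _, name = os.pullEvent("cct_test_run")
        if not name then break end

        local ok, err = pcall(run_single_test, name)
        cct_test.submit({
            name = name,
            status = ok and "ok" or "fail", -- "ok", "pending", "fail" or "error"
            message = not ok and tostring(err) or nil,
        })
    end

    cct_test.finish() -- lets after() know execution has finished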

@@ -82,12 +82,21 @@ public class BasicEnvironment implements IComputerEnvironment
}
@Override
@Deprecated
public IMount createResourceMount( String domain, String subPath )
{
File file = getContainingFile();
String path = "assets/" + domain + "/" + subPath;
return createMount( ComputerCraft.class, "assets/" + domain + "/" + subPath, "main" );
}
@Override
public InputStream createResourceFile( String domain, String subPath )
{
return ComputerCraft.class.getClassLoader().getResourceAsStream( "assets/" + domain + "/" + subPath );
}
public static IMount createMount( Class<?> klass, String path, String fallback )
{
File file = getContainingFile(klass);
if( file.isFile() )
{
@@ -109,7 +118,7 @@ public class BasicEnvironment implements IComputerEnvironment
while( baseFile != null && !wholeFile.exists() )
{
baseFile = baseFile.getParentFile();
wholeFile = new File( baseFile, "resources/main/" + path );
wholeFile = new File( baseFile, "resources/" + fallback + "/" + path );
}
if( !wholeFile.exists() ) throw new IllegalStateException( "Cannot find ROM mount at " + file );
@@ -118,15 +127,10 @@
}
}
@Override
public InputStream createResourceFile( String domain, String subPath )
{
return ComputerCraft.class.getClassLoader().getResourceAsStream( "assets/" + domain + "/" + subPath );
}
private static File getContainingFile()
private static File getContainingFile(Class<?> klass)
{
String path = ComputerCraft.class.getProtectionDomain().getCodeSource().getLocation().getPath();
String path = klass.getProtectionDomain().getCodeSource().getLocation().getPath();
int bangIndex = path.indexOf( "!" );
// Plain old file, so step up from dan200.computercraft.

@@ -7,6 +7,8 @@
package dan200.computercraft.core.computer;
import dan200.computercraft.ComputerCraft;
import dan200.computercraft.api.filesystem.IMount;
import dan200.computercraft.api.filesystem.IWritableMount;
import dan200.computercraft.api.lua.ILuaAPI;
import dan200.computercraft.api.lua.ILuaContext;
import dan200.computercraft.api.lua.LuaException;
@@ -18,6 +20,7 @@ import org.junit.jupiter.api.Assertions;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.function.Consumer;
/**
* Helper class to run a program on a computer.
@@ -27,26 +30,33 @@ public class ComputerBootstrap
private static final int TPS = 20;
private static final int MAX_TIME = 10;
public static void run( String program )
public static void run( IMount mount, Consumer<Computer> setup )
{
run( program, -1 );
}
public static void run( String program, int shutdownAfter )
public static void run( String program )
{
ComputerCraft.logPeripheralErrors = true;
ComputerCraft.log = LogManager.getLogger( ComputerCraft.MOD_ID );
MemoryMount mount = new MemoryMount()
.addFile( "test.lua", program )
.addFile( "startup", "assertion.assert(pcall(loadfile('test.lua', _ENV))) os.shutdown()" );
run( mount, x -> {} );
}
public static void run( IWritableMount mount, Consumer<Computer> setup )
{
ComputerCraft.logPeripheralErrors = true;
ComputerCraft.log = LogManager.getLogger( ComputerCraft.MOD_ID );
Terminal term = new Terminal( ComputerCraft.terminalWidth_computer, ComputerCraft.terminalHeight_computer );
final Computer computer = new Computer( new BasicEnvironment( mount ), term, 0 );
AssertApi api = new AssertApi();
computer.addApi( api );
setup.accept( computer );
try
{
computer.turnOn();
@@ -73,13 +83,6 @@ public class ComputerBootstrap
// Break if the computer was once on, and is now off.
everOn |= computer.isOn();
if( (everOn || tick > TPS) && !computer.isOn() ) break;
// Shutdown the computer after a period of time
if( shutdownAfter > 0 && tick != 0 && tick % shutdownAfter == 0 )
{
ComputerCraft.log.info( "Shutting down: shutdown after {}", shutdownAfter );
computer.shutdown();
}
}
if( computer.isOn() || !api.didAssert )

@@ -0,0 +1,417 @@
--- A very basic test framework for ComputerCraft
--
-- Like Busted (http://olivinelabs.com/busted/), but more memorable.
--
-- @usage
-- describe("something to test", function()
-- it("some property", function()
-- expect(some_function()):equals("What it should equal")
-- end)
-- end)
--- Assert an argument to the given function has the specified type.
--
-- @tparam string func The function's name
-- @tparam int arg The argument index to this function
-- @tparam string ty The type this argument should have. May be 'value' for any
-- non-nil value.
-- @param val The value to check
-- @raise If this value doesn't match the expected type.
local function check(func, arg, ty, val)
if ty == 'value' then
if val == nil then
error(('%s: bad argument #%d (got nil)'):format(func, arg), 3)
end
elseif type(val) ~= ty then
return error(('%s: bad argument #%d (expected %s, got %s)'):format(func, arg, ty, type(val)), 3)
end
end
local error_mt = { __tostring = function(self) return self.message end }
--- Attempt to execute the provided function, gathering a stack trace when it
-- errors.
--
-- @tparam function fn The function to run
-- @return[1] true
-- @return[2] false
-- @return[2] The error object
local function try(fn)
if not debug or not debug.traceback then
local ok, err = pcall(fn)
if ok or getmetatable(err) == error_mt then
return ok, err
else
return ok, setmetatable({ message = tostring(err) }, error_mt)
end
end
local ok, err = xpcall(fn, function(err)
return { message = err, trace = debug.traceback() }
end)
-- Restore a whole bunch of state
io.input(io.stdin)
io.output(io.stdout)
-- If we're an existing error, or we succeeded, then propagate it.
if ok then return ok, err end
if type(err) ~= "table" then
return ok, setmetatable({ message = tostring(err) }, error_mt)
end
if getmetatable(err.message) == error_mt then return ok, err.message end
-- Find the common substring between the two traces. Yes, this is horrible.
local trace = debug.traceback()
for i = 1, #trace do
if trace:sub(-i) ~= err.trace:sub(-i) then
err.trace = err.trace:sub(1, -i)
break
end
end
return ok, setmetatable(err, error_mt)
end
--- Fail a test with the given message
--
-- @tparam string message The message to fail with
-- @raises An error with the given message
local function fail(message)
check('fail', 1, 'string', message)
error(setmetatable({ message = message, fail = true }, error_mt))
end
--- Format an object in order to make it more readable
--
-- @param value The value to format
-- @treturn string The formatted value
local function format(value)
-- TODO: Look into something like mbs's pretty printer.
local ok, res = pcall(textutils.serialise, value)
if ok then return res else return tostring(value) end
end
local expect_mt = {}
expect_mt.__index = expect_mt
--- Assert that this expectation has the provided value
--
-- @param value The value to require this expectation to be equal to
-- @raises If the values are not equal
function expect_mt:equals(value)
if value ~= self.value then
fail(("Expected %s\n but got %s"):format(format(value), format(self.value)))
end
return self
end
expect_mt.equal = expect_mt.equals
expect_mt.eq = expect_mt.equals
--- Assert that this expectation does not equal the provided value
--
-- @param value The value to require this expectation to not be equal to
-- @raises If the values are equal
function expect_mt:not_equals(value)
if value == self.value then
fail(("Expected any value but %s"):format(format(value)))
end
return self
end
expect_mt.not_equal = expect_mt.not_equals
expect_mt.ne = expect_mt.not_equals
--- Assert that this expectation has something of the provided type
--
-- @tparam string exp_type The type to require this expectation to have
-- @raises If it does not have that type
function expect_mt:type(exp_type)
local actual_type = type(self.value)
if exp_type ~= actual_type then
fail(("Expected value of type %s\n got %s"):format(exp_type, actual_type))
end
return self
end
local function are_same(eq, left, right)
if left == right then return true end
local ty = type(left)
if ty ~= type(right) or ty ~= "table" then return false end
-- If we've already explored/are exploring the left and right then return
if eq[left] and eq[left][right] then return true end
if not eq[left] then eq[left] = {[right] = true} else eq[left][right] = true end
if not eq[right] then eq[right] = {[left] = true} else eq[right][left] = true end
-- Verify all pairs in left are equal to those in right
for k, v in pairs(left) do
if not are_same(eq, v, right[k]) then return false end
end
-- And verify all pairs in right are present in left
for k in pairs(right) do
if left[k] == nil then return false end
end
return true
end
--- Assert that this expectation is structurally equivalent to
-- the provided object.
--
-- @param value The value to check for structural equivalence
-- @raises If they are not equivalent
function expect_mt:same(value)
if not are_same({}, self.value, value) then
fail(("Expected %s\n but got %s"):format(format(value), format(self.value)))
end
return self
end
--- Construct a new expectation from the provided value
--
-- @param value The value to apply assertions to
-- @return The new expectation
local function expect(value)
return setmetatable({ value = value}, expect_mt)
end
--- The stack of "describe"s.
local test_stack = { n = 0 }
--- Whether we're now running tests, and so cannot run any more.
local tests_locked = false
--- The list of tests that we'll run
local test_list, test_map, test_count = { }, { }, 0
--- Add a new test to our queue.
--
-- @param test The descriptor of this test
local function do_test(test)
-- Set the name if it doesn't already exist
if not test.name then test.name = table.concat(test_stack, "\0", 1, test_stack.n) end
test_count = test_count + 1
test_list[test_count] = test
test_map[test.name] = test_count
end
--- Get the "friendly" name of this test.
--
-- @treturn string This test's friendly name
local function test_name(test) return (test.name:gsub("\0", " \26 ")) end
--- Describe something which will be tested, such as a function or situation
--
-- @tparam string name The name of the object to test
-- @tparam function body A function which describes the tests for this object.
local function describe(name, body)
check('describe', 1, 'string', name)
check('describe', 2, 'function', body)
if tests_locked then error("Cannot describe something while running tests", 2) end
-- Push our name onto the stack, eval and pop it
local n = test_stack.n + 1
test_stack[n], test_stack.n = name, n
local ok, err = try(body)
-- We count errors as a (failing) test.
if not ok then do_test { error = err } end
test_stack.n = n - 1
end
--- Declare a single test within a context
--
-- @tparam string name What you are testing
-- @tparam function body A function which runs the test, failing if the
-- assertions are not met.
local function it(name, body)
check('it', 1, 'string', name)
check('it', 2, 'function', body)
if tests_locked then error("Cannot create test while running tests", 2) end
-- Push name onto the stack
local n = test_stack.n + 1
test_stack[n], test_stack.n, tests_locked = name, n, true
do_test { action = body }
-- Pop the test from the stack
test_stack.n, tests_locked = n - 1, false
end
--- Declare a single not-yet-implemented test
--
-- @tparam string name What you really should be testing but aren't
local function pending(name)
check('pending', 1, 'string', name)
if tests_locked then error("Cannot create test while running tests", 2) end
local _, loc = pcall(error, "", 3)
loc = loc:gsub(":%s*$", "")
local n = test_stack.n + 1
test_stack[n], test_stack.n = name, n
do_test { pending = true, trace = loc }
test_stack.n = n - 1
end
local arg = ...
if arg == "--help" or arg == "-h" then
io.write("Usage: mcfly [DIR]\n")
io.write("\n")
io.write("Run tests in the provided DIRectory, or `spec` if not given.")
return
end
local root_dir = shell.resolve(arg or "spec")
if not fs.isDir(root_dir) then
io.stderr:write(("%q is not a directory.\n"):format(root_dir))
error()
end
do
-- Load in the tests from all our files
local env = setmetatable({
expect = expect, fail = fail,
describe = describe, it = it, pending = pending
}, { __index = _ENV })
local suffix = "_spec.lua"
local function run_in(sub_dir)
for _, name in ipairs(fs.list(sub_dir)) do
local file = fs.combine(sub_dir, name)
if fs.isDir(file) then
run_in(file)
elseif file:sub(-#suffix) == suffix then
local fun, err = loadfile(file, env)
if not fun then
do_test { name = file:sub(#root_dir + 2), error = { message = err } }
else
local ok, err = try(fun)
if not ok then do_test { name = file:sub(#root_dir + 2), error = err } end
end
end
end
end
run_in(root_dir)
end
-- Error if we've found no tests
if test_count == 0 then
io.stderr:write(("Could not find any tests in %q\n"):format(root_dir))
error()
end
-- The results of each test, as well as how many passed and the count.
local test_results, test_status, tests_run = { n = 0 }, {}, 0
-- All possible test statuses
local statuses = {
pass = { desc = "Pass", col = colours.green, dot = "\7" }, -- Circle
fail = { desc = "Failed", col = colours.red, dot = "\4" }, -- Diamond
error = { desc = "Error", col = colours.magenta, dot = "\4" },
pending = { desc = "Pending", col = colours.yellow, dot = "\186" }, -- Hollow circle
}
-- Set up each test status count.
for k in pairs(statuses) do test_status[k] = 0 end
--- Do the actual running of our test
local function do_run(test)
-- If we're a pre-computed test, determine our status message. Otherwise,
-- skip.
local status, err
if test.pending then
status = "pending"
elseif test.error then
err = test.error
status = "error"
elseif test.action then
local ok
ok, err = try(test.action)
status = ok and "pass" or (err.fail and "fail" or "error")
end
-- If we've a boolean status, then convert it into a string
if status == true then status = "pass"
elseif status == false then status = err.fail and "fail" or "error"
end
tests_run = tests_run + 1
test_status[status] = test_status[status] + 1
test_results[tests_run] = {
status = status, name = test.name,
message = test.message or err and err.message,
trace = test.trace or err and err.trace,
}
-- If we're running under howlci, then log some info.
if howlci then howlci.status(status, test_name(test)) end
if cct_test then cct_test.submit(test_results[tests_run]) end
-- Print our progress dot
local data = statuses[status]
term.setTextColour(data.col) io.write(data.dot)
term.setTextColour(colours.white)
end
-- Loop over all our tests, running them as required.
if cct_test then
-- If we're within a cct_test environment, then submit them and wait on tests
-- to be run.
cct_test.start(test_map)
while true do
local _, name = os.pullEvent("cct_test_run")
if not name then break end
do_run(test_list[test_map[name]])
end
else
for _, test in pairs(test_list) do do_run(test) end
end
-- Display the results of each failure
io.write("\n\n")
for i = 1, tests_run do
local test = test_results[i]
if test.status ~= "pass" then
local status_data = statuses[test.status]
term.setTextColour(status_data.col)
io.write(status_data.desc)
term.setTextColour(colours.white)
io.write(" \26 " .. test_name(test) .. "\n")
if test.message then
io.write(" " .. test.message:gsub("\n", "\n ") .. "\n")
end
if test.trace then
term.setTextColour(colours.lightGrey)
io.write(" " .. test.trace:gsub("\n", "\n ") .. "\n")
end
io.write("\n")
end
end
-- And some summary statistics
local actual_count = tests_run - test_status.pending
local info = ("Ran %s test(s), of which %s passed (%g%%).")
:format(actual_count, test_status.pass, (test_status.pass / actual_count) * 100)
if test_status.pending > 0 then
info = info .. (" Skipped %d pending test(s)."):format(test_status.pending)
end
term.setTextColour(colours.white) io.write(info .. "\n")
if howlci then howlci.log("debug", info) sleep(3) end
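One detail worth noting about the framework above: nested describe blocks and the it name are joined with "\0" into the internal test name (see do_test), which is exactly what ComputerTestDelegate splits on to rebuild the tree on the JUnit side. A small, hypothetical spec showing that nesting together with the structural same assertion:

    -- Hypothetical: registers a test whose internal name is
    -- "tables\0copies\0compare structurally, not by identity".
    describe("tables", function()
        describe("copies", function()
            it("compare structurally, not by identity", function()
                local original = { 1, 2, nested = { "a" } }
                local copy = { 1, 2, nested = { "a" } }
                expect(copy):same(original)       -- structural equality (are_same)
                expect(copy):not_equals(original) -- but they are distinct tables
            end)
        end)
    end)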

@@ -0,0 +1,17 @@
describe("The http library", function()
describe("http.checkURL", function()
it("Accepts well formed domains", function()
expect({ http.checkURL("https://google.com")}):same({ true })
end)
it("Rejects malformed URLs", function()
expect({ http.checkURL("google.com")}):same({ false, "Must specify http or https" })
expect({ http.checkURL("wss://google.com")}):same({ false, "Invalid protocol 'wss'" })
end)
it("Rejects local domains", function()
expect({ http.checkURL("http://localhost")}):same({ false, "Domain not permitted" })
expect({ http.checkURL("http://127.0.0.1")}):same({ false, "Domain not permitted" })
end)
end)
end)

@@ -0,0 +1,57 @@
--- Tests the io library is (mostly) consistent with PUC Lua.
--
-- These tests are based on the tests for Lua 5.1
describe("The io library", function()
it("io.input on a handle returns that handle", function()
expect(io.input(io.stdin)):equals(io.stdin)
end)
it("io.output on a handle returns that handle", function()
expect(io.output(io.stdout)):equals(io.stdout)
end)
describe("io.type", function()
it("returns file on handles", function()
local handle = io.input()
expect(handle):type("table")
expect(io.type(handle)):equals("file")
end)
it("returns nil on values", function() expect(io.type(8)):equals(nil) end)
it("returns nil on tables", function()
expect(io.type(setmetatable({}, {}))):equals(nil)
end)
end)
describe("io.open", function()
it("returns an error message on non-existent files", function()
local a, b = io.open('xuxu_nao_existe')
expect(a):equals(nil)
expect(b):type("string")
end)
end)
pending("io.output allows redirecting and seeking", function()
fs.delete("/tmp/io_spec.txt")
io.output("/tmp/io_spec.txt")
expect(io.output()):not_equals(io.stdout)
expect(io.output():seek()):equal(0)
assert(io.write("alo alo"))
expect(io.output():seek()):equal(#("alo alo"))
expect(io.output():seek("cur", -3)):equal(#("alo alo")-3)
assert(io.write("joao"))
expect(io.output():seek("end"):equal(#("alo joao")))
expect(io.output():seek("set")):equal(0)
assert(io.write('"<22>lo"', "{a}\n", "second line\n", "third line \n"))
assert(io.write('<EFBFBD>fourth_line'))
io.output(io.stdout)
expect(io.output()):equals(io.stdout)
end)
end)