[LON-CAPA-cvs] cvs: modules /jerf/tests About_LON-CAPA_Testing.html ApacheRequest.pm ApacheRequestTest.pm /jerf/tests/Apache Constants.pm
bowersj2
lon-capa-cvs@mail.lon-capa.org
Thu, 22 May 2003 20:47:16 -0000
This is a MIME encoded message
--bowersj21053636436
Content-Type: text/plain
bowersj2 Thu May 22 16:47:16 2003 EDT
Added files:
/modules/jerf/tests About_LON-CAPA_Testing.html ApacheRequest.pm
ApacheRequestTest.pm
Modified files:
/modules/jerf/tests/Apache Constants.pm
Log:
Towards a testing platform for LON-CAPA.
--bowersj21053636436
Content-Type: text/plain
Content-Disposition: attachment; filename="bowersj2-20030522164716.txt"
Index: modules/jerf/tests/Apache/Constants.pm
diff -u modules/jerf/tests/Apache/Constants.pm:1.1 modules/jerf/tests/Apache/Constants.pm:1.2
--- modules/jerf/tests/Apache/Constants.pm:1.1 Thu May 22 14:52:49 2003
+++ modules/jerf/tests/Apache/Constants.pm Thu May 22 16:47:16 2003
@@ -1,7 +1,7 @@
# The LearningOnline Network with CAPA
# Navigate Maps Handler
#
-# $Id: Constants.pm,v 1.1 2003/05/22 18:52:49 bowersj2 Exp $
+# $Id: Constants.pm,v 1.2 2003/05/22 20:47:16 bowersj2 Exp $
#
# Copyright Michigan State University Board of Trustees
#
@@ -105,6 +105,80 @@
ITERATE2
FLAG
NO_ARGS);
+
+%EXPORT_TAGS = ( common =>
+ [qw(OK DECLINED DONE NOT_FOUND FORBIDDEN AUTH_REQUIRED SERVER_ERROR)],
+
+ response =>
+ [qw(DOCUMENT_FOLLOWS MOVED REDIRECT USE_LOCAL_COPY BAD_REQUEST BAD_GATEWAY
+ RESPONSE_CODES NOT_IMPLEMENTED CONTINUE NOT_AUTHORITATIVE)],
+
+ methods =>
+ [qw(METHODS M_CONNECT M_DELETE M_GET M_INVALID M_OPTIONS M_POST M_TRACE
+ M_PATCH M_PROPFIND M_PROPPATCH M_MKCOL M_COPY M_MOVE M_LOCK M_UNLOCK)],
+
+ options =>
+ [qw(OPT_NONE OPT_INDEXES OPT_INCLUDES OPT_SYM_LINKS OPT_EXECCGI
+ OPT_UNSET OPT_INCNOEXEC OPT_SYM_OWNER OPT_MULTI OPT_ALL)],
+
+ satisfy =>
+ [qw(SATISFY_ALL SATISFY_ANY SATISFY_NOSPEC)],
+
+ remotehost =>
+ [qw(REMOTE_HOST REMOTE_NAME REMOTE_NOLOOKUP REMOVE_DOUBLE_REV)],
+
+ http =>
+ [qw(HTTP_OK
+ HTTP_MOVED_TEMPORARILY
+ HTTP_MOVED_PERMANENTLY
+ HTTP_METHOD_NOT_ALLOWED
+ HTTP_NOT_MODIFIED
+ HTTP_UNAUTHORIZED
+ HTTP_FORBIDDEN
+ HTTP_NOT_FOUND
+ HTTP_BAD_REQUEST
+ HTTP_INTERNAL_SERVER_ERROR
+ HTTP_NOT_ACCEPTABLE
+ HTTP_NO_CONTENT
+ HTTP_PRECONDITION_FAILED
+ HTTP_SERVICE_UNAVAILABLE
+ HTTP_VARIANT_ALSO_VARIES)],
+
+ server =>
+ [qw(MODULE_MAGIC_NUMBER
+ SERVER_VERSION
+ SERVER_BUILT)],
+
+ config =>
+ [qw(DECLINE_CMD)],
+
+ types =>
+ [qw(DIR_MAGIC_TYPE)],
+
+ override =>
+ [qw(OR_NONE
+ OR_LIMIT
+ OR_OPTIONS
+ OR_FILEINFO
+ OR_AUTHCFG
+ OR_INDEXES
+ OR_UNSET
+ OR_ALL
+ ACCESS_CONF
+ RSRC_CONF)],
+
+ args_how =>
+ [qw(RAW_ARGS
+ TAKE1
+ TAKE2
+ TAKE12
+ TAKE3
+ TAKE23
+ TAKE123
+ ITERATE
+ ITERATE2
+ FLAG
+ NO_ARGS)]);
# This file was generated by running the following code inside of an Apache
Index: modules/jerf/tests/About_LON-CAPA_Testing.html
+++ modules/jerf/tests/About_LON-CAPA_Testing.html
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="content-type"
content="text/html; charset=ISO-8859-1">
<title>About LON-CAPA Testing</title>
</head>
<body>
<h2 style="text-align: center;">About LON-CAPA Testing<br>
</h2>
<br>
LON-CAPA's biggest problem has been bugs. <br>
<br>
The problem, of course, is that any change to the code has the
opportunity to introduce new bugs into the system, *and* cause old bugs
to reassert themselves. A simple analysis shows that over time it
becomes impossible to add features to a design without the number of
bugs being added exceeding the number of bugs being fixed by the
change; at this point the software is dead.<br>
<br>
Software engineers have known about this problem for decades but
surprisingly only in the last few years have answers to this problem
started to crystallize. Unfortunately, the answers to this problem have
typically come from radical methodologies such as "eXtreme
Programming", which bring excessive baggage along with the solutions
to the real problems we all face. Fortunately, it is possible to
extract these solutions and use them independent of the radical
philosophy.<br>
<br>
By far the most important thing to come out of eXtreme Programming is
its testing methodology. We all agree in the abstract that "testing" is
important, yet we (as in programmers in general, not LON-CAPA
specifically) do little to none of it, typically consisting of just
poking at the system and trying to break it, which is not systematic
and will always fail to exercise the system completely. eXtreme
Programming's contribution to testing is a practical methodology and
framework that can be used for testing, along with some requirements
for that testing framework.<br>
<br>
However, this is my (Jeremy's) synthesis of it to date, so don't expect
perfect correspondence with XP. The advantage of this is that I can
vouch for how well this works from personal experience, which I can't
do with XP.<br>
<h3>What Is A Test?</h3>
In XP, there are two types of tests, <span style="font-style: italic;">unit
tests</span> and <span style="font-style: italic;">acceptance tests</span>. <br>
<br>
Unit tests are tests written to exercise a particular module of the
system, as independently from the rest as possible. They are written to
ensure the module works correctly, and should test as many success <span
style="font-style: italic;">and failure</span> cases as possible. For
each test, you specify whether a given action will succeed or fail, and
preferably exactly <span style="font-style: italic;">how</span> it
succeeds (what it returns, or what side-effects it has) as precisely as
possible.<br>
<br>
An acceptance test is specifically designed to test how well the system
conforms to some user requirement. Personally, I don't think these are
worth a separate categorization, because in the final analysis that's
what <span style="font-style: italic;">all</span> testing is, it's just
that the "user" of a module is the programmer, while the user of the
program is what we'd traditionally call the user.<br>
<br>
A test is specifically designed to be "fire-and-forget"; it should be
one simple command to fire a given test, or to fire all tests.
Moreover, they should run in a reasonable amount of time. The idea is
that as you are developing, you can frequently run the tests without
feeling like you're always sitting and twiddling your thumbs.<br>
<h3>Why Test?</h3>
Despite the fact we all feel bad about it, it is obvious that "Finding
bugs" is <span style="font-style: italic;">not</span> a sufficient
justification for testing, or everybody would be doing it, all the
time. What else can testing offer us?<br>
<ol>
<li><span style="font-style: italic;">A clear specification of what
the module should do</span>. Generally, a given concept should be in
the code precisely once; it is this idea that underlies every single
code structure proposal to date (OO, AOP, Agile Programming,
metadata-based programming, the list goes on). Testing methodology
modifies this to say that a concept should appear precisely <span
style="font-style: italic;">twice</span>, once in the code, once in
the tests.<br>
<br>
Generally one of the side-effects of testing is to cause you to organize
your code into easily testable chunks, which may then end up being
recombined, but do not generally need to be completely re-written, so
this "extra code" is not generally an issue in practice.<br>
<br>
</li>
<li><span style="font-style: italic;">Confidence</span>. If the code
passes the tests, you can be confident it works. If you need to make a
change or add capabilities to the module, you can re-run the tests to
ensure all the old functionality still works. When a bug arises, you
can code a test case for it and be confident that once you squash the
bug it will never come back without you knowing.<br>
<br>
</li>
<li><span style="font-style: italic;">Enough confidence to increase
layering</span>. This is an important enough consequence of confidence
that it's worth its own heading. Because you have confidence in the
working of the various modules, you are much more confident that you
can use the module as a part of a larger system, so you are much less
inclined to build monolithic systems that touch each other only on
explicit join points. Instead you can create a much more agile system
that interconnects deeply, with the confidence that not only will it
all work, but that most of the time if there is a problem, it will not
be terribly difficult to find which module is causing it. (Of course
subtle errors will always exist that will be hard to localize. But once
you do, you can write a test for it and make sure it never comes back
without being noticed.)<br>
<br>
</li>
<li><span style="font-style: italic;">Enough confidence to re-factor</span>.
This is also important enough that it's worth its own heading. Because
of the confidence you have mentioned in the previous point, you feel
more capable of re-factoring as necessary to grow the code cleanly,
instead of making timid hacks that eventually kill the code, as
mentioned in the intro. Support from unit tests and a dedication to do
it <span style="font-style: italic;">right</span> can keep you out of
the trap where timid hacks here and there eventually strangle the
product. <br>
<br>
In the long-term, <span style="font-weight: bold;">this is the most
important aspect of testing</span>, even more so than mere bug finding.
The ability to refactor, sometimes even quite violently, and still be
confident you have all the old capability you did before can be a <span
style="font-style: italic;">huge </span>market advantage, as you'll
be able to safely add new capabilities while those with inferior
testing methodologies will be stuck in that infinite-bug tweaking
scenario. Every aspect of testing should be bent towards making sure
this works out correctly; all the other benefits are incidental
side-effects.<br>
<br>
</li>
<li><span style="font-style: italic;">Increased development speed,
both long- <span style="font-weight: bold;">and</span> short-term</span>.
A persistent myth is that you can't afford to test, because you don't
have time. The exact opposite is true. Good unit test support speeds
long-term development by allowing you to perform radical re-factorings
as quickly as you could add a timid hack. Good unit test support speeds
development by helping enforce excellent modularization along fairly
natural boundaries. Good unit test support helps pinpoint bugs by making
it easy to run testing code over a problem section and gather a lot of
data quickly. Good unit test support helps ensure squashed bugs stay
down, even in the short-term. <br>
<br>
In the end, adding all the benefits of unit testing together reaps a
multiplicative <span style="font-style: italic;">increase</span> in
development speed <span style="font-style: italic;">and</span> an
increase in development quality, simultaneously. <br>
<br>
So why haven't I (Jeremy) already started doing it? Unit testing in a
web environment is non-trivial, especially when the environment was not
written with testing in mind. Also I just started to realize the
benefits over the last few months as I've applied it to my personal
projects.</li>
</ol>
<h2>The Unit Test Framework</h2>
Nobody has done this up until now because without understanding what we
hoped to gain from testing, without knowing what we <span
style="font-style: italic;">could</span> hope to gain from testing, we
would miss critical aspects of how to test and as a result not gain the
benefits listed above. For instance, we might test only the final
system, which gains nearly nothing. Even if unit tests were written, it
would be difficult to run them all at once, so nobody ever would. (It's <span
style="font-style: italic;">absolutely critical</span> that the tests
be easy to fire off, which is easy to miss.)<br>
<br>
Working out what the ideal unit testing framework is still occurring,
and nobody knows for certain what the final result will look like. But
right now the best-of-breed testers all derive from a Java testing
framework called JUnit. The closest Perl implementation is Test::Unit. <br>
<br>
Unit tests consist of separate Test objects that the framework then
inspects to determine what to do. Each test has a setup and teardown
method which they can use to set up the environment. The object also
has a number of <span style="font-style: italic;">test_*</span> methods
which the framework will execute, looking for errors as it goes. It
collects the errors and reports them at the end of the run.<br>
<br>
It is easy to run a specific file, or to run all tests.<br>
<br>
How to use the framework will be covered by example, in the
ApacheRequest.pm and ApacheRequestTest.pm files. ApacheRequest is an
object we have to write in order to test LON-CAPA, since to test a web
system we need to simulate the requests as much as possible, while not
actually needing to go through the server (so we have direct access to
as much data as possible). <br>
<h2>How To Use Unit Tests</h2>
Ideally, all tests should be run before any given commit. This is not
always practical, but at the very least, all related tests should be
executed before any commit. This helps ensure the system does not grow
new bugs on a given commit.<br>
<br>
In addition, if we can start using these things, we should set up Data
to run the tests every night, and automatically mail the dev list if
something blows up. <br>
<br>
For each sub test_*, entirely separate test objects are created, so be
aware the setup and teardown routines will be run that many times.
Module-scoped vars can be used as persistent globals if the need arises.<br>
</body>
</html>
Index: modules/jerf/tests/ApacheRequest.pm
+++ modules/jerf/tests/ApacheRequest.pm
# The LearningOnline Network with CAPA
# Navigate Maps Handler
#
# $Id: ApacheRequest.pm,v 1.1 2003/05/22 20:47:15 bowersj2 Exp $
#
# Copyright Michigan State University Board of Trustees
#
# This file is part of the LearningOnline Network with CAPA (LON-CAPA).
#
# LON-CAPA is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LON-CAPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LON-CAPA; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# /home/httpd/html/adm/gpl.txt
#
# http://www.lon-capa.org/
#
# (Testing Infrastructure: Apache Request Simulator
use lib '.';
=pod
=head1 ApacheRequest: Fake an Apache Request object
For testing LON-CAPA it is convenient to execute the LON-CAPA code outside
of the context of an Apache server, which is difficult to make work
correctly. This object tries to match the interface of the Apache request
object so that in combination with an %ENV setter, the LON-CAPA code thinks
it's running inside of the web server, but we have enough control to
test it.
This will also serve as a demo/introduction for Unit Testing in Perl,
in conjunction with ApacheRequestTest.pm, demonstrating how to test.
See ABOUT_LONCAPA_TESTING for more information.
This will get more functional as it is necessary.
=head2 Creating ApacheRequest Objects
ApacheRequest objects must be populated with the appropriate information.
The ApacheRequest object should be called with a hash reference. Note the
hash reference will be blessed into the ApacheRequest class, so you should
not expect to use it again later as a normal hash. The hash is allowed to
contain the following members:
=over 4
=item * I<querystring>: The querystring for the request. This may be passed as a
string, in which case that will be the query string passed into LON-CAPA,
or it can be a hash ref containing name => value pairs, in which case
it will be converted to the appropriate string for you.
=item * I<stuff>: Stuff.
=back
=head2 Using the ApacheRequest Object to Simulate Execution.
To set up an execution environment for a handler, do the following:
=over 4
=item 1. A
=item 2. B
=item 3. C
=back
Then call B<$r-E<gt>doHandler('Apache::lonmodule')>;
=cut
package ApacheRequest;
use Apache::Constants qw(:common :http);
# my constants
# Sentinel strings recorded in the output stream so tests can see exactly
# where rflush() and send_http_header() were called.  Chosen to be unlikely
# to collide with real handler output.
sub RFLUSH { '*-*RFLUSH*-*RFLUSH*-*RFLUSH*-*' }
sub HEADER { '*-*HEADER*-*HEADER*-*HEADER*-*' }
# Constructor.  Takes an optional hash reference of request settings,
# initializes the captured-output list, and blesses the hash into this
# class (the caller's hash ref is reused, as the POD above warns).
# NOTE(review): the POD documents a 'querystring' member, but this code
# only processes an 'args' member -- confirm which key callers should use.
sub new {
    my $proto = shift;
    my $class = ref($proto) || $proto;
    my $self = shift; # the args hash reference (optional)
    if (!defined($self)) { $self = {}; }
    # Every print/rflush/send_http_header call appends to this list.
    $self->{OUTPUT} = [];
    # Handle args, if any: a hash reference is flattened into a
    # URL-encoded "name=value&name=value" query string via escape().
    if (defined($self->{args})) {
        if (ref($self->{args})) {
            my $args = [];
            foreach (keys(%{$self->{args}})) {
                push @$args, escape($_) . '=' . escape($self->{args}->{$_});
            }
            $self->{args} = join('&', @$args);
        }
    } # else, just leave the string alone
    bless $self, $class;
    return $self;
}
########################
# Simulation routines: Methods that are actually in the Apache Request Object
=pod
=head2 Apache Request Simulation methods
These are the methods this object emulates. Notes follow if the emulation significantly
differs from the real implementation. Does not check autoflush $|.
=item * I<print>($text): Adds the given text to the output array.
=cut
# print($text): record $text in the captured output list.  Returns true,
# mirroring the real Apache request object's print.
sub print {
    my ($self, $text) = @_;
    push @{ $self->{OUTPUT} }, $text;
    return 1;
}
=pod
=item * I<rflush>(): Adds an rflush marker (ApacheRequest::RFLUSH) to the output array.
=cut
# rflush(): simulate Apache's rflush by recording the RFLUSH marker in
# the captured output, so tests can verify when flushes happened.
sub rflush {
    my ($self) = @_;
    push @{ $self->{OUTPUT} }, RFLUSH();
}
=pod
=item * I<send_http_header>(): Adds a header marker (ApacheRequest::HEADER) to the
output array.
=cut
# send_http_header(): simulate sending the HTTP header by recording the
# HEADER marker in the captured output.
sub send_http_header {
    my ($self) = @_;
    push @{ $self->{OUTPUT} }, HEADER();
}
=pod
=back
=cut
########################
# New routines: Methods not in the Apache Request Object
=pod
=head2 Additional Methods on ApacheRequest Objects
ApacheRequest objects implement additional methods not found on real request
objects to facilitate retrieving information from the results of running the
handler, and to set up the environment.
=over 4
=item * I<getOutput>(): Returns an array reference that contains all of the output
from the handler, including when the flushing occurred. Each time the handler
calls "print" on the the ApacheRequest object, a string corresponding to what
was added will be added to this array reference, and each time "rflush" is
called, an ApacheRequest::RFLUSH marker will be added.
=cut
# getOutput(): expose the raw captured-output array reference -- one
# element per print() call, plus RFLUSH/HEADER markers.  Note this is the
# live array, not a copy.
sub getOutput {
    my ($self) = @_;
    return $self->{OUTPUT};
}
=pod
=item * I<getOutputString>(): Returns a string representing all of the displayed output
of the handler. May take a bit to run. Does not return rflushes or headers.
=cut
# this technically won't work perfectly if someone actually outputs
# '*-*RFLUSH*-*RFLUSH*-*RFLUSH*-*', but I'm willing to bet that won't
# happen anytime soon...
# getOutputString(): join all captured output into one string with the
# RFLUSH and HEADER markers stripped out, yielding just the text the
# handler "displayed".
sub getOutputString {
    my ($self) = @_;
    my $text = join('', @{ $self->{OUTPUT} });
    for my $marker (RFLUSH(), HEADER()) {
        $text =~ s/\Q$marker\E//g;
    }
    return $text;
}
=pod
=item * I<getReturnValue>(): Returns the return value of the handler.
=back
=cut
# Routines imported from lonnet.pm so as to avoid conflict while developing
# escape($str): percent-encode every non-word character as %hh (lowercase
# hex).  Borrowed from lonnet.pm to avoid a dependency while developing.
sub escape {
    my ($raw) = @_;
    $raw =~ s{(\W)}{ '%' . unpack('H2', $1) }eg;
    return $raw;
}
# unescape($str): decode %hh percent-escapes back to their characters.
# Borrowed from lonnet.pm to avoid a dependency while developing.
sub unescape {
    my ($encoded) = @_;
    $encoded =~ s{%([0-9a-fA-F]{2})}{ pack('C', hex($1)) }eg;
    return $encoded;
}
1;
__END__
Index: modules/jerf/tests/ApacheRequestTest.pm
+++ modules/jerf/tests/ApacheRequestTest.pm
# The LearningOnline Network with CAPA
# Navigate Maps Handler
#
# $Id: ApacheRequestTest.pm,v 1.1 2003/05/22 20:47:15 bowersj2 Exp $
#
# Copyright Michigan State University Board of Trustees
#
# This file is part of the LearningOnline Network with CAPA (LON-CAPA).
#
# LON-CAPA is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LON-CAPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LON-CAPA; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# /home/httpd/html/adm/gpl.txt
#
# http://www.lon-capa.org/
#
# (Testing Infrastructure: Apache Request Simulator Tester
package ApacheRequestTest;
# The base class that provides the integration with the test suite routines,
# and all of the assert* methods.
use base qw(Test::Unit::TestCase);
use strict;
use Data::Dumper;
# Since we're testing this, we need this in here
use ApacheRequest;
# Constructor: defer entirely to the Test::Unit::TestCase base class.
sub new {
    my $invocant = shift;
    my $self = $invocant->SUPER::new(@_);
    return $self;
}
# Fixture setup hook: the framework runs this before each test_* method.
# A fresh test object is created per test_* sub, so this runs once per test.
sub set_up {
    my $self = shift;
    # Set up anything all of your tests need here.
}
# Fixture teardown hook: the framework runs this after each test_* method.
sub tear_down {
    my $self = shift;
    # Do the undoing of all of that stuff here.
}
# This is a utility function for the next test. Returns true if the
# two list references contain equal things, false otherwise.
# This verifies the promises made in the documentation: $r->print
# accepts a string, and you can get a array reference with those strings
# back via getOutput. getOutputString gets just the string, with no
# rflushes in it. rflush pushes ApacheRequest::RFLUSH into the output.
# Exercises the promises made in the ApacheRequest documentation:
# print() appends strings retrievable via getOutput(), rflush() appends
# the RFLUSH marker, send_http_header() appends the HEADER marker, and
# getOutputString() returns just the text with all markers removed.
sub test_printing_system_works {
    my $self = shift;

    my $request = ApacheRequest->new();
    $self->assert(defined($request));

    my $msg_a = "Hello!";
    my $msg_b = "There!";

    # The output list must start out empty.  (Even this trivial check
    # once caught a missing "return $self" in the constructor.)
    my $captured = $request->getOutput();
    $self->assert(defined($captured));
    $self->assert(scalar(@$captured) == 0);

    # One print produces exactly one entry.
    $request->print($msg_a);
    $self->assert(scalar(@$captured) == 1);
    $self->assert($captured->[0] eq $msg_a);

    # rflush appends the flush marker after the printed text.
    $request->rflush();
    $self->assert(scalar(@$captured) == 2);
    # A certain amount of copy & paste is acceptable in test code.
    $self->assert($captured->[0] eq $msg_a);
    $self->assert($captured->[1] eq ApacheRequest::RFLUSH());

    # Text printed after a flush lands after the marker.
    $request->print($msg_b);
    $self->assert($captured->[0] eq $msg_a);
    $self->assert($captured->[1] eq ApacheRequest::RFLUSH());
    $self->assert($captured->[2] eq $msg_b);

    $request->rflush(); # make sure it works with multiple rflushes

    # getOutputString strips the flush markers...
    my $rendered = $request->getOutputString();
    $self->assert($rendered eq ($msg_a . $msg_b));

    # ...and the header marker too.
    $request->send_http_header();
    $rendered = $request->getOutputString();
    $self->assert($rendered eq ($msg_a . $msg_b));
}
# This verifies the argument handling works, including sending it a hash reference,
# as promised in the documentation.
# Verifies constructor argument handling as promised by the POD, passing
# the query string as a plain string.
# NOTE(review): this test currently only constructs the object and makes
# no assertions about the result -- it looks unfinished.
sub test_argument_string_handling {
    my $self = shift;
    my $query = "a=b&c=d";
    my $settings = { querystring => $query };
    my $request = ApacheRequest->new($settings);
}
1;
--bowersj21053636436--